/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
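/*
 * Illustrative sketch (not from the original sources): the 48-bit
 * addressing scheme described above amounts to splitting a bus address
 * roughly as follows, assuming macros along the lines of the driver's
 * VGE_ADDR_LO()/VGE_ADDR_HI() from if_vgevar.h:
 *
 *	lo  = addr & 0xFFFFFFFF;	(descriptor low address field)
 *	hi  = (addr >> 32) & 0xFFFF;	(descriptor high address field)
 *	seg = addr >> 48;		(programmed via an I/O register)
 *
 * Because 'seg' is chip-global, every data buffer must fall within the
 * same 48-bit window; the exact macro definitions live in the header.
 */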
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define	VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of the MIB seems to report bogus values.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR
/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
		"VIA Networking Velocity Gigabit Ethernet" },
	{ 0, 0, NULL }
};
static int	vge_attach(device_t);
static int	vge_detach(device_t);
static int	vge_probe(device_t);
static int	vge_resume(device_t);
static int	vge_shutdown(device_t);
static int	vge_suspend(device_t);

static void	vge_cam_clear(struct vge_softc *);
static int	vge_cam_set(struct vge_softc *, uint8_t *);
static void	vge_clrwol(struct vge_softc *);
static void	vge_discard_rxbuf(struct vge_softc *, int);
static int	vge_dma_alloc(struct vge_softc *);
static void	vge_dma_free(struct vge_softc *);
static void	vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
static int	vge_encap(struct vge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
		vge_fixup_rx(struct mbuf *);
#endif
static void	vge_freebufs(struct vge_softc *);
static void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	vge_ifmedia_upd(struct ifnet *);
static int	vge_ifmedia_upd_locked(struct vge_softc *);
static void	vge_init(void *);
static void	vge_init_locked(struct vge_softc *);
static void	vge_intr(void *);
static void	vge_intr_holdoff(struct vge_softc *);
static int	vge_ioctl(struct ifnet *, u_long, caddr_t);
static void	vge_link_statchg(void *);
static int	vge_miibus_readreg(device_t, int, int);
static int	vge_miibus_writereg(device_t, int, int, int);
static void	vge_miipoll_start(struct vge_softc *);
static void	vge_miipoll_stop(struct vge_softc *);
static int	vge_newbuf(struct vge_softc *, int);
static void	vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
static void	vge_reset(struct vge_softc *);
static int	vge_rx_list_init(struct vge_softc *);
static int	vge_rxeof(struct vge_softc *, int);
static void	vge_rxfilter(struct vge_softc *);
static void	vge_setmedia(struct vge_softc *);
static void	vge_setvlan(struct vge_softc *);
static void	vge_setwol(struct vge_softc *);
static void	vge_start(struct ifnet *);
static void	vge_start_locked(struct ifnet *);
static void	vge_stats_clear(struct vge_softc *);
static void	vge_stats_update(struct vge_softc *);
static void	vge_stop(struct vge_softc *);
static void	vge_sysctl_node(struct vge_softc *);
static int	vge_tx_list_init(struct vge_softc *);
static void	vge_txeof(struct vge_softc *);
static void	vge_watchdog(void *);
static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),

	DEVMETHOD_END
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
}
static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
}
static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */
	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
}
static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");

	rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}
static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));
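	/*
	 * Worked example (illustration only): for vge_camidx == 10 the
	 * mask bit lives in register VGE_CAM0 + (10 / 8) = VGE_CAM0 + 1,
	 * at bit position 10 & 7 = 2, so we OR in (1 << 2) = 0x04. Each
	 * of the 8 mask registers therefore covers 8 of the 64 CAM
	 * entries.
	 */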
	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
static void
vge_setvlan(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint8_t cfg;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;
	cfg = CSR_READ_1(sc, VGE_RXCFG);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= VGE_VTAG_OPT2;
	else
		cfg &= ~VGE_VTAG_OPT2;
	CSR_WRITE_1(sc, VGE_RXCFG, cfg);
}
/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there's more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_rxfilter(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2];
	uint8_t rxcfg;
	int error = 0;

	VGE_LOCK_ASSERT(sc);

	/* First, zot all the multicast entries. */
	hashes[0] = 0;
	hashes[1] = 0;

	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
	    VGE_RXCTL_RX_PROMISC);
	/*
	 * Always allow VLAN oversized frames and frames for
	 * this host.
	 */
	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

	ifp = sc->vge_ifp;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= VGE_RXCTL_RX_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= VGE_RXCTL_RX_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
		goto done;
	}

	vge_cam_clear(sc);

	/* Now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}
	if_maddr_runlock(ifp);
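	/*
	 * Worked example (illustration only): the ">> 26" in the hash
	 * loop above keeps the top 6 bits of the 32-bit CRC, selecting
	 * one of 64 buckets. A CRC of 0xB16B00B5 yields bucket 44,
	 * which sets bit (44 - 32) = 12 in hashes[1], i.e. in the
	 * VGE_MAR1 register written below.
	 */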
done:
	if (hashes[0] != 0 || hashes[1] != 0)
		rxcfg |= VGE_RXCTL_RX_MCAST;
	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
}
static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);
}
/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
	struct vge_type *t;

	t = vge_devs;
	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}
/*
 * Map a single buffer address.
 */
struct vge_dmamap_arg {
	bus_addr_t	vge_busaddr;
};

static void
vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vge_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vge_dmamap_arg *)arg;
	ctx->vge_busaddr = segs[0].ds_addr;
}
static int
vge_dma_alloc(struct vge_softc *sc)
{
	struct vge_dmamap_arg ctx;
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
	int error, i;

	/*
	 * It seems old PCI controllers do not support DAC. DAC
	 * configuration can be enabled by accessing the VGE_CHIPCFG3
	 * register, but we honor the EEPROM configuration instead of
	 * blindly overriding the DAC configuration. PCIe based
	 * controllers are supposed to support 64bit DMA, so enable
	 * 64bit DMA on these controllers.
	 */
	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		lowaddr = BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VGE_TX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VGE_TX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VGE_RX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VGE_RX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
	    (void **)&sc->vge_rdata.vge_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vge_cdata.vge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}

	ctx.vge_busaddr = 0;
	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.vge_busaddr == 0) {
		device_printf(sc->vge_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
	    (void **)&sc->vge_rdata.vge_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vge_cdata.vge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}

	ctx.vge_busaddr = 0;
	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.vge_busaddr == 0) {
		device_printf(sc->vge_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;

	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
	if ((VGE_ADDR_HI(tx_ring_end) !=
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
	    (VGE_ADDR_HI(rx_ring_end) !=
	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
		device_printf(sc->vge_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA address mode.\n");
		vge_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		lowaddr = VGE_BUF_DMA_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_buffer_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
	    VGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_tx_tag);
	if (error != 0) {
		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_rx_tag);
	if (error != 0) {
		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vge_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
		device_printf(sc->vge_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vge_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
vge_dma_free(struct vge_softc *sc)
{
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
		if (sc->vge_cdata.vge_tx_ring_map)
			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_cdata.vge_tx_ring_map);
		if (sc->vge_cdata.vge_tx_ring_map &&
		    sc->vge_rdata.vge_tx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_rdata.vge_tx_ring,
			    sc->vge_cdata.vge_tx_ring_map);
		sc->vge_rdata.vge_tx_ring = NULL;
		sc->vge_cdata.vge_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
		sc->vge_cdata.vge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
		if (sc->vge_cdata.vge_rx_ring_map)
			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_cdata.vge_rx_ring_map);
		if (sc->vge_cdata.vge_rx_ring_map &&
		    sc->vge_rdata.vge_rx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_rdata.vge_rx_ring,
			    sc->vge_cdata.vge_rx_ring_map);
		sc->vge_rdata.vge_rx_ring = NULL;
		sc->vge_cdata.vge_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
		sc->vge_cdata.vge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vge_cdata.vge_tx_tag != NULL) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			txd = &sc->vge_cdata.vge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
		sc->vge_cdata.vge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vge_cdata.vge_rx_tag != NULL) {
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			rxd = &sc->vge_cdata.vge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
			    sc->vge_cdata.vge_rx_sparemap);
			sc->vge_cdata.vge_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
		sc->vge_cdata.vge_rx_tag = NULL;
	}

	if (sc->vge_cdata.vge_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
		sc->vge_cdata.vge_buffer_tag = NULL;
	}
	if (sc->vge_cdata.vge_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
		sc->vge_cdata.vge_ring_tag = NULL;
	}
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0, cap, i, msic, rid;

	sc = device_get_softc(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(1);
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->vge_flags |= VGE_FLAG_PCIE;
		sc->vge_expcap = cap;
	} else
		sc->vge_flags |= VGE_FLAG_JUMBO;
	if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
		sc->vge_flags |= VGE_FLAG_PMCAP;
		sc->vge_pmcap = cap;
	}
	rid = 0;
	msic = pci_msi_count(dev);
	if (msi_disable == 0 && msic > 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				sc->vge_flags |= VGE_FLAG_MSI;
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				rid = 1;
			} else
				pci_release_msi(dev);
		}
	}

	/* Allocate interrupt */
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);
	/* Reload EEPROM. */
	CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		device_printf(dev, "EEPROM reload timed out\n");
	/*
	 * Clear PACPI as EEPROM reload will set the bit. Otherwise
	 * the MAC will receive a magic packet which in turn confuses
	 * the controller.
	 */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
	/*
	 * Save the configured PHY address.
	 * It seems the PHY address of PCIe controllers just
	 * reflects media jump strapping status, so we assume the
	 * internal PHY address of PCIe controllers is 1.
	 */
	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		sc->vge_phyaddr = 1;
	else
		sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
		    VGE_MIICFG_PHYADDR;
	/* Clear WOL and take hardware from powerdown. */
	vge_clrwol(sc);
	vge_sysctl_node(sc);
	error = vge_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	vge_miipoll_start(sc);
	/* Do MII setup */
	error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
	    vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTAGGING;
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, vge_intr, sc, &sc->vge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		VGE_LOCK(sc);
		vge_stop(sc);
		VGE_UNLOCK(sc);
		callout_drain(&sc->vge_watchdog);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
	if (sc->vge_flags & VGE_FLAG_MSI)
		pci_release_msi(dev);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(1), sc->vge_res);
	if (ifp)
		if_free(ifp);

	vge_dma_free(sc);
	mtx_destroy(&sc->vge_mtx);

	return (0);
}
static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	int i;

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}
}
static int
vge_newbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int i, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * This is part of an evil trick to deal with strict-alignment
	 * architectures. The VIA chip requires RX buffers to be aligned
	 * on 32-bit boundaries, but that will hose strict-alignment
	 * architectures. To get around this, we leave some empty space
	 * at the start of each buffer and for non-strict-alignment hosts,
	 * we copy the buffer back two bytes to achieve word alignment.
	 * This is slightly more efficient than allocating a new buffer,
	 * copying the contents, and discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, VGE_RX_BUF_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
	sc->vge_cdata.vge_rx_sparemap = map;
	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;
	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}

	return (0);
}
static int
vge_tx_list_init(struct vge_softc *sc)
{
	struct vge_ring_data *rd;
	struct vge_txdesc *txd;
	int i;

	VGE_LOCK_ASSERT(sc);

	sc->vge_cdata.vge_tx_prodidx = 0;
	sc->vge_cdata.vge_tx_considx = 0;
	sc->vge_cdata.vge_tx_cnt = 0;

	rd = &sc->vge_rdata;
	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->vge_tx_ring[i];
	}

	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static int
vge_rx_list_init(struct vge_softc *sc)
{
	struct vge_ring_data *rd;
	struct vge_rxdesc *rxd;
	int i;

	VGE_LOCK_ASSERT(sc);

	sc->vge_cdata.vge_rx_prodidx = 0;
	sc->vge_cdata.vge_head = NULL;
	sc->vge_cdata.vge_tail = NULL;
	sc->vge_cdata.vge_rx_commit = 0;

	rd = &sc->vge_rdata;
	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->vge_rx_ring[i];
		if (i == 0)
			rxd->rxd_prev =
			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
		else
			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
		if (vge_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->vge_cdata.vge_rx_commit = 0;

	return (0);
}
static void
vge_freebufs(struct vge_softc *sc)
{
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	struct ifnet *ifp;
	int i;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			ifp->if_oerrors++;
		}
	}
}
#ifndef __NO_STRICT_ALIGNMENT
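/*
 * vge_fixup_rx: the chip wants receive buffers aligned on 32-bit
 * boundaries, which leaves the Ethernet payload misaligned on hosts
 * that trap on unaligned access. Slide the received bytes back by
 * ETHER_ALIGN (2) bytes, one 16-bit word at a time, so the IP header
 * ends up on a naturally aligned boundary.
 */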
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (VGE_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(*src);

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int prod, prog, total_len;
	struct vge_rxdesc *rxd;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	prod = sc->vge_cdata.vge_rx_prodidx;
	for (prog = 0; count > 0 &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    VGE_RX_DESC_INC(prod)) {
		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
		rxstat = le32toh(cur_rx->vge_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0)
			break;
		count--;
		prog++;
		rxctl = le32toh(cur_rx->vge_ctl);
		total_len = VGE_RXBYTES(rxstat);
		rxd = &sc->vge_cdata.vge_rxdesc[prod];
		m = rxd->rx_m;

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if ((rxstat & VGE_RXPKT_SOF) != 0) {
			if (vge_newbuf(sc, prod) != 0) {
				ifp->if_iqdrops++;
				VGE_CHAIN_RESET(sc);
				vge_discard_rxbuf(sc, prod);
				continue;
			}
			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
			if (sc->vge_cdata.vge_head == NULL) {
				sc->vge_cdata.vge_head = m;
				sc->vge_cdata.vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
				sc->vge_cdata.vge_tail = m;
			}
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 * We also want to receive frames with bad checksums
		 * and frames with bad lengths.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
		    VGE_RDSTS_CSUMERR)) == 0) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		if (vge_newbuf(sc, prod) != 0) {
			ifp->if_iqdrops++;
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		/* Chain received mbufs. */
		if (sc->vge_cdata.vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_cdata.vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
			}
			m = sc->vge_cdata.vge_head;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;

		/* Do RX checksumming if enabled */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (rxctl & VGE_RDCTL_FRAG) == 0) {
			/* Check IP header checksum */
			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
			/*
			 * The 32-bit rxctl register is stored in little-endian.
			 * However, the 16-bit vlan tag is stored in big-endian,
			 * so we have to byte swap it.
			 */
			m->m_pkthdr.ether_vtag =
			    bswap16(rxctl & VGE_RDCTL_VLANID);
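			/*
			 * Example (illustration only): a tag of 0x0123
			 * stored big-endian in the low 16 bits reads back
			 * as 0x2301 on a little-endian host; bswap16()
			 * restores the original 0x0123.
			 */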
			m->m_flags |= M_VLANTAG;
		}

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);
		sc->vge_cdata.vge_head = NULL;
		sc->vge_cdata.vge_tail = NULL;
	}

	if (prog > 0) {
		sc->vge_cdata.vge_rx_prodidx = prod;
		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
		    sc->vge_cdata.vge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Update residue counter. */
		if (sc->vge_cdata.vge_rx_commit != 0) {
			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
			    sc->vge_cdata.vge_rx_commit);
			sc->vge_cdata.vge_rx_commit = 0;
		}
	}

	return (prog);
}
static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct vge_tx_desc *cur_tx;
	struct vge_txdesc *txd;
	uint32_t txstat;
	int cons, prod;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	if (sc->vge_cdata.vge_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cons = sc->vge_cdata.vge_tx_considx;
	prod = sc->vge_cdata.vge_tx_prodidx;
	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
		txstat = le32toh(cur_tx->vge_sts);
		if ((txstat & VGE_TDSTS_OWN) != 0)
			break;
		sc->vge_cdata.vge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->vge_cdata.vge_txdesc[cons];
		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
	}
	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->vge_cdata.vge_tx_considx = cons;
	if (sc->vge_cdata.vge_tx_cnt == 0)
		sc->vge_timer = 0;
}
static void
vge_link_statchg(void *xsc)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint8_t physts;

	sc = xsc;
	ifp = sc->vge_ifp;
	VGE_LOCK_ASSERT(sc);

	physts = CSR_READ_1(sc, VGE_PHYSTS0);
	if ((physts & VGE_PHYSTS_RESETSTS) == 0) {
		if ((physts & VGE_PHYSTS_LINK) == 0) {
			sc->vge_flags &= ~VGE_FLAG_LINK;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_DOWN);
		} else {
			sc->vge_flags |= VGE_FLAG_LINK;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_UP);
			CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
			    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
			if ((physts & VGE_PHYSTS_FDX) != 0) {
				if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0)
					CSR_WRITE_1(sc, VGE_CRS2,
					    VGE_CR2_FDX_TXFLOWCTL_ENABLE);
				if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0)
					CSR_WRITE_1(sc, VGE_CRS2,
					    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
			}
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				vge_start_locked(ifp);
		}
	}
	/*
	 * Restart MII auto-polling because link state change interrupt
	 * is disabled.
	 */
	vge_miipoll_start(sc);
}
#ifdef DEVICE_POLLING
static int
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	VGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		goto done;

	rx_npkts = vge_rxeof(sc, count);
	vge_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vge_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		uint32_t status;

		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vge_init_locked(sc);
			goto done;
		}

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, count);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
vge_intr(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	VGE_LOCK(sc);

	ifp = sc->vge_ifp;
	if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
	    (ifp->if_flags & IFF_UP) == 0) {
		VGE_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		status = CSR_READ_4(sc, VGE_ISR);
		CSR_WRITE_4(sc, VGE_ISR, status);
		if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0)
			vge_link_statchg(sc);
		VGE_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	status = CSR_READ_4(sc, VGE_ISR);
	CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
		goto done;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, VGE_RX_DESC_CNT);
		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, VGE_RX_DESC_CNT);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vge_init_locked(sc);
		}

		if (status & VGE_ISR_LINKSTS)
			vge_link_statchg(sc);
	}
done:
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vge_start_locked(ifp);
	}
	VGE_UNLOCK(sc);
}
static int
vge_encap(struct vge_softc *sc, struct mbuf **m_head)
{
	struct vge_txdesc *txd;
	struct vge_tx_frag *frag;
	struct mbuf *m;
	bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
	int error, i, nsegs, padlen;
	uint32_t cflags;

	VGE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/* Argh. This chip does not autopad short frames. */
	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];

	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
		cflags |= VGE_TDCTL_IPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
		cflags |= VGE_TDCTL_TCPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
		cflags |= VGE_TDCTL_UDPCSUM;

	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0)
		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
	/*
	 * The Velocity family seems to support TSO but no information
	 * for MSS configuration is available. Also the number of
	 * fragments supported by a descriptor is too small to hold an
	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build a
	 * longer chain of buffers, but no additional information is
	 * available.
	 *
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why. This also means we can't use the last fragment
	 * field of the Tx descriptor.
	 */
	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
	    VGE_TD_LS_NORM);
	for (i = 0; i < nsegs; i++) {
		frag = &txd->tx_desc->vge_frag[i];
		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
	}
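	/*
	 * Layout note (illustration only): mirroring the Rx side in
	 * vge_newbuf(), each fragment entry packs the low 32 address
	 * bits into vge_addrlo, while vge_addrhi carries the upper
	 * address bits from VGE_ADDR_HI() plus the buffer length
	 * shifted into the upper half via VGE_BUFLEN() << 16.
	 */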
	sc->vge_cdata.vge_tx_cnt++;
	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);

	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
	txd->tx_m = m;

	return (0);
}
/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	vge_start_locked(ifp);
	VGE_UNLOCK(sc);
}

static void
vge_start_locked(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct vge_txdesc *txd;
	struct mbuf *m_head;
	int enq, idx;

	sc = ifp->if_softc;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	idx = sc->vge_cdata.vge_tx_prodidx;
	VGE_TX_DESC_DEC(idx);
	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		txd = &sc->vge_cdata.vge_txdesc[idx];
		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
		VGE_TX_DESC_INC(idx);
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
		    sc->vge_cdata.vge_tx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Issue a transmit command. */
		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->vge_timer = 5;
	}
}
static void
vge_init(void *xsc)
{
	struct vge_softc *sc = xsc;

	VGE_LOCK(sc);
	vge_init_locked(sc);
	VGE_UNLOCK(sc);
}

static void
vge_init_locked(struct vge_softc *sc)
{
	struct ifnet *ifp = sc->vge_ifp;
	int error, i;

	VGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);
	vge_miipoll_start(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */
	error = vge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
		return;
	}
	vge_tx_list_init(sc);
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Configure interrupt moderation. */
	vge_intr_holdoff(sc);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Set up receiver filter. */
	vge_rxfilter(sc);
	vge_setvlan(sc);

	/* Initialize pause timer. */
	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
	/*
	 * Initialize flow control parameters.
	 *	TX XON high threshold : 48
	 *	TX pause low threshold : 24
	 *	Disable half-duplex flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts except link state change if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
	} else	/* otherwise ... */
#endif
	{
	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	}
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	sc->vge_flags &= ~VGE_FLAG_LINK;
	vge_ifmedia_upd_locked(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
}
/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc;
	int error;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	error = vge_ifmedia_upd_locked(sc);
	VGE_UNLOCK(sc);

	return (error);
}

static int
vge_ifmedia_upd_locked(struct vge_softc *sc)
{
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	mii = device_get_softc(sc->vge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	vge_setmedia(sc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vge_miibus);

	VGE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VGE_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VGE_UNLOCK(sc);
}
static void
vge_setmedia(struct vge_softc *sc)
{
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */
	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(sc->vge_dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}
static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		VGE_LOCK(sc);
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU &&
			    (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
		}
		VGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		VGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vge_rxfilter(sc);
			else
				vge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vge_stop(sc);
		sc->vge_if_flags = ifp->if_flags;
		VGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VGE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vge_rxfilter(sc);
		VGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			VGE_LOCK(sc);
			vge_setvlan(sc);
			VGE_UNLOCK(sc);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
static void
vge_watchdog(void *arg)
{
    struct vge_softc *sc;
    struct ifnet *ifp;

    sc = arg;
    VGE_LOCK_ASSERT(sc);
    vge_stats_update(sc);
    callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
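    /*
     * vge_timer is armed by the transmit path and counted down here
     * once per second; hitting zero means a transmission has been
     * pending longer than expected.
     */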
    if (sc->vge_timer == 0 || --sc->vge_timer > 0)
        return;

    ifp = sc->vge_ifp;
    if_printf(ifp, "watchdog timeout\n");
    ifp->if_oerrors++;
    vge_rxeof(sc, VGE_RX_DESC_CNT);
    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    vge_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
    struct ifnet *ifp;

    VGE_LOCK_ASSERT(sc);
    ifp = sc->vge_ifp;
    callout_stop(&sc->vge_watchdog);
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
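    /*
     * Quiesce the chip: mask the global interrupt, issue a STOP
     * command, ack any stale interrupt status, and tear down the
     * TX/RX queue state before the ring resources are released.
     */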
    CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
    CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
    CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
    CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
    CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
    CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

    vge_stats_update(sc);
    VGE_CHAIN_RESET(sc);
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
    struct vge_softc *sc;

    sc = device_get_softc(dev);
    VGE_LOCK(sc);
    vge_stop(sc);
    vge_setwol(sc);
    sc->vge_flags |= VGE_FLAG_SUSPENDED;
    VGE_UNLOCK(sc);

    return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
    struct vge_softc *sc;
    struct ifnet *ifp;
    uint16_t pmstat;

    sc = device_get_softc(dev);
    ifp = sc->vge_ifp;
    VGE_LOCK(sc);
    if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
        /* Disable PME and clear PME status. */
        pmstat = pci_read_config(sc->vge_dev,
            sc->vge_pmcap + PCIR_POWER_STATUS, 2);
        if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
            pmstat &= ~PCIM_PSTAT_PMEENABLE;
            pci_write_config(sc->vge_dev,
                sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
        }
    }

    /* Restart MII auto-polling. */
    vge_miipoll_start(sc);

    /* Reinitialize interface if necessary. */
    if ((ifp->if_flags & IFF_UP) != 0) {
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        vge_init_locked(sc);
    }
    sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
    VGE_UNLOCK(sc);

    return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vge_shutdown(device_t dev)
{

    return (vge_suspend(dev));
}

#define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
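/*
 * Convenience wrapper: every MIB counter below becomes a read-only
 * unsigned 32-bit leaf under the dev.vge.N.stats sysctl node.
 */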
static void
vge_sysctl_node(struct vge_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *child, *parent;
    struct sysctl_oid *tree;
    struct vge_hw_stats *stats;

    stats = &sc->vge_stats;
    ctx = device_get_sysctl_ctx(sc->vge_dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
    SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
        CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
    SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
        CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
    SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
        CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");
    /* Pull in device tunables. */
    sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
    resource_int_value(device_get_name(sc->vge_dev),
        device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
    sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
    resource_int_value(device_get_name(sc->vge_dev),
        device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
    sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
    resource_int_value(device_get_name(sc->vge_dev),
        device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
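    /*
     * These defaults can be overridden from device.hints or
     * loader.conf, e.g. (illustrative values):
     *
     *    hint.vge.0.int_holdoff="20"
     *    hint.vge.0.rx_coal_pkt="64"
     */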
    tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
        NULL, "VGE statistics");
    parent = SYSCTL_CHILDREN(tree);
    /* Rx statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
        NULL, "RX MAC statistics");
    child = SYSCTL_CHILDREN(tree);
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
        &stats->rx_frames, "Frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
        &stats->rx_good_frames, "Good frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
        &stats->rx_fifo_oflows, "FIFO overflows");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
        &stats->rx_runts, "Too short frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
        &stats->rx_runts_errs, "Too short frames with errors");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
        &stats->rx_pkts_64, "64 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
        &stats->rx_pkts_65_127, "65 to 127 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
        &stats->rx_pkts_128_255, "128 to 255 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
        &stats->rx_pkts_256_511, "256 to 511 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
        &stats->rx_pkts_512_1023, "512 to 1023 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
        &stats->rx_pkts_1024_1518, "1024 to 1518 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
        &stats->rx_pkts_1519_max, "1519 byte to maximum size frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
        &stats->rx_pkts_1519_max_errs,
        "1519 byte to maximum size frames with errors");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
        &stats->rx_jumbos, "Jumbo frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
        &stats->rx_crcerrs, "CRC errors");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
        &stats->rx_pause_frames, "Pause frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
        &stats->rx_alignerrs, "Alignment errors");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
        &stats->rx_nobufs, "Frames with no buffer event");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
        &stats->rx_symerrs, "Frames with symbol errors");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
        &stats->rx_lenerrs, "Frames with mismatched length");
    /* Tx statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
        NULL, "TX MAC statistics");
    child = SYSCTL_CHILDREN(tree);
    VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
        &stats->tx_good_frames, "Good frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
        &stats->tx_pkts_64, "64 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
        &stats->tx_pkts_65_127, "65 to 127 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
        &stats->tx_pkts_128_255, "128 to 255 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
        &stats->tx_pkts_256_511, "256 to 511 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
        &stats->tx_pkts_512_1023, "512 to 1023 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
        &stats->tx_pkts_1024_1518, "1024 to 1518 byte frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
        &stats->tx_jumbos, "Jumbo frames");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
        &stats->tx_colls, "Collisions");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
        &stats->tx_latecolls, "Late collisions");
    VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
        &stats->tx_pause, "Pause frames");
#ifdef VGE_ENABLE_SQEERR
    VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
        &stats->tx_sqeerrs, "SQE errors");
#endif
    /* Clear MAC statistics. */
    vge_stats_clear(sc);
}

#undef VGE_SYSCTL_STAT_ADD32
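/*
 * The hardware MIB counters can only be cleared as a block: freeze
 * the MIB engine, request the clear, then poll until the
 * self-clearing CLR bit drops before unfreezing.
 */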
static void
vge_stats_clear(struct vge_softc *sc)
{
    int i;

    CSR_WRITE_1(sc, VGE_MIBCSR,
        CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
    CSR_WRITE_1(sc, VGE_MIBCSR,
        CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
    for (i = VGE_TIMEOUT; i > 0; i--) {
        DELAY(1);
        if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
            break;
    }
    if (i == 0)
        device_printf(sc->vge_dev, "MIB clear timed out!\n");
    CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
        ~VGE_MIBCSR_FREEZE);
}

static void
vge_stats_update(struct vge_softc *sc)
{
    struct vge_hw_stats *stats;
    struct ifnet *ifp;
    uint32_t mib[VGE_MIB_CNT], val;
    int i;

    VGE_LOCK_ASSERT(sc);

    stats = &sc->vge_stats;
    ifp = sc->vge_ifp;

    CSR_WRITE_1(sc, VGE_MIBCSR,
        CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
    for (i = VGE_TIMEOUT; i > 0; i--) {
        DELAY(1);
        if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
            break;
    }
    if (i == 0) {
        device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
        vge_stats_clear(sc);
        return;
    }
    bzero(mib, sizeof(mib));

    /* Set MIB read index to 0. */
    CSR_WRITE_1(sc, VGE_MIBCSR,
        CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
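    /*
     * Each VGE_MIBDATA read returns the counter index in its high
     * bits and the counter value in the low bits; an unexpected
     * index in the loop below means the hardware restarted the dump
     * underneath us, and the remaining slots are left zeroed from
     * the bzero() above.
     */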
    for (i = 0; i < VGE_MIB_CNT; i++) {
        val = CSR_READ_4(sc, VGE_MIBDATA);
        if (i != VGE_MIB_DATA_IDX(val)) {
            /* Reading interrupted. */
            break;
        }
        mib[i] = val & VGE_MIB_DATA_MASK;
    }
    /* Rx stats. */
    stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
    stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
    stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
    stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
    stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
    stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
    stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
    stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
    stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
    stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
    stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
    stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
    stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
    stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
    stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
    stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
    stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
    stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
    stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
    stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];
    /* Tx stats. */
    stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
    stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
    stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
    stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
    stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
    stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
    stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
    stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
    stats->tx_colls += mib[VGE_MIB_TX_COLLS];
    stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
    stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
    stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];
    /* Update counters in ifnet. */
    ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];
    ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
        mib[VGE_MIB_TX_LATECOLLS];
    ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
        mib[VGE_MIB_TX_LATECOLLS];
    ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];
    ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
        mib[VGE_MIB_RX_RUNTS] +
        mib[VGE_MIB_RX_RUNTS_ERRS] +
        mib[VGE_MIB_RX_CRCERRS] +
        mib[VGE_MIB_RX_ALIGNERRS] +
        mib[VGE_MIB_RX_NOBUFS] +
        mib[VGE_MIB_RX_SYMERRS] +
        mib[VGE_MIB_RX_LENERRS];
}

static void
vge_intr_holdoff(struct vge_softc *sc)
{
    uint8_t intctl;

    VGE_LOCK_ASSERT(sc);

    /*
     * Set Tx interrupt suppression threshold.
     * It's possible to use the single-shot timer in the VGE_CRS1
     * register in the Tx path so that the driver could suppress most
     * Tx completion interrupts.  However, that requires an additional
     * access to VGE_CRS1 to reload the timer on top of issuing the Tx
     * kick command, and since a good single-shot timer value isn't
     * known in advance, reclaiming transmitted mbufs could be delayed
     * considerably, which in turn would slow down Tx operation.
     */
    CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
    CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

    /* Set Rx interrupt suppression threshold. */
    CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
    CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);
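    /*
     * Note the access pattern above: VGE_CAMCTL doubles as a page
     * selector, so each suppression threshold register is reached by
     * selecting its page first and then writing the packet count.
     */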
    intctl = CSR_READ_1(sc, VGE_INTCTL1);
    intctl &= ~VGE_INTCTL_SC_RELOAD;
    intctl |= VGE_INTCTL_HC_RELOAD;
    if (sc->vge_tx_coal_pkt <= 0)
        intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
    else
        intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
    if (sc->vge_rx_coal_pkt <= 0)
        intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
    else
        intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
    CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
    CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
    if (sc->vge_int_holdoff > 0) {
        /* Set interrupt holdoff timer. */
        CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
        CSR_WRITE_1(sc, VGE_INTHOLDOFF,
            VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
        /* Enable holdoff timer. */
        CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
    }
}

static void
vge_setlinkspeed(struct vge_softc *sc)
{
    struct mii_data *mii;
    int i;

    VGE_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->vge_miibus);
    mii_pollstat(mii);
    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            /* Nothing to do, the link is already usable. */
            return;
        default:
            break;
        }
    }
    /* Clear forced MAC speed/duplex configuration. */
    CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
    CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
    vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
    vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
        ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
    vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
        BMCR_AUTOEN | BMCR_STARTNEG);
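    /*
     * Writing 0 to MII_100T2CR removes the 1000baseT advertisement
     * while ANAR advertises only 10/100, so the restarted
     * autonegotiation can settle solely on a speed the MAC can
     * sustain while powered down for WOL.
     */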
    /* Poll link state until vge(4) gets a 10/100 link. */
    for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
        mii_pollstat(mii);
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
            == (IFM_ACTIVE | IFM_AVALID)) {
            switch (IFM_SUBTYPE(mii->mii_media_active)) {
            case IFM_10_T:
            case IFM_100_TX:
                return;
            default:
                break;
            }
        }
        VGE_UNLOCK(sc);
        pause("vgelnk", hz);
        VGE_LOCK(sc);
    }
    if (i == MII_ANEGTICKS_GIGE)
        device_printf(sc->vge_dev, "establishing link failed, "
            "WOL may not work!\n");
    /*
     * No link: force the MAC to a 100Mbps, full-duplex link.
     * This is the last resort and may or may not work.
     */
    mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
    mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
}

static void
vge_setwol(struct vge_softc *sc)
{
    struct ifnet *ifp;
    uint16_t pmstat;
    uint8_t val;

    VGE_LOCK_ASSERT(sc);

    if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
        /* No PME capability, PHY power down. */
        vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
            BMCR_PDOWN);
        vge_miipoll_stop(sc);
        return;
    }

    ifp = sc->vge_ifp;
    /* Clear WOL on pattern match. */
    CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
    /* Disable WOL on magic/unicast packet. */
    CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
    CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
        VGE_WOLCFG_PMEOVR);
    if ((ifp->if_capenable & IFCAP_WOL) != 0) {
        vge_setlinkspeed(sc);
        val = 0;
        if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
            val |= VGE_WOLCR1_UCAST;
        if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
            val |= VGE_WOLCR1_MAGIC;
        CSR_WRITE_1(sc, VGE_WOLCR1S, val);
        val = 0;
        if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
            val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
        CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
    }
    /* Disable MII auto-polling. */
    vge_miipoll_stop(sc);

    CSR_SETBIT_1(sc, VGE_DIAGCTL,
        VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
    CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

    /* Clear WOL status on pattern match. */
    CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
    CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
    val = CSR_READ_1(sc, VGE_PWRSTAT);
    val |= VGE_STICKHW_SWPTAG;
    CSR_WRITE_1(sc, VGE_PWRSTAT, val);
    /* Put hardware into sleep. */
    val = CSR_READ_1(sc, VGE_PWRSTAT);
    val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
    CSR_WRITE_1(sc, VGE_PWRSTAT, val);
    /* Request PME if WOL is requested. */
    pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
        PCIR_POWER_STATUS, 2);
    pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
    if ((ifp->if_capenable & IFCAP_WOL) != 0)
        pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
    pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
        pmstat, 2);
}

static void
vge_clrwol(struct vge_softc *sc)
{
    uint8_t val;

    val = CSR_READ_1(sc, VGE_PWRSTAT);
    val &= ~VGE_STICKHW_SWPTAG;
    CSR_WRITE_1(sc, VGE_PWRSTAT, val);
    /* Disable WOL and clear power state indicator. */
    val = CSR_READ_1(sc, VGE_PWRSTAT);
    val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
    CSR_WRITE_1(sc, VGE_PWRSTAT, val);

    CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
    CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

    /* Clear WOL on pattern match. */
    CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
    /* Disable WOL on magic/unicast packet. */
    CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
    CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
        VGE_WOLCFG_PMEOVR);
    /* Clear WOL status on pattern match. */
    CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
    CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}