/*-
 * Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 *
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 * o Jumbo frame support up to 16K
 * o Transmit and receive flow control
 * o IPv4 checksum offload
 * o VLAN tag insertion and stripping
 * o 64-bit multicast hash table filter
 * o 64 entry CAM filter
 * o 16K RX FIFO and 48K TX FIFO memory
 * o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
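 * (When a frame does need more fragments than that, vge_encap() below
 * coalesces it with m_collapse() before retrying the DMA load.)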
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
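 * (That copy-back is done by vge_fixup_rx() below, which is compiled
 * only on strict-alignment architectures.)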
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
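 * For example (illustrative): 2^48 = 0x1_0000_0000_0000, so any two
 * buffers below that boundary share the same upper 16 address bits,
 * while a buffer above it would need a different value in the shared
 * high-address register and could not be used at the same time.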
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);
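
/*
 * This tunable can be set from the loader, e.g. hw.vge.msi_disable=1
 * in /boot/loader.conf forces legacy INTx interrupts.
 */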

/*
 * The SQE error counter of the MIB seems to report a bogus value.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
    { VIA_VENDORID, VIA_DEVICEID_61XX,
        "VIA Networking Velocity Gigabit Ethernet" },
    { 0, 0, NULL }
};

static int vge_attach(device_t);
static int vge_detach(device_t);
static int vge_probe(device_t);
static int vge_resume(device_t);
static int vge_shutdown(device_t);
static int vge_suspend(device_t);

static void vge_cam_clear(struct vge_softc *);
static int vge_cam_set(struct vge_softc *, uint8_t *);
static void vge_clrwol(struct vge_softc *);
static void vge_discard_rxbuf(struct vge_softc *, int);
static int vge_dma_alloc(struct vge_softc *);
static void vge_dma_free(struct vge_softc *);
static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
static int vge_encap(struct vge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
    vge_fixup_rx(struct mbuf *);
#endif
static void vge_freebufs(struct vge_softc *);
static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int vge_ifmedia_upd(struct ifnet *);
static void vge_init(void *);
static void vge_init_locked(struct vge_softc *);
static void vge_intr(void *);
static void vge_intr_holdoff(struct vge_softc *);
static int vge_ioctl(struct ifnet *, u_long, caddr_t);
static void vge_link_statchg(void *);
static int vge_miibus_readreg(device_t, int, int);
static void vge_miibus_statchg(device_t);
static int vge_miibus_writereg(device_t, int, int, int);
static void vge_miipoll_start(struct vge_softc *);
static void vge_miipoll_stop(struct vge_softc *);
static int vge_newbuf(struct vge_softc *, int);
static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
static void vge_reset(struct vge_softc *);
static int vge_rx_list_init(struct vge_softc *);
static int vge_rxeof(struct vge_softc *, int);
static void vge_rxfilter(struct vge_softc *);
static void vge_setvlan(struct vge_softc *);
static void vge_setwol(struct vge_softc *);
static void vge_start(struct ifnet *);
static void vge_start_locked(struct ifnet *);
static void vge_stats_clear(struct vge_softc *);
static void vge_stats_update(struct vge_softc *);
static void vge_stop(struct vge_softc *);
static void vge_sysctl_node(struct vge_softc *);
static int vge_tx_list_init(struct vge_softc *);
static void vge_txeof(struct vge_softc *);
static void vge_watchdog(void *);

static device_method_t vge_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, vge_probe),
    DEVMETHOD(device_attach, vge_attach),
    DEVMETHOD(device_detach, vge_detach),
    DEVMETHOD(device_suspend, vge_suspend),
    DEVMETHOD(device_resume, vge_resume),
    DEVMETHOD(device_shutdown, vge_shutdown),

    /* Bus interface */
    DEVMETHOD(bus_print_child, bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),

    /* MII interface */
    DEVMETHOD(miibus_readreg, vge_miibus_readreg),
    DEVMETHOD(miibus_writereg, vge_miibus_writereg),
    DEVMETHOD(miibus_statchg, vge_miibus_statchg),
};

static driver_t vge_driver = {
    "vge",
    vge_methods,
    sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)

    /*
     * Enter EEPROM embedded programming mode. In order to
     * access the EEPROM at all, we first have to set the
     * EELOAD bit in the CHIPCFG2 register.
     */
    CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
    CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

    /* Select the address of the word we want to read */
    CSR_WRITE_1(sc, VGE_EEADDR, addr);

    /* Issue read command */
    CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

    /* Wait for the done bit to be set. */
    for (i = 0; i < VGE_TIMEOUT; i++) {
        if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
            break;
    }

    if (i == VGE_TIMEOUT) {
        device_printf(sc->vge_dev, "EEPROM read timed out\n");

    /* Read the result */
    word = CSR_READ_2(sc, VGE_EERDDAT);

    /* Turn off EEPROM access mode. */
    CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
    CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

    *dest = word;

/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)

    uint16_t word = 0, *ptr;

    for (i = 0; i < cnt; i++) {
        vge_eeprom_getword(sc, off + i, &word);
        ptr = (uint16_t *)(dest + (i * 2));
        if (swap)
            *ptr = ntohs(word);
        else
            *ptr = word;
    }

    for (i = 0; i < ETHER_ADDR_LEN; i++)
        dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);

static void
vge_miipoll_stop(struct vge_softc *sc)

    CSR_WRITE_1(sc, VGE_MIICMD, 0);

    for (i = 0; i < VGE_TIMEOUT; i++) {

        if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
            break;
    }

    if (i == VGE_TIMEOUT)
        device_printf(sc->vge_dev, "failed to idle MII autopoll\n");

static void
vge_miipoll_start(struct vge_softc *sc)

    /* First, make sure we're idle. */

    CSR_WRITE_1(sc, VGE_MIICMD, 0);
    CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

    for (i = 0; i < VGE_TIMEOUT; i++) {

        if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
            break;
    }

    if (i == VGE_TIMEOUT) {
        device_printf(sc->vge_dev, "failed to idle MII autopoll\n");

    /* Now enable auto poll mode. */

    CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

    /* And make sure it started. */

    for (i = 0; i < VGE_TIMEOUT; i++) {

        if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
            break;
    }

    if (i == VGE_TIMEOUT)
        device_printf(sc->vge_dev, "failed to start MII autopoll\n");

static int
vge_miibus_readreg(device_t dev, int phy, int reg)

    struct vge_softc *sc;

    sc = device_get_softc(dev);

    if (phy != sc->vge_phyaddr)
        return (0);

    vge_miipoll_stop(sc);

    /* Specify the register we want to read. */
    CSR_WRITE_1(sc, VGE_MIIADDR, reg);

    /* Issue read command. */
    CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

    /* Wait for the read command bit to self-clear. */
    for (i = 0; i < VGE_TIMEOUT; i++) {

        if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
            break;
    }

    if (i == VGE_TIMEOUT)
        device_printf(sc->vge_dev, "MII read timed out\n");

    rval = CSR_READ_2(sc, VGE_MIIDATA);

    vge_miipoll_start(sc);

    return (rval);

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)

    struct vge_softc *sc;

    sc = device_get_softc(dev);

    if (phy != sc->vge_phyaddr)
        return (0);

    vge_miipoll_stop(sc);

    /* Specify the register we want to write. */
    CSR_WRITE_1(sc, VGE_MIIADDR, reg);

    /* Specify the data we want to write. */
    CSR_WRITE_2(sc, VGE_MIIDATA, data);

    /* Issue write command. */
    CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

    /* Wait for the write command bit to self-clear. */
    for (i = 0; i < VGE_TIMEOUT; i++) {

        if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
            break;
    }

    if (i == VGE_TIMEOUT) {
        device_printf(sc->vge_dev, "MII write timed out\n");

    vge_miipoll_start(sc);

static void
vge_cam_clear(struct vge_softc *sc)

    /*
     * Turn off all the mask bits. This tells the chip
     * that none of the entries in the CAM filter are valid.
     * The desired entries will be enabled as we fill the filter in.
     */
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
    CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
    for (i = 0; i < 8; i++)
        CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

    /* Clear the VLAN filter too. */
    CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
    for (i = 0; i < 8; i++)
        CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

    CSR_WRITE_1(sc, VGE_CAMADDR, 0);
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)

    if (sc->vge_camidx == VGE_CAM_MAXADDRS)
        return (ENOSPC);

    /* Select the CAM data page. */
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

    /* Set the filter entry we want to update and enable writing. */
    CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

    /* Write the address to the CAM registers */
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

    /* Issue a write command. */
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
    /* Wait for it to clear. */
    for (i = 0; i < VGE_TIMEOUT; i++) {

        if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
            break;
    }

    if (i == VGE_TIMEOUT) {
        device_printf(sc->vge_dev, "setting CAM filter failed\n");

    /* Select the CAM mask page. */
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

    /* Set the mask bit that enables this filter. */
    CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
        1<<(sc->vge_camidx & 7));
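
    /*
     * (Each of the eight 8-bit mask registers covers eight CAM
     * entries, hence the index/8 register select and the
     * 1 << (index & 7) bit select above.)
     */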

    /* Turn off access to CAM. */
    CSR_WRITE_1(sc, VGE_CAMADDR, 0);
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

static void
vge_setvlan(struct vge_softc *sc)

    cfg = CSR_READ_1(sc, VGE_RXCFG);
    if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
        cfg |= VGE_VTAG_OPT2;
    else
        cfg &= ~VGE_VTAG_OPT2;
    CSR_WRITE_1(sc, VGE_RXCFG, cfg);

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast
 * addresses, we use the hash filter instead.
 */
static void
vge_rxfilter(struct vge_softc *sc)

    struct ifmultiaddr *ifma;
    uint32_t h, hashes[2];

    /* First, zot all the multicast entries. */

    rxcfg = CSR_READ_1(sc, VGE_RXCTL);
    rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
        VGE_RXCTL_RX_PROMISC);
    /*
     * Always allow VLAN oversized frames and frames for
     * this host.
     */
    rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

    if ((ifp->if_flags & IFF_BROADCAST) != 0)
        rxcfg |= VGE_RXCTL_RX_BCAST;
    if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
        if ((ifp->if_flags & IFF_PROMISC) != 0)
            rxcfg |= VGE_RXCTL_RX_PROMISC;
        if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
            hashes[0] = 0xFFFFFFFF;
            hashes[1] = 0xFFFFFFFF;
        }
    }

    /* Now program new ones */
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        error = vge_cam_set(sc,
            LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
    }

    /* If there were too many addresses, use the hash filter. */
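    /*
     * (Each address's big-endian CRC32 is shifted down to a 6-bit
     * value indexing one bit of the 64-bit hash table that is split
     * across the two 32-bit MAR registers.)
     */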
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
            ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
        if (h < 32)
            hashes[0] |= (1 << h);
        else
            hashes[1] |= (1 << (h - 32));
    }
    if_maddr_runlock(ifp);

    if (hashes[0] != 0 || hashes[1] != 0)
        rxcfg |= VGE_RXCTL_RX_MCAST;
    CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
    CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
    CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);

static void
vge_reset(struct vge_softc *sc)

    CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

    for (i = 0; i < VGE_TIMEOUT; i++) {

        if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
            break;
    }

    if (i == VGE_TIMEOUT) {
        device_printf(sc->vge_dev, "soft reset timed out\n");
        CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)

    while (t->vge_name != NULL) {
        if ((pci_get_vendor(dev) == t->vge_vid) &&
            (pci_get_device(dev) == t->vge_did)) {
            device_set_desc(dev, t->vge_name);
            return (BUS_PROBE_DEFAULT);
        }
        t++;
    }

    return (ENXIO);

/*
 * Map a single buffer address.
 */
struct vge_dmamap_arg {
    bus_addr_t vge_busaddr;
};

static void
vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)

    struct vge_dmamap_arg *ctx;

    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    ctx = (struct vge_dmamap_arg *)arg;
    ctx->vge_busaddr = segs[0].ds_addr;

static int
vge_dma_alloc(struct vge_softc *sc)

    struct vge_dmamap_arg ctx;
    struct vge_txdesc *txd;
    struct vge_rxdesc *rxd;
    bus_addr_t lowaddr, tx_ring_end, rx_ring_end;

    lowaddr = BUS_SPACE_MAXADDR;

    /* Create parent ring tag. */
    error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
        1, 0,                        /* algnmnt, boundary */
        lowaddr,                     /* lowaddr */
        BUS_SPACE_MAXADDR,           /* highaddr */
        NULL, NULL,                  /* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT,     /* maxsize */
        BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
        NULL, NULL,                  /* lockfunc, lockarg */
        &sc->vge_cdata.vge_ring_tag);
        device_printf(sc->vge_dev,
            "could not create parent DMA tag.\n");

    /* Create tag for Tx ring. */
    error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
        VGE_TX_RING_ALIGN, 0,        /* algnmnt, boundary */
        BUS_SPACE_MAXADDR,           /* lowaddr */
        BUS_SPACE_MAXADDR,           /* highaddr */
        NULL, NULL,                  /* filter, filterarg */
        VGE_TX_LIST_SZ,              /* maxsize */
        VGE_TX_LIST_SZ,              /* maxsegsize */
        NULL, NULL,                  /* lockfunc, lockarg */
        &sc->vge_cdata.vge_tx_ring_tag);
        device_printf(sc->vge_dev,
            "could not allocate Tx ring DMA tag.\n");

    /* Create tag for Rx ring. */
    error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
        VGE_RX_RING_ALIGN, 0,        /* algnmnt, boundary */
        BUS_SPACE_MAXADDR,           /* lowaddr */
        BUS_SPACE_MAXADDR,           /* highaddr */
        NULL, NULL,                  /* filter, filterarg */
        VGE_RX_LIST_SZ,              /* maxsize */
        VGE_RX_LIST_SZ,              /* maxsegsize */
        NULL, NULL,                  /* lockfunc, lockarg */
        &sc->vge_cdata.vge_rx_ring_tag);
        device_printf(sc->vge_dev,
            "could not allocate Rx ring DMA tag.\n");

    /* Allocate DMA'able memory and load the DMA map for Tx ring. */
    error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
        (void **)&sc->vge_rdata.vge_tx_ring,
        BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
        &sc->vge_cdata.vge_tx_ring_map);
        device_printf(sc->vge_dev,
            "could not allocate DMA'able memory for Tx ring.\n");

    error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
        sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
        VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
    if (error != 0 || ctx.vge_busaddr == 0) {
        device_printf(sc->vge_dev,
            "could not load DMA'able memory for Tx ring.\n");

    sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;

    /* Allocate DMA'able memory and load the DMA map for Rx ring. */
    error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
        (void **)&sc->vge_rdata.vge_rx_ring,
        BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
        &sc->vge_cdata.vge_rx_ring_map);
        device_printf(sc->vge_dev,
            "could not allocate DMA'able memory for Rx ring.\n");

    error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
        sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
        VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
    if (error != 0 || ctx.vge_busaddr == 0) {
        device_printf(sc->vge_dev,
            "could not load DMA'able memory for Rx ring.\n");

    sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;

    /* Tx/Rx descriptor queue should reside within 4GB boundary. */
    tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
    rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
    if ((VGE_ADDR_HI(tx_ring_end) !=
        VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
        (VGE_ADDR_HI(rx_ring_end) !=
        VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
        VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
        device_printf(sc->vge_dev, "4GB boundary crossed, "
            "switching to 32bit DMA address mode.\n");

        /* Limit DMA address space to 32bit and try again. */
        lowaddr = BUS_SPACE_MAXADDR_32BIT;
    }

    /* Create parent buffer tag. */
    error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
        1, 0,                        /* algnmnt, boundary */
        VGE_BUF_DMA_MAXADDR,         /* lowaddr */
        BUS_SPACE_MAXADDR,           /* highaddr */
        NULL, NULL,                  /* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT,     /* maxsize */
        BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
        NULL, NULL,                  /* lockfunc, lockarg */
        &sc->vge_cdata.vge_buffer_tag);
        device_printf(sc->vge_dev,
            "could not create parent buffer DMA tag.\n");

    /* Create tag for Tx buffers. */
    error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
        1, 0,                        /* algnmnt, boundary */
        BUS_SPACE_MAXADDR,           /* lowaddr */
        BUS_SPACE_MAXADDR,           /* highaddr */
        NULL, NULL,                  /* filter, filterarg */
        MCLBYTES * VGE_MAXTXSEGS,    /* maxsize */
        VGE_MAXTXSEGS,               /* nsegments */
        MCLBYTES,                    /* maxsegsize */
        NULL, NULL,                  /* lockfunc, lockarg */
        &sc->vge_cdata.vge_tx_tag);
        device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");

    /* Create tag for Rx buffers. */
    error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
        VGE_RX_BUF_ALIGN, 0,         /* algnmnt, boundary */
        BUS_SPACE_MAXADDR,           /* lowaddr */
        BUS_SPACE_MAXADDR,           /* highaddr */
        NULL, NULL,                  /* filter, filterarg */
        MCLBYTES,                    /* maxsize */
        MCLBYTES,                    /* maxsegsize */
        NULL, NULL,                  /* lockfunc, lockarg */
        &sc->vge_cdata.vge_rx_tag);
        device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");

    /* Create DMA maps for Tx buffers. */
    for (i = 0; i < VGE_TX_DESC_CNT; i++) {
        txd = &sc->vge_cdata.vge_txdesc[i];

        txd->tx_dmamap = NULL;
        error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
            &txd->tx_dmamap);
            device_printf(sc->vge_dev,
                "could not create Tx dmamap.\n");

    /* Create DMA maps for Rx buffers. */
    if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
        &sc->vge_cdata.vge_rx_sparemap)) != 0) {
        device_printf(sc->vge_dev,
            "could not create spare Rx dmamap.\n");

    for (i = 0; i < VGE_RX_DESC_CNT; i++) {
        rxd = &sc->vge_cdata.vge_rxdesc[i];

        rxd->rx_dmamap = NULL;
        error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
            &rxd->rx_dmamap);
            device_printf(sc->vge_dev,
                "could not create Rx dmamap.\n");

static void
vge_dma_free(struct vge_softc *sc)

    struct vge_txdesc *txd;
    struct vge_rxdesc *rxd;

    if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
        if (sc->vge_cdata.vge_tx_ring_map)
            bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
                sc->vge_cdata.vge_tx_ring_map);
        if (sc->vge_cdata.vge_tx_ring_map &&
            sc->vge_rdata.vge_tx_ring)
            bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
                sc->vge_rdata.vge_tx_ring,
                sc->vge_cdata.vge_tx_ring_map);
        sc->vge_rdata.vge_tx_ring = NULL;
        sc->vge_cdata.vge_tx_ring_map = NULL;
        bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
        sc->vge_cdata.vge_tx_ring_tag = NULL;
    }

    if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
        if (sc->vge_cdata.vge_rx_ring_map)
            bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
                sc->vge_cdata.vge_rx_ring_map);
        if (sc->vge_cdata.vge_rx_ring_map &&
            sc->vge_rdata.vge_rx_ring)
            bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
                sc->vge_rdata.vge_rx_ring,
                sc->vge_cdata.vge_rx_ring_map);
        sc->vge_rdata.vge_rx_ring = NULL;
        sc->vge_cdata.vge_rx_ring_map = NULL;
        bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
        sc->vge_cdata.vge_rx_ring_tag = NULL;
    }

    if (sc->vge_cdata.vge_tx_tag != NULL) {
        for (i = 0; i < VGE_TX_DESC_CNT; i++) {
            txd = &sc->vge_cdata.vge_txdesc[i];
            if (txd->tx_dmamap != NULL) {
                bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
                    txd->tx_dmamap);
                txd->tx_dmamap = NULL;
            }
        }
        bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
        sc->vge_cdata.vge_tx_tag = NULL;
    }

    if (sc->vge_cdata.vge_rx_tag != NULL) {
        for (i = 0; i < VGE_RX_DESC_CNT; i++) {
            rxd = &sc->vge_cdata.vge_rxdesc[i];
            if (rxd->rx_dmamap != NULL) {
                bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
                    rxd->rx_dmamap);
                rxd->rx_dmamap = NULL;
            }
        }
        if (sc->vge_cdata.vge_rx_sparemap != NULL) {
            bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
                sc->vge_cdata.vge_rx_sparemap);
            sc->vge_cdata.vge_rx_sparemap = NULL;
        }
        bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
        sc->vge_cdata.vge_rx_tag = NULL;
    }

    if (sc->vge_cdata.vge_buffer_tag != NULL) {
        bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
        sc->vge_cdata.vge_buffer_tag = NULL;
    }
    if (sc->vge_cdata.vge_ring_tag != NULL) {
        bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
        sc->vge_cdata.vge_ring_tag = NULL;
    }

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)

    u_char eaddr[ETHER_ADDR_LEN];
    struct vge_softc *sc;
    int error = 0, cap, i, msic, rid;

    sc = device_get_softc(dev);

    mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);
    callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);

    /*
     * Map control/status registers.
     */
    pci_enable_busmaster(dev);

    sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);
    if (sc->vge_res == NULL) {
        device_printf(dev, "couldn't map ports/memory\n");

    if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) {
        sc->vge_flags |= VGE_FLAG_PCIE;
        sc->vge_expcap = cap;
    } else
        sc->vge_flags |= VGE_FLAG_JUMBO;
    if (pci_find_extcap(dev, PCIY_PMG, &cap) == 0) {
        sc->vge_flags |= VGE_FLAG_PMCAP;
        sc->vge_pmcap = cap;
    }

    msic = pci_msi_count(dev);
    if (msi_disable == 0 && msic > 0) {

        if (pci_alloc_msi(dev, &msic) == 0) {

            sc->vge_flags |= VGE_FLAG_MSI;
            device_printf(dev, "Using %d MSI message\n",
                msic);
        } else
            pci_release_msi(dev);
    }

    /* Allocate interrupt */
    sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
    if (sc->vge_irq == NULL) {
        device_printf(dev, "couldn't map interrupt\n");

    /* Reset the adapter. */

    /* Reload EEPROM. */
    CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
    for (i = 0; i < VGE_TIMEOUT; i++) {

        if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
            break;
    }
    if (i == VGE_TIMEOUT)
        device_printf(dev, "EEPROM reload timed out\n");
    /*
     * Clear PACPI as the EEPROM reload will set the bit. Otherwise
     * the MAC will receive a magic packet, which in turn confuses
     * the controller.
     */
    CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

    /*
     * Get station address from the EEPROM.
     */
    vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);

    /*
     * Save configured PHY address.
     * It seems the PHY address of PCIe controllers just
     * reflects the media jump strapping status, so we assume the
     * internal PHY address of PCIe controllers is 1.
     */
    if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
        sc->vge_phyaddr = 1;
    else
        sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
            VGE_MIICFG_PHYADDR;
    /* Clear WOL and take hardware from powerdown. */

    vge_sysctl_node(sc);
    error = vge_dma_alloc(sc);

    ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
        device_printf(dev, "can not if_alloc()\n");

    if (mii_phy_probe(dev, &sc->vge_miibus,
        vge_ifmedia_upd, vge_ifmedia_sts)) {
        device_printf(dev, "MII without any phy!\n");

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = vge_ioctl;
    ifp->if_capabilities = IFCAP_VLAN_MTU;
    ifp->if_start = vge_start;
    ifp->if_hwassist = VGE_CSUM_FEATURES;
    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
        IFCAP_VLAN_HWTAGGING;
    if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
        ifp->if_capabilities |= IFCAP_WOL;
    ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
    ifp->if_capabilities |= IFCAP_POLLING;
#endif
    ifp->if_init = vge_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
    ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
    IFQ_SET_READY(&ifp->if_snd);

    /*
     * Call MI attach routine.
     */
    ether_ifattach(ifp, eaddr);

    /* Tell the upper layer(s) we support long frames. */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    /* Hook interrupt last to avoid having to lock softc */
    error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
        NULL, vge_intr, sc, &sc->vge_intrhand);
        device_printf(dev, "couldn't set up irq\n");
        ether_ifdetach(ifp);

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)

    struct vge_softc *sc;

    sc = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));

#ifdef DEVICE_POLLING
    if (ifp->if_capenable & IFCAP_POLLING)
        ether_poll_deregister(ifp);
#endif

    /* These should only be active if attach succeeded */
    if (device_is_attached(dev)) {
        ether_ifdetach(ifp);

    callout_drain(&sc->vge_watchdog);

    if (sc->vge_miibus)
        device_delete_child(dev, sc->vge_miibus);
    bus_generic_detach(dev);

    if (sc->vge_intrhand)
        bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);

    bus_release_resource(dev, SYS_RES_IRQ,
        sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
    if (sc->vge_flags & VGE_FLAG_MSI)
        pci_release_msi(dev);

    bus_release_resource(dev, SYS_RES_MEMORY,
        PCIR_BAR(1), sc->vge_res);

    mtx_destroy(&sc->vge_mtx);

static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)

    struct vge_rxdesc *rxd;

    rxd = &sc->vge_cdata.vge_rxdesc[prod];
    rxd->rx_desc->vge_sts = 0;
    rxd->rx_desc->vge_ctl = 0;

    /*
     * Note: the manual fails to document the fact that for
     * proper operation, the driver needs to replenish the RX
     * DMA ring 4 descriptors at a time (rather than one at a
     * time, like most chips). We can allocate the new buffers
     * but we should not set the OWN bits until we're ready
     * to hand back 4 of them in one shot.
     */
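    /* (VGE_RXCHUNK below is that per-commit chunk size.) */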
    if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
        for (i = VGE_RXCHUNK; i > 0; i--) {
            rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
            rxd = rxd->rxd_prev;
        }
        sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
    }

static int
vge_newbuf(struct vge_softc *sc, int prod)

    struct vge_rxdesc *rxd;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int i, nsegs;

    m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);
    /*
     * This is part of an evil trick to deal with strict-alignment
     * architectures. The VIA chip requires RX buffers to be aligned
     * on 32-bit boundaries, but that will hose strict-alignment
     * architectures. To get around this, we leave some empty space
     * at the start of each buffer and, on strict-alignment hosts,
     * we copy the buffer back two bytes to achieve word alignment.
     * This is slightly more efficient than allocating a new buffer,
     * copying the contents, and discarding the old buffer.
     */
    m->m_len = m->m_pkthdr.len = MCLBYTES;
    m_adj(m, VGE_RX_BUF_ALIGN);

    if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
        sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    rxd = &sc->vge_cdata.vge_rxdesc[prod];
    if (rxd->rx_m != NULL) {
        bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
    }
    map = rxd->rx_dmamap;
    rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
    sc->vge_cdata.vge_rx_sparemap = map;
    bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
        BUS_DMASYNC_PREREAD);
    rxd->rx_m = m;

    rxd->rx_desc->vge_sts = 0;
    rxd->rx_desc->vge_ctl = 0;
    rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
    rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
        (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);

    /*
     * Note: the manual fails to document the fact that for
     * proper operation, the driver needs to replenish the RX
     * DMA ring 4 descriptors at a time (rather than one at a
     * time, like most chips). We can allocate the new buffers
     * but we should not set the OWN bits until we're ready
     * to hand back 4 of them in one shot.
     */
    if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
        for (i = VGE_RXCHUNK; i > 0; i--) {
            rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
            rxd = rxd->rxd_prev;
        }
        sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
    }

static int
vge_tx_list_init(struct vge_softc *sc)

    struct vge_ring_data *rd;
    struct vge_txdesc *txd;

    VGE_LOCK_ASSERT(sc);

    sc->vge_cdata.vge_tx_prodidx = 0;
    sc->vge_cdata.vge_tx_considx = 0;
    sc->vge_cdata.vge_tx_cnt = 0;

    rd = &sc->vge_rdata;
    bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
    for (i = 0; i < VGE_TX_DESC_CNT; i++) {
        txd = &sc->vge_cdata.vge_txdesc[i];

        txd->tx_desc = &rd->vge_tx_ring[i];
    }

    bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
        sc->vge_cdata.vge_tx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

static int
vge_rx_list_init(struct vge_softc *sc)

    struct vge_ring_data *rd;
    struct vge_rxdesc *rxd;

    VGE_LOCK_ASSERT(sc);

    sc->vge_cdata.vge_rx_prodidx = 0;
    sc->vge_cdata.vge_head = NULL;
    sc->vge_cdata.vge_tail = NULL;
    sc->vge_cdata.vge_rx_commit = 0;

    rd = &sc->vge_rdata;
    bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
    for (i = 0; i < VGE_RX_DESC_CNT; i++) {
        rxd = &sc->vge_cdata.vge_rxdesc[i];

        rxd->rx_desc = &rd->vge_rx_ring[i];
        if (i == 0)
            rxd->rxd_prev =
                &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
        else
            rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
        if (vge_newbuf(sc, i) != 0)
            return (ENOBUFS);
    }

    bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
        sc->vge_cdata.vge_rx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    sc->vge_cdata.vge_rx_commit = 0;

static void
vge_freebufs(struct vge_softc *sc)

    struct vge_txdesc *txd;
    struct vge_rxdesc *rxd;

    VGE_LOCK_ASSERT(sc);

    /*
     * Free RX and TX mbufs still in the queues.
     */
    for (i = 0; i < VGE_RX_DESC_CNT; i++) {
        rxd = &sc->vge_cdata.vge_rxdesc[i];
        if (rxd->rx_m != NULL) {
            bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
                rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
                rxd->rx_dmamap);
            m_freem(rxd->rx_m);
            rxd->rx_m = NULL;
        }
    }

    for (i = 0; i < VGE_TX_DESC_CNT; i++) {
        txd = &sc->vge_cdata.vge_txdesc[i];
        if (txd->tx_m != NULL) {
            bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
                txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
                txd->tx_dmamap);
            m_freem(txd->tx_m);
            txd->tx_m = NULL;
        }
    }

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vge_fixup_rx(struct mbuf *m)
{
    int i;
    uint16_t *src, *dst;

    src = mtod(m, uint16_t *);
    dst = src - 1;

    for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
        *dst++ = *src++;

    m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
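/*
 * (Fragments are collected on the vge_head/vge_tail mbuf chain and
 * handed to if_input() once the end-of-frame descriptor is seen.)
 */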
static int
vge_rxeof(struct vge_softc *sc, int count)

    struct mbuf *m;
    struct ifnet *ifp;
    int prod, prog, total_len;
    struct vge_rxdesc *rxd;
    struct vge_rx_desc *cur_rx;
    uint32_t rxstat, rxctl;

    VGE_LOCK_ASSERT(sc);

    ifp = sc->vge_ifp;

    bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
        sc->vge_cdata.vge_rx_ring_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    prod = sc->vge_cdata.vge_rx_prodidx;
    for (prog = 0; count > 0 &&
        (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
        VGE_RX_DESC_INC(prod)) {
        cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
        rxstat = le32toh(cur_rx->vge_sts);
        if ((rxstat & VGE_RDSTS_OWN) != 0)
            break;

        rxctl = le32toh(cur_rx->vge_ctl);
        total_len = VGE_RXBYTES(rxstat);
        rxd = &sc->vge_cdata.vge_rxdesc[prod];
        m = rxd->rx_m;

        /*
         * If the 'start of frame' bit is set, this indicates
         * either the first fragment in a multi-fragment receive,
         * or an intermediate fragment. Either way, we want to
         * accumulate the buffers.
         */
        if ((rxstat & VGE_RXPKT_SOF) != 0) {
            if (vge_newbuf(sc, prod) != 0) {

                VGE_CHAIN_RESET(sc);
                vge_discard_rxbuf(sc, prod);
                continue;
            }
            m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
            if (sc->vge_cdata.vge_head == NULL) {
                sc->vge_cdata.vge_head = m;
                sc->vge_cdata.vge_tail = m;
            } else {
                m->m_flags &= ~M_PKTHDR;
                sc->vge_cdata.vge_tail->m_next = m;
                sc->vge_cdata.vge_tail = m;
            }
            continue;
        }

        /*
         * Bad/error frames will have the RXOK bit cleared.
         * However, there's one error case we want to allow:
         * if a VLAN tagged frame arrives and the chip can't
         * match it against the CAM filter, it considers this
         * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
         * We don't want to drop the frame though: our VLAN
         * filtering is done in software.
         * We also want to receive frames with bad checksums
         * and frames with bad lengths.
         */
        if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
            (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
            VGE_RDSTS_CSUMERR)) == 0) {

            /*
             * If this is part of a multi-fragment packet,
             * discard all the pieces.
             */
            VGE_CHAIN_RESET(sc);
            vge_discard_rxbuf(sc, prod);
            continue;
        }

        if (vge_newbuf(sc, prod) != 0) {

            VGE_CHAIN_RESET(sc);
            vge_discard_rxbuf(sc, prod);
            continue;
        }

        /* Chain received mbufs. */
        if (sc->vge_cdata.vge_head != NULL) {
            m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
            /*
             * Special case: if there are 4 bytes or less
             * in this buffer, the mbuf can be discarded:
             * the last 4 bytes are the CRC, which we don't
             * care about anyway.
             */
            if (m->m_len <= ETHER_CRC_LEN) {
                sc->vge_cdata.vge_tail->m_len -=
                    (ETHER_CRC_LEN - m->m_len);
                m_freem(m);
            } else {
                m->m_len -= ETHER_CRC_LEN;
                m->m_flags &= ~M_PKTHDR;
                sc->vge_cdata.vge_tail->m_next = m;
            }
            m = sc->vge_cdata.vge_head;
            m->m_flags |= M_PKTHDR;
            m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
        } else {
            m->m_flags |= M_PKTHDR;
            m->m_pkthdr.len = m->m_len =
                (total_len - ETHER_CRC_LEN);
        }

#ifndef __NO_STRICT_ALIGNMENT
        vge_fixup_rx(m);
#endif
        m->m_pkthdr.rcvif = ifp;

        /* Do RX checksumming if enabled */
        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
            (rxctl & VGE_RDCTL_FRAG) == 0) {
            /* Check IP header checksum */
            if ((rxctl & VGE_RDCTL_IPPKT) != 0)
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
            if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

            /* Check TCP/UDP checksum */
            if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
                rxctl & VGE_RDCTL_PROTOCSUMOK) {
                m->m_pkthdr.csum_flags |=
                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                m->m_pkthdr.csum_data = 0xffff;
            }
        }

        if ((rxstat & VGE_RDSTS_VTAG) != 0) {
            /*
             * The 32-bit rxctl register is stored in little-endian.
             * However, the 16-bit vlan tag is stored in big-endian,
             * so we have to byte swap it.
             */
            m->m_pkthdr.ether_vtag =
                bswap16(rxctl & VGE_RDCTL_VLANID);
            m->m_flags |= M_VLANTAG;
        }

        (*ifp->if_input)(ifp, m);

        sc->vge_cdata.vge_head = NULL;
        sc->vge_cdata.vge_tail = NULL;
    }

    sc->vge_cdata.vge_rx_prodidx = prod;
    bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
        sc->vge_cdata.vge_rx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /* Update residue counter. */
    if (sc->vge_cdata.vge_rx_commit != 0) {
        CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
            sc->vge_cdata.vge_rx_commit);
        sc->vge_cdata.vge_rx_commit = 0;
    }

static void
vge_txeof(struct vge_softc *sc)

    struct vge_tx_desc *cur_tx;
    struct vge_txdesc *txd;

    VGE_LOCK_ASSERT(sc);

    if (sc->vge_cdata.vge_tx_cnt == 0)
        return;

    bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
        sc->vge_cdata.vge_tx_ring_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    /*
     * Go through our tx list and free mbufs for those
     * frames that have been transmitted.
     */
    cons = sc->vge_cdata.vge_tx_considx;
    prod = sc->vge_cdata.vge_tx_prodidx;
    for (; cons != prod; VGE_TX_DESC_INC(cons)) {
        cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
        txstat = le32toh(cur_tx->vge_sts);
        if ((txstat & VGE_TDSTS_OWN) != 0)
            break;
        sc->vge_cdata.vge_tx_cnt--;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        txd = &sc->vge_cdata.vge_txdesc[cons];
        bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);

        KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
            __func__));
        m_freem(txd->tx_m);
        txd->tx_m = NULL;
        txd->tx_desc->vge_frag[0].vge_addrhi = 0;
    }

    bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
        sc->vge_cdata.vge_tx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    sc->vge_cdata.vge_tx_considx = cons;
    if (sc->vge_cdata.vge_tx_cnt == 0)
        sc->vge_timer = 0;

static void
vge_link_statchg(void *xsc)

    struct vge_softc *sc;
    struct ifnet *ifp;
    struct mii_data *mii;

    sc = xsc;
    ifp = sc->vge_ifp;
    VGE_LOCK_ASSERT(sc);
    mii = device_get_softc(sc->vge_miibus);

    if ((sc->vge_flags & VGE_FLAG_LINK) != 0) {
        if (!(mii->mii_media_status & IFM_ACTIVE)) {
            sc->vge_flags &= ~VGE_FLAG_LINK;
            if_link_state_change(sc->vge_ifp,
                LINK_STATE_DOWN);
        }
    } else {
        if (mii->mii_media_status & IFM_ACTIVE &&
            IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
            sc->vge_flags |= VGE_FLAG_LINK;
            if_link_state_change(sc->vge_ifp,
                LINK_STATE_UP);
            if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                vge_start_locked(ifp);
        }
    }

#ifdef DEVICE_POLLING
static int
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)

    struct vge_softc *sc = ifp->if_softc;

    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))

    rx_npkts = vge_rxeof(sc, count);

    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        vge_start_locked(ifp);

    if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */

        status = CSR_READ_4(sc, VGE_ISR);
        if (status == 0xFFFFFFFF)

        CSR_WRITE_4(sc, VGE_ISR, status);

        /*
         * XXX check behaviour on receiver stalls.
         */

        if (status & VGE_ISR_TXDMA_STALL ||
            status & VGE_ISR_RXDMA_STALL) {
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            vge_init_locked(sc);
        }

        if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
            vge_rxeof(sc, count);
            CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
            CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
        }
    }
#endif /* DEVICE_POLLING */

static void
vge_intr(void *arg)

    struct vge_softc *sc;

    if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
        (ifp->if_flags & IFF_UP) == 0) {
        VGE_UNLOCK(sc);
        return;
    }

#ifdef DEVICE_POLLING
    if (ifp->if_capenable & IFCAP_POLLING) {

    /* Disable interrupts */
    CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
    status = CSR_READ_4(sc, VGE_ISR);
    CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
    /* If the card has gone away the read returns 0xffffffff. */
    if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
        if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
            vge_rxeof(sc, VGE_RX_DESC_CNT);
        if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
            vge_rxeof(sc, VGE_RX_DESC_CNT);
            CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
            CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
        }

        if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
            vge_txeof(sc);

        if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            vge_init_locked(sc);
        }

        if (status & VGE_ISR_LINKSTS)
            vge_link_statchg(sc);
    }

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
        /* Re-enable interrupts */
        CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            vge_start_locked(ifp);
    }

static int
vge_encap(struct vge_softc *sc, struct mbuf **m_head)

    struct vge_txdesc *txd;
    struct vge_tx_frag *frag;
    struct mbuf *m;
    bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
    int error, i, nsegs, padlen;

    VGE_LOCK_ASSERT(sc);

    M_ASSERTPKTHDR((*m_head));

    /* Argh. This chip does not autopad short frames. */
    if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
        m = *m_head;
        padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
        if (M_WRITABLE(m) == 0) {
            /* Get a writable copy. */
            m = m_dup(*m_head, M_DONTWAIT);
        }
        if (M_TRAILINGSPACE(m) < padlen) {
            m = m_defrag(m, M_DONTWAIT);
        }
        /*
         * Manually pad short frames, and zero the pad space
         * to avoid leaking data.
         */
        bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
        m->m_pkthdr.len += padlen;
        m->m_len = m->m_pkthdr.len;
        *m_head = m;
    }

    txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];

    error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
        txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
    if (error == EFBIG) {
        m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS);

        *m_head = m;
        error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
            txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
    } else if (error != 0)
        return (error);

    bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
        BUS_DMASYNC_PREWRITE);

    m = *m_head;

    /* Configure checksum offload. */
    if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
        cflags |= VGE_TDCTL_IPCSUM;
    if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
        cflags |= VGE_TDCTL_TCPCSUM;
    if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
        cflags |= VGE_TDCTL_UDPCSUM;

    /* Configure VLAN. */
    if ((m->m_flags & M_VLANTAG) != 0)
        cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
    txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);

    /*
     * The Velocity family seems to support TSO, but no information
     * about MSS configuration is available. Also, the number of
     * fragments supported by a descriptor is too small to hold an
     * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
     * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build a
     * longer chain of buffers, but no additional information is
     * available.
     *
     * When telling the chip how many segments there are, we
     * must use nsegs + 1 instead of just nsegs. Darned if I
     * know why. This also means we can't use the last fragment
     * field of the Tx descriptor.
     */
    txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
        VGE_TD_LS_NORM);
    for (i = 0; i < nsegs; i++) {
        frag = &txd->tx_desc->vge_frag[i];
        frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
        frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
            (VGE_BUFLEN(txsegs[i].ds_len) << 16));
    }

    sc->vge_cdata.vge_tx_cnt++;
    VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);

    /*
     * Finally request interrupt and give the first descriptor
     * ownership to hardware.
     */
    txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
    txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
    txd->tx_m = m;

    return (0);

/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp)

    struct vge_softc *sc;

    sc = ifp->if_softc;
    VGE_LOCK(sc);
    vge_start_locked(ifp);
    VGE_UNLOCK(sc);

static void
vge_start_locked(struct ifnet *ifp)

    struct vge_softc *sc;
    struct vge_txdesc *txd;
    struct mbuf *m_head;

    VGE_LOCK_ASSERT(sc);

    if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
        (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;

    idx = sc->vge_cdata.vge_tx_prodidx;
    VGE_TX_DESC_DEC(idx);
    for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
        sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        /*
         * Pack the data into the transmit ring. If we
         * don't have room, set the OACTIVE flag and wait
         * for the NIC to drain the ring.
         */
        if (vge_encap(sc, &m_head)) {
            if (m_head == NULL)
                break;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }

        txd = &sc->vge_cdata.vge_txdesc[idx];
        txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
        VGE_TX_DESC_INC(idx);

        /*
         * If there's a BPF listener, bounce a copy of this frame
         * to it.
         */
        ETHER_BPF_MTAP(ifp, m_head);

    bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
        sc->vge_cdata.vge_tx_ring_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /* Issue a transmit command. */
    CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

    /*
     * Set a timeout in case the chip goes out to lunch.
     */

static void
vge_init(void *xsc)

    struct vge_softc *sc = xsc;

    VGE_LOCK(sc);
    vge_init_locked(sc);
    VGE_UNLOCK(sc);

static void
vge_init_locked(struct vge_softc *sc)

    struct ifnet *ifp = sc->vge_ifp;
    struct mii_data *mii;

    VGE_LOCK_ASSERT(sc);
    mii = device_get_softc(sc->vge_miibus);

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
        return;

    /*
     * Cancel pending I/O and free all RX/TX buffers.
     */

    /*
     * Initialize the RX and TX descriptors and mbufs.
     */
    error = vge_rx_list_init(sc);
    if (error != 0) {
        device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
        return;
    }
    vge_tx_list_init(sc);
    /* Clear MAC statistics. */
    vge_stats_clear(sc);
    /* Set our station address */
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);

    /*
     * Set receive FIFO threshold. Also allow transmission and
     * reception of VLAN tagged frames.
     */
    CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
    CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

    /* Set DMA burst length */
    CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
    CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

    CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

    /* Set collision backoff algorithm */
    CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
        VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
    CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

    /* Disable LPSEL field in priority resolution */
    CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

    /*
     * Load the addresses of the DMA queues into the chip.
     * Note that we only use one transmit queue.
     */
    CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
        VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
    CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
        VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
    CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

    CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
        VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
    CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
    CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

    /* Configure interrupt moderation. */
    vge_intr_holdoff(sc);

    /* Enable and wake up the RX descriptor queue */
    CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
    CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

    /* Enable the TX descriptor queue */
    CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

    /* Init the cam filter. */

    /* Set up receiver filter. */

    /* Enable flow control */

    CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

    /* Enable jumbo frame reception (if desired) */

    /* Start the MAC. */
    CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
    CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
    CSR_WRITE_1(sc, VGE_CRS0,
        VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

#ifdef DEVICE_POLLING
    /*
     * Disable interrupts if we are polling.
     */
    if (ifp->if_capenable & IFCAP_POLLING) {
        CSR_WRITE_4(sc, VGE_IMR, 0);
        CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
    } else /* otherwise ... */
#endif /* DEVICE_POLLING */
    {
        /*
         * Enable interrupts.
         */
        CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
        CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
        CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
    }

    sc->vge_flags &= ~VGE_FLAG_LINK;

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)

    struct vge_softc *sc;
    struct mii_data *mii;

    sc = ifp->if_softc;
    mii = device_get_softc(sc->vge_miibus);
    error = mii_mediachg(mii);

    return (error);

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)

    struct vge_softc *sc;
    struct mii_data *mii;

    sc = ifp->if_softc;
    mii = device_get_softc(sc->vge_miibus);

    if ((ifp->if_flags & IFF_UP) == 0) {
        VGE_UNLOCK(sc);
        return;
    }

    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;

static void
vge_miibus_statchg(device_t dev)

    struct vge_softc *sc;
    struct mii_data *mii;
    struct ifmedia_entry *ife;

    sc = device_get_softc(dev);
    mii = device_get_softc(sc->vge_miibus);
    ife = mii->mii_media.ifm_cur;

    /*
     * If the user manually selects a media mode, we need to turn
     * on the forced MAC mode bit in the DIAGCTL register. If the
     * user happens to choose a full duplex mode, we also need to
     * set the 'force full duplex' bit. This applies only to
     * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
     * mode is disabled, and in 1000baseT mode, full duplex is
     * always implied, so we turn on the forced mode bit but leave
     * the FDX bit cleared.
     */

    switch (IFM_SUBTYPE(ife->ifm_media)) {
    case IFM_AUTO:
        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
        break;
    case IFM_1000_T:
        CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
        break;
    case IFM_100_TX:
    case IFM_10_T:
        CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
        if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
            CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
        } else {
            CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
        }
        break;
    default:
        device_printf(dev, "unknown media type: %x\n",
            IFM_SUBTYPE(ife->ifm_media));
        break;
    }

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)

    struct vge_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
    struct mii_data *mii;
    int error = 0, mask;

    switch (command) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
            error = EINVAL;
        else if (ifp->if_mtu != ifr->ifr_mtu) {
            if (ifr->ifr_mtu > ETHERMTU &&
                (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
                error = EINVAL;
            else
                ifp->if_mtu = ifr->ifr_mtu;
        }
        break;
    case SIOCSIFFLAGS:
        if ((ifp->if_flags & IFF_UP) != 0) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                ((ifp->if_flags ^ sc->vge_if_flags) &
                (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                vge_rxfilter(sc);
            else
                vge_init_locked(sc);
        } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            vge_stop(sc);
        sc->vge_if_flags = ifp->if_flags;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
            vge_rxfilter(sc);
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        mii = device_get_softc(sc->vge_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
        if (mask & IFCAP_POLLING) {
            if (ifr->ifr_reqcap & IFCAP_POLLING) {
                error = ether_poll_register(vge_poll, ifp);

                /* Disable interrupts */
                CSR_WRITE_4(sc, VGE_IMR, 0);
                CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
                ifp->if_capenable |= IFCAP_POLLING;
            } else {
                error = ether_poll_deregister(ifp);
                /* Enable interrupts. */
                CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
                CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
                CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
                ifp->if_capenable &= ~IFCAP_POLLING;
            }
        }
#endif /* DEVICE_POLLING */
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
                ifp->if_hwassist |= VGE_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
            ifp->if_capenable ^= IFCAP_RXCSUM;
        if ((mask & IFCAP_WOL_UCAST) != 0 &&
            (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
            ifp->if_capenable ^= IFCAP_WOL_UCAST;
        if ((mask & IFCAP_WOL_MCAST) != 0 &&
            (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
            ifp->if_capenable ^= IFCAP_WOL_MCAST;
        if ((mask & IFCAP_WOL_MAGIC) != 0 &&
            (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
            ifp->if_capenable ^= IFCAP_WOL_MAGIC;
        if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
            ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            vge_setvlan(sc);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);

static void
vge_watchdog(void *arg)

    struct vge_softc *sc;

    sc = arg;
    VGE_LOCK_ASSERT(sc);
    vge_stats_update(sc);
    callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
    if (sc->vge_timer == 0 || --sc->vge_timer > 0)
        return;

    if_printf(ifp, "watchdog timeout\n");

    vge_rxeof(sc, VGE_RX_DESC_CNT);

    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    vge_init_locked(sc);

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
2362 VGE_LOCK_ASSERT(sc);
2365 callout_stop(&sc->vge_watchdog);
2367 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2369 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2370 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2371 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2372 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2373 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2374 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2376 vge_stats_update(sc);
2377 VGE_CHAIN_RESET(sc);
/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	VGE_LOCK(sc);
	vge_stop(sc);
	vge_setwol(sc);
	sc->vge_flags |= VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	VGE_LOCK(sc);
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->vge_dev,
		    sc->vge_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->vge_dev,
			    sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	vge_clrwol(sc);
	/* Restart MII auto-polling. */
	vge_miipoll_start(sc);
	ifp = sc->vge_ifp;
	/* Reinitialize interface if necessary. */
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vge_init_locked(sc);
	}
	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vge_shutdown(device_t dev)
{

	return (vge_suspend(dev));
}

#define	VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
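/*
 * For reference, a hand-expansion of one VGE_SYSCTL_STAT_ADD32() use
 * from the definition above:
 *
 *	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
 *	    &stats->rx_crcerrs, "CRC errors");
 * expands to
 *	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "crcerrs", CTLFLAG_RD,
 *	    &stats->rx_crcerrs, 0, "CRC errors");
 */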
static void
vge_sysctl_node(struct vge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vge_hw_stats *stats;

	stats = &sc->vge_stats;
	ctx = device_get_sysctl_ctx(sc->vge_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
	    CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
	    CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
	    CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");

	/* Pull in device tunables. */
	sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
	sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
	sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
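	/*
	 * Since the tunables above are fetched with resource_int_value(),
	 * they can also be seeded from loader(8) device hints, e.g. in
	 * /boot/device.hints (the values below are only illustrative,
	 * not recommendations):
	 *
	 *	hint.vge.0.int_holdoff="40"
	 *	hint.vge.0.rx_coal_pkt="32"
	 *	hint.vge.0.tx_coal_pkt="32"
	 */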
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "VGE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->rx_frames, "Frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
	    &stats->rx_runts_errs, "Too short frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
	    &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->rx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
	    &stats->rx_crcerrs, "CRC errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
	    &stats->rx_nobufs, "Frames with no buffer event");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
	    &stats->rx_symerrs, "Frames with symbol errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->tx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
	    &stats->tx_colls, "Collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_latecolls, "Late collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause, "Pause frames");
#ifdef VGE_ENABLE_SQEERR
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
	    &stats->tx_sqeerrs, "SQE errors");
#endif
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
}

#undef	VGE_SYSCTL_STAT_ADD32
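/*
 * The statistics tree built above can be inspected from userland with
 * sysctl(8); assuming the device attached as vge0, the paths would
 * look like:
 *
 *	# sysctl dev.vge.0.stats
 *	# sysctl dev.vge.0.stats.rx.crcerrs
 */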
static void
vge_stats_clear(struct vge_softc *sc)
{
	int i;

	/* Freeze the MIB counters, request a clear and poll for completion. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vge_dev, "MIB clear timed out!\n");
	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
	    ~VGE_MIBCSR_FREEZE);
}
static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	struct ifnet *ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	/* Flush the hardware MIB counters so they can be read. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		/*
		 * Each VGE_MIBDATA read returns the counter index in its
		 * upper bits; a mismatch with the expected index means the
		 * hardware updated the counters underneath us, so restart.
		 */
		val = CSR_READ_4(sc, VGE_MIBDATA);
		if (i != VGE_MIB_DATA_IDX(val)) {
			/* Reading interrupted. */
			goto reset_idx;
		}
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];
	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];

	ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];

	ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS];
}
static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set Tx interrupt suppression threshold.
	 * It's possible to use a single-shot timer in the VGE_CRS1
	 * register in the Tx path such that the driver can suppress most
	 * Tx completion interrupts.  However, this requires an additional
	 * access to the VGE_CRS1 register to reload the timer, in addition
	 * to activating the Tx kick command.  Another downside is that we
	 * don't know in advance what single-shot timer value should be
	 * used, so reclaiming transmitted mbufs could be delayed a lot,
	 * which in turn slows down Tx operation.
	 */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

	/* Set Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);

	/* Disable the holdoff timer before reprogramming it. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}
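/*
 * A rough sketch of how the knobs programmed above interact (example
 * values only): with dev.vge.0.rx_coal_pkt=32 the chip holds back the
 * Rx interrupt until 32 frames have arrived, and with
 * dev.vge.0.int_holdoff=40 any pending interrupt is additionally
 * delayed by roughly 40us, so a burst of frames is typically serviced
 * by a single interrupt instead of one interrupt per frame.
 */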
static void
vge_setlinkspeed(struct vge_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	VGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vge_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	/* Restart auto-negotiation advertising only 10/100 modes. */
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until vge(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					return;
				default:
					break;
				}
			}
			VGE_UNLOCK(sc);
			pause("vgelnk", hz);
			VGE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->vge_dev, "establishing link failed, "
			    "WOL may not work!\n");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
}
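/*
 * Note: vge_setwol() below downshifts the link via vge_setlinkspeed()
 * before arming wake-up; presumably, as on many other NICs, a gigabit
 * link cannot be maintained in the low-power state the chip enters
 * for WOL, so wake-up is only reliable at 10/100 speeds.
 */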
static void
vge_setwol(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
		/* No PME capability, PHY power down. */
		vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
		    BMCR_PDOWN);
		vge_miipoll_stop(sc);
		return;
	}

	ifp = sc->vge_ifp;

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		vge_setlinkspeed(sc);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
			val |= VGE_WOLCR1_UCAST;
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
			val |= VGE_WOLCR1_MAGIC;
		CSR_WRITE_1(sc, VGE_WOLCR1S, val);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
			val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
		CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
		/* Disable MII auto-polling. */
		vge_miipoll_stop(sc);
	}
	CSR_SETBIT_1(sc, VGE_DIAGCTL,
	    VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Put hardware into sleep. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
	    PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
	    pmstat, 2);
}
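/*
 * vge_clrwol() below is the inverse of vge_setwol(): it takes the chip
 * back out of the sticky sleep state and disarms all of the wake-up
 * sources armed above, which is what the resume path relies on.
 */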
static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Disable WOL and clear power state indicator. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}