2 * SPDX-License-Identifier: BSD-4-Clause
5 * Bill Paul <wpaul@windriver.com>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
41 * Written by Bill Paul <wpaul@windriver.com>
42 * Senior Networking Software Engineer
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
48 * combines a tri-speed ethernet MAC and PHY, with the following
51 * o Jumbo frame support up to 16K
52 * o Transmit and receive flow control
53 * o IPv4 checksum offload
54 * o VLAN tag insertion and stripping
56 * o 64-bit multicast hash table filter
57 * o 64 entry CAM filter
58 * o 16K RX FIFO and 48K TX FIFO memory
59 * o Interrupt moderation
61 * The VT6122 supports up to four transmit DMA queues. The descriptors
62 * in the transmit ring can address up to 7 data fragments; frames which
63 * span more than 7 data buffers must be coalesced, but in general the
64 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
65 * long. The receive descriptors address only a single buffer.
67 * There are two peculiar design issues with the VT6122. One is that
68 * receive data buffers must be aligned on a 32-bit boundary. This is
69 * not a problem where the VT6122 is used as a LOM device in x86-based
70 * systems, but on architectures that generate unaligned access traps, we
71 * have to do some copying.
73 * The other issue has to do with the way 64-bit addresses are handled.
74 * The DMA descriptors only allow you to specify 48 bits of addressing
75 * information. The remaining 16 bits are specified using one of the
76 * I/O registers. If you only have a 32-bit system, then this isn't
77 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
79 * in the same 48-bit 'segment.'
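 *
 * As a worked example (illustrative only, assuming the VGE_ADDR_LO()/
 * VGE_ADDR_HI() macros extract bits 0-31 and 32-47 of a bus address,
 * as the descriptor setup code below suggests), a buffer at bus
 * address 0x0000123456789abc splits into:
 *
 *   VGE_ADDR_LO() == 0x56789abc  -> written into each descriptor
 *   VGE_ADDR_HI() == 0x1234      -> the descriptor's high-address field
 *
 * with the top 16 bits (zero here) programmed once through the I/O
 * register, which is why all buffers must share one 48-bit segment.
 *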
81 * Special thanks to Ryan Fu at VIA Networking for providing documentation
82 * and sample NICs for testing.
85 #ifdef HAVE_KERNEL_OPTION_HEADERS
86 #include "opt_device_polling.h"
89 #include <sys/param.h>
90 #include <sys/endian.h>
91 #include <sys/systm.h>
92 #include <sys/sockio.h>
94 #include <sys/malloc.h>
95 #include <sys/module.h>
96 #include <sys/kernel.h>
97 #include <sys/socket.h>
98 #include <sys/sysctl.h>
101 #include <net/if_arp.h>
102 #include <net/ethernet.h>
103 #include <net/if_dl.h>
104 #include <net/if_var.h>
105 #include <net/if_media.h>
106 #include <net/if_types.h>
107 #include <net/if_vlan_var.h>
111 #include <machine/bus.h>
112 #include <machine/resource.h>
114 #include <sys/rman.h>
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
122 MODULE_DEPEND(vge, pci, 1, 1, 1);
123 MODULE_DEPEND(vge, ether, 1, 1, 1);
124 MODULE_DEPEND(vge, miibus, 1, 1, 1);
126 /* "device miibus" required. See GENERIC if you get errors here. */
127 #include "miibus_if.h"
129 #include <dev/vge/if_vgereg.h>
130 #include <dev/vge/if_vgevar.h>
132 #define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
135 static int msi_disable = 0;
136 TUNABLE_INT("hw.vge.msi_disable", &msi_disable);
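
/*
 * Usage note: the tunable above is set from the loader, e.g. by adding
 *	hw.vge.msi_disable="1"
 * to /boot/loader.conf, for boards whose MSI implementation is broken.
 */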
 * The SQE error counter of the MIB seems to report bogus values.
 * The vendor's workaround does not seem to work on PCIe-based
 * controllers, so disable it until we find a better workaround.
143 #undef VGE_ENABLE_SQEERR
146 * Various supported device vendors/types and their names.
148 static struct vge_type vge_devs[] = {
149 { VIA_VENDORID, VIA_DEVICEID_61XX,
150 "VIA Networking Velocity Gigabit Ethernet" },
154 static int vge_attach(device_t);
155 static int vge_detach(device_t);
156 static int vge_probe(device_t);
157 static int vge_resume(device_t);
158 static int vge_shutdown(device_t);
159 static int vge_suspend(device_t);
161 static void vge_cam_clear(struct vge_softc *);
162 static int vge_cam_set(struct vge_softc *, uint8_t *);
163 static void vge_clrwol(struct vge_softc *);
164 static void vge_discard_rxbuf(struct vge_softc *, int);
165 static int vge_dma_alloc(struct vge_softc *);
166 static void vge_dma_free(struct vge_softc *);
167 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
169 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
171 static int vge_encap(struct vge_softc *, struct mbuf **);
172 #ifndef __NO_STRICT_ALIGNMENT
174 vge_fixup_rx(struct mbuf *);
176 static void vge_freebufs(struct vge_softc *);
177 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
178 static int vge_ifmedia_upd(struct ifnet *);
179 static int vge_ifmedia_upd_locked(struct vge_softc *);
180 static void vge_init(void *);
181 static void vge_init_locked(struct vge_softc *);
182 static void vge_intr(void *);
183 static void vge_intr_holdoff(struct vge_softc *);
184 static int vge_ioctl(struct ifnet *, u_long, caddr_t);
185 static void vge_link_statchg(void *);
186 static int vge_miibus_readreg(device_t, int, int);
187 static int vge_miibus_writereg(device_t, int, int, int);
188 static void vge_miipoll_start(struct vge_softc *);
189 static void vge_miipoll_stop(struct vge_softc *);
190 static int vge_newbuf(struct vge_softc *, int);
191 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
192 static void vge_reset(struct vge_softc *);
193 static int vge_rx_list_init(struct vge_softc *);
194 static int vge_rxeof(struct vge_softc *, int);
195 static void vge_rxfilter(struct vge_softc *);
196 static void vge_setmedia(struct vge_softc *);
197 static void vge_setvlan(struct vge_softc *);
198 static void vge_setwol(struct vge_softc *);
199 static void vge_start(struct ifnet *);
200 static void vge_start_locked(struct ifnet *);
201 static void vge_stats_clear(struct vge_softc *);
202 static void vge_stats_update(struct vge_softc *);
203 static void vge_stop(struct vge_softc *);
204 static void vge_sysctl_node(struct vge_softc *);
205 static int vge_tx_list_init(struct vge_softc *);
206 static void vge_txeof(struct vge_softc *);
207 static void vge_watchdog(void *);
209 static device_method_t vge_methods[] = {
210 /* Device interface */
211 DEVMETHOD(device_probe, vge_probe),
212 DEVMETHOD(device_attach, vge_attach),
213 DEVMETHOD(device_detach, vge_detach),
214 DEVMETHOD(device_suspend, vge_suspend),
215 DEVMETHOD(device_resume, vge_resume),
216 DEVMETHOD(device_shutdown, vge_shutdown),
219 DEVMETHOD(miibus_readreg, vge_miibus_readreg),
220 DEVMETHOD(miibus_writereg, vge_miibus_writereg),
225 static driver_t vge_driver = {
228 sizeof(struct vge_softc)
231 static devclass_t vge_devclass;
233 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
234 DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
238 * Read a word of data stored in the EEPROM at address 'addr.'
241 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
247 * Enter EEPROM embedded programming mode. In order to
248 * access the EEPROM at all, we first have to set the
249 * EELOAD bit in the CHIPCFG2 register.
251 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
252 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
254 /* Select the address of the word we want to read */
255 CSR_WRITE_1(sc, VGE_EEADDR, addr);
257 /* Issue read command */
258 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
260 /* Wait for the done bit to be set. */
261 for (i = 0; i < VGE_TIMEOUT; i++) {
262 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
266 if (i == VGE_TIMEOUT) {
267 device_printf(sc->vge_dev, "EEPROM read timed out\n");
272 /* Read the result */
273 word = CSR_READ_2(sc, VGE_EERDDAT);
275 /* Turn off EEPROM access mode. */
276 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
277 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
284 * Read a sequence of words from the EEPROM.
287 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
291 uint16_t word = 0, *ptr;
293 for (i = 0; i < cnt; i++) {
294 vge_eeprom_getword(sc, off + i, &word);
295 ptr = (uint16_t *)(dest + (i * 2));
302 for (i = 0; i < ETHER_ADDR_LEN; i++)
303 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
308 vge_miipoll_stop(struct vge_softc *sc)
312 CSR_WRITE_1(sc, VGE_MIICMD, 0);
314 for (i = 0; i < VGE_TIMEOUT; i++) {
316 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
320 if (i == VGE_TIMEOUT)
321 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
325 vge_miipoll_start(struct vge_softc *sc)
329 /* First, make sure we're idle. */
331 CSR_WRITE_1(sc, VGE_MIICMD, 0);
332 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
334 for (i = 0; i < VGE_TIMEOUT; i++) {
336 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
340 if (i == VGE_TIMEOUT) {
341 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
345 /* Now enable auto poll mode. */
347 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
349 /* And make sure it started. */
351 for (i = 0; i < VGE_TIMEOUT; i++) {
353 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
357 if (i == VGE_TIMEOUT)
358 device_printf(sc->vge_dev, "failed to start MII autopoll\n");
362 vge_miibus_readreg(device_t dev, int phy, int reg)
364 struct vge_softc *sc;
368 sc = device_get_softc(dev);
370 vge_miipoll_stop(sc);
372 /* Specify the register we want to read. */
373 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
375 /* Issue read command. */
376 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
378 /* Wait for the read command bit to self-clear. */
379 for (i = 0; i < VGE_TIMEOUT; i++) {
381 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
385 if (i == VGE_TIMEOUT)
386 device_printf(sc->vge_dev, "MII read timed out\n");
388 rval = CSR_READ_2(sc, VGE_MIIDATA);
390 vge_miipoll_start(sc);
396 vge_miibus_writereg(device_t dev, int phy, int reg, int data)
398 struct vge_softc *sc;
401 sc = device_get_softc(dev);
403 vge_miipoll_stop(sc);
405 /* Specify the register we want to write. */
406 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
408 /* Specify the data we want to write. */
409 CSR_WRITE_2(sc, VGE_MIIDATA, data);
411 /* Issue write command. */
412 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
414 /* Wait for the write command bit to self-clear. */
415 for (i = 0; i < VGE_TIMEOUT; i++) {
417 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
421 if (i == VGE_TIMEOUT) {
422 device_printf(sc->vge_dev, "MII write timed out\n");
426 vge_miipoll_start(sc);
432 vge_cam_clear(struct vge_softc *sc)
 * Turn off all the mask bits. This tells the chip
 * that none of the entries in the CAM filter are valid.
 * The desired entries will be enabled as we fill the filter in.
442 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
443 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
444 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
445 for (i = 0; i < 8; i++)
446 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
448 /* Clear the VLAN filter too. */
450 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
451 for (i = 0; i < 8; i++)
452 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
454 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
455 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
456 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
462 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
466 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
469 /* Select the CAM data page. */
470 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
471 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
473 /* Set the filter entry we want to update and enable writing. */
474 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
476 /* Write the address to the CAM registers */
477 for (i = 0; i < ETHER_ADDR_LEN; i++)
478 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
480 /* Issue a write command. */
481 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
/* Wait for it to clear. */
484 for (i = 0; i < VGE_TIMEOUT; i++) {
486 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
490 if (i == VGE_TIMEOUT) {
491 device_printf(sc->vge_dev, "setting CAM filter failed\n");
496 /* Select the CAM mask page. */
497 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
498 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
500 /* Set the mask bit that enables this filter. */
501 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
502 1<<(sc->vge_camidx & 7));
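
/*
 * Worked example of the indexing above: CAM entry 13 lands in
 * mask byte 13 / 8 = 1 (register VGE_CAM0 + 1), bit 13 & 7 = 5,
 * i.e. the write sets bit value 0x20 in that byte.
 */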
507 /* Turn off access to CAM. */
508 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
509 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
510 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
516 vge_setvlan(struct vge_softc *sc)
524 cfg = CSR_READ_1(sc, VGE_RXCFG);
525 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
526 cfg |= VGE_VTAG_OPT2;
528 cfg &= ~VGE_VTAG_OPT2;
529 CSR_WRITE_1(sc, VGE_RXCFG, cfg);
533 vge_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
535 struct vge_softc *sc = arg;
537 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
(void)vge_cam_set(sc, LLADDR(sdl));
546 vge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
548 uint32_t h, *hashes = arg;
550 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
552 hashes[0] |= (1 << h);
554 hashes[1] |= (1 << (h - 32));
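
/*
 * Note on the hash above: the top six bits of the big-endian CRC of
 * the MAC address select one of 64 hash-table bits; h = 0..31 lands
 * in the first 32-bit word (VGE_MAR0), h = 32..63 in the second
 * (VGE_MAR1).
 */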
560 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
562 * we use the hash filter instead.
565 vge_rxfilter(struct vge_softc *sc)
573 /* First, zot all the multicast entries. */
577 rxcfg = CSR_READ_1(sc, VGE_RXCTL);
578 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
579 VGE_RXCTL_RX_PROMISC);
581 * Always allow VLAN oversized frames and frames for
584 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;
587 if ((ifp->if_flags & IFF_BROADCAST) != 0)
588 rxcfg |= VGE_RXCTL_RX_BCAST;
589 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
590 if ((ifp->if_flags & IFF_PROMISC) != 0)
591 rxcfg |= VGE_RXCTL_RX_PROMISC;
592 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
593 hashes[0] = 0xFFFFFFFF;
594 hashes[1] = 0xFFFFFFFF;
601 /* Now program new ones */
602 if_foreach_llmaddr(ifp, vge_set_maddr, sc);
604 /* If there were too many addresses, use the hash filter. */
605 if (sc->vge_camidx == VGE_CAM_MAXADDRS) {
607 if_foreach_llmaddr(ifp, vge_hash_maddr, hashes);
611 if (hashes[0] != 0 || hashes[1] != 0)
612 rxcfg |= VGE_RXCTL_RX_MCAST;
613 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
614 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
615 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
619 vge_reset(struct vge_softc *sc)
623 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
625 for (i = 0; i < VGE_TIMEOUT; i++) {
627 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
631 if (i == VGE_TIMEOUT) {
632 device_printf(sc->vge_dev, "soft reset timed out\n");
633 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
641 * Probe for a VIA gigabit chip. Check the PCI vendor and device
642 * IDs against our list and return a device name if we find a match.
645 vge_probe(device_t dev)
651 while (t->vge_name != NULL) {
652 if ((pci_get_vendor(dev) == t->vge_vid) &&
653 (pci_get_device(dev) == t->vge_did)) {
654 device_set_desc(dev, t->vge_name);
655 return (BUS_PROBE_DEFAULT);
664 * Map a single buffer address.
667 struct vge_dmamap_arg {
668 bus_addr_t vge_busaddr;
672 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
674 struct vge_dmamap_arg *ctx;
679 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
681 ctx = (struct vge_dmamap_arg *)arg;
682 ctx->vge_busaddr = segs[0].ds_addr;
686 vge_dma_alloc(struct vge_softc *sc)
688 struct vge_dmamap_arg ctx;
689 struct vge_txdesc *txd;
690 struct vge_rxdesc *rxd;
691 bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
 * It seems old PCI controllers do not support DAC. DAC
 * configuration can be enabled by accessing the VGE_CHIPCFG3
 * register, but we honor the EEPROM configuration instead of
 * blindly overriding it. PCIe-based controllers are supposed
 * to support 64-bit DMA, so enable 64-bit DMA on those
 * controllers.
702 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
703 lowaddr = BUS_SPACE_MAXADDR;
705 lowaddr = BUS_SPACE_MAXADDR_32BIT;
708 /* Create parent ring tag. */
709 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
710 1, 0, /* algnmnt, boundary */
711 lowaddr, /* lowaddr */
712 BUS_SPACE_MAXADDR, /* highaddr */
713 NULL, NULL, /* filter, filterarg */
714 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
716 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
718 NULL, NULL, /* lockfunc, lockarg */
719 &sc->vge_cdata.vge_ring_tag);
721 device_printf(sc->vge_dev,
722 "could not create parent DMA tag.\n");
726 /* Create tag for Tx ring. */
727 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
728 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */
729 BUS_SPACE_MAXADDR, /* lowaddr */
730 BUS_SPACE_MAXADDR, /* highaddr */
731 NULL, NULL, /* filter, filterarg */
732 VGE_TX_LIST_SZ, /* maxsize */
734 VGE_TX_LIST_SZ, /* maxsegsize */
736 NULL, NULL, /* lockfunc, lockarg */
737 &sc->vge_cdata.vge_tx_ring_tag);
739 device_printf(sc->vge_dev,
740 "could not allocate Tx ring DMA tag.\n");
744 /* Create tag for Rx ring. */
745 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
746 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */
747 BUS_SPACE_MAXADDR, /* lowaddr */
748 BUS_SPACE_MAXADDR, /* highaddr */
749 NULL, NULL, /* filter, filterarg */
750 VGE_RX_LIST_SZ, /* maxsize */
752 VGE_RX_LIST_SZ, /* maxsegsize */
754 NULL, NULL, /* lockfunc, lockarg */
755 &sc->vge_cdata.vge_rx_ring_tag);
757 device_printf(sc->vge_dev,
758 "could not allocate Rx ring DMA tag.\n");
762 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
763 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
764 (void **)&sc->vge_rdata.vge_tx_ring,
765 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
766 &sc->vge_cdata.vge_tx_ring_map);
768 device_printf(sc->vge_dev,
769 "could not allocate DMA'able memory for Tx ring.\n");
774 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
775 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
776 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
777 if (error != 0 || ctx.vge_busaddr == 0) {
778 device_printf(sc->vge_dev,
779 "could not load DMA'able memory for Tx ring.\n");
782 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
784 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
785 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
786 (void **)&sc->vge_rdata.vge_rx_ring,
787 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
788 &sc->vge_cdata.vge_rx_ring_map);
790 device_printf(sc->vge_dev,
791 "could not allocate DMA'able memory for Rx ring.\n");
796 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
797 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
798 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
799 if (error != 0 || ctx.vge_busaddr == 0) {
800 device_printf(sc->vge_dev,
801 "could not load DMA'able memory for Rx ring.\n");
804 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
/* The Tx/Rx descriptor rings must reside within the same 4GB segment. */
807 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
808 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
809 if ((VGE_ADDR_HI(tx_ring_end) !=
810 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
811 (VGE_ADDR_HI(rx_ring_end) !=
812 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
813 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
814 device_printf(sc->vge_dev, "4GB boundary crossed, "
815 "switching to 32bit DMA address mode.\n");
817 /* Limit DMA address space to 32bit and try again. */
818 lowaddr = BUS_SPACE_MAXADDR_32BIT;
822 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
823 lowaddr = VGE_BUF_DMA_MAXADDR;
825 lowaddr = BUS_SPACE_MAXADDR_32BIT;
826 /* Create parent buffer tag. */
827 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
828 1, 0, /* algnmnt, boundary */
829 lowaddr, /* lowaddr */
830 BUS_SPACE_MAXADDR, /* highaddr */
831 NULL, NULL, /* filter, filterarg */
832 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
834 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
836 NULL, NULL, /* lockfunc, lockarg */
837 &sc->vge_cdata.vge_buffer_tag);
839 device_printf(sc->vge_dev,
840 "could not create parent buffer DMA tag.\n");
844 /* Create tag for Tx buffers. */
845 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
846 1, 0, /* algnmnt, boundary */
847 BUS_SPACE_MAXADDR, /* lowaddr */
848 BUS_SPACE_MAXADDR, /* highaddr */
849 NULL, NULL, /* filter, filterarg */
850 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */
851 VGE_MAXTXSEGS, /* nsegments */
852 MCLBYTES, /* maxsegsize */
854 NULL, NULL, /* lockfunc, lockarg */
855 &sc->vge_cdata.vge_tx_tag);
857 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
861 /* Create tag for Rx buffers. */
862 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
863 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
864 BUS_SPACE_MAXADDR, /* lowaddr */
865 BUS_SPACE_MAXADDR, /* highaddr */
866 NULL, NULL, /* filter, filterarg */
867 MCLBYTES, /* maxsize */
869 MCLBYTES, /* maxsegsize */
871 NULL, NULL, /* lockfunc, lockarg */
872 &sc->vge_cdata.vge_rx_tag);
874 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
878 /* Create DMA maps for Tx buffers. */
879 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
880 txd = &sc->vge_cdata.vge_txdesc[i];
882 txd->tx_dmamap = NULL;
883 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
886 device_printf(sc->vge_dev,
887 "could not create Tx dmamap.\n");
891 /* Create DMA maps for Rx buffers. */
892 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
893 &sc->vge_cdata.vge_rx_sparemap)) != 0) {
894 device_printf(sc->vge_dev,
895 "could not create spare Rx dmamap.\n");
898 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
899 rxd = &sc->vge_cdata.vge_rxdesc[i];
901 rxd->rx_dmamap = NULL;
902 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
905 device_printf(sc->vge_dev,
906 "could not create Rx dmamap.\n");
916 vge_dma_free(struct vge_softc *sc)
918 struct vge_txdesc *txd;
919 struct vge_rxdesc *rxd;
923 if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
924 if (sc->vge_rdata.vge_tx_ring_paddr)
925 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
926 sc->vge_cdata.vge_tx_ring_map);
927 if (sc->vge_rdata.vge_tx_ring)
928 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
929 sc->vge_rdata.vge_tx_ring,
930 sc->vge_cdata.vge_tx_ring_map);
931 sc->vge_rdata.vge_tx_ring = NULL;
932 sc->vge_rdata.vge_tx_ring_paddr = 0;
933 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
934 sc->vge_cdata.vge_tx_ring_tag = NULL;
937 if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
938 if (sc->vge_rdata.vge_rx_ring_paddr)
939 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
940 sc->vge_cdata.vge_rx_ring_map);
941 if (sc->vge_rdata.vge_rx_ring)
942 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
943 sc->vge_rdata.vge_rx_ring,
944 sc->vge_cdata.vge_rx_ring_map);
945 sc->vge_rdata.vge_rx_ring = NULL;
946 sc->vge_rdata.vge_rx_ring_paddr = 0;
947 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
948 sc->vge_cdata.vge_rx_ring_tag = NULL;
951 if (sc->vge_cdata.vge_tx_tag != NULL) {
952 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
953 txd = &sc->vge_cdata.vge_txdesc[i];
954 if (txd->tx_dmamap != NULL) {
955 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
957 txd->tx_dmamap = NULL;
960 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
961 sc->vge_cdata.vge_tx_tag = NULL;
964 if (sc->vge_cdata.vge_rx_tag != NULL) {
965 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
966 rxd = &sc->vge_cdata.vge_rxdesc[i];
967 if (rxd->rx_dmamap != NULL) {
968 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
970 rxd->rx_dmamap = NULL;
973 if (sc->vge_cdata.vge_rx_sparemap != NULL) {
974 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
975 sc->vge_cdata.vge_rx_sparemap);
976 sc->vge_cdata.vge_rx_sparemap = NULL;
978 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
979 sc->vge_cdata.vge_rx_tag = NULL;
982 if (sc->vge_cdata.vge_buffer_tag != NULL) {
983 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
984 sc->vge_cdata.vge_buffer_tag = NULL;
986 if (sc->vge_cdata.vge_ring_tag != NULL) {
987 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
988 sc->vge_cdata.vge_ring_tag = NULL;
993 * Attach the interface. Allocate softc structures, do ifmedia
994 * setup and ethernet/BPF attach.
997 vge_attach(device_t dev)
999 u_char eaddr[ETHER_ADDR_LEN];
1000 struct vge_softc *sc;
1002 int error = 0, cap, i, msic, rid;
1004 sc = device_get_softc(dev);
1007 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1009 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);
1012 * Map control/status registers.
1014 pci_enable_busmaster(dev);
1017 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1020 if (sc->vge_res == NULL) {
1021 device_printf(dev, "couldn't map ports/memory\n");
1026 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
1027 sc->vge_flags |= VGE_FLAG_PCIE;
1028 sc->vge_expcap = cap;
1030 sc->vge_flags |= VGE_FLAG_JUMBO;
1031 if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
1032 sc->vge_flags |= VGE_FLAG_PMCAP;
1033 sc->vge_pmcap = cap;
1036 msic = pci_msi_count(dev);
1037 if (msi_disable == 0 && msic > 0) {
1039 if (pci_alloc_msi(dev, &msic) == 0) {
1041 sc->vge_flags |= VGE_FLAG_MSI;
1042 device_printf(dev, "Using %d MSI message\n",
1046 pci_release_msi(dev);
1050 /* Allocate interrupt */
1051 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1052 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
1053 if (sc->vge_irq == NULL) {
1054 device_printf(dev, "couldn't map interrupt\n");
1059 /* Reset the adapter. */
1061 /* Reload EEPROM. */
1062 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
1063 for (i = 0; i < VGE_TIMEOUT; i++) {
1065 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
1068 if (i == VGE_TIMEOUT)
1069 device_printf(dev, "EEPROM reload timed out\n");
 * Clear PACPI, as the EEPROM reload will set the bit. Otherwise
 * the MAC will receive magic packets, which in turn confuses the
 * controller.
1075 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
1078 * Get station address from the EEPROM.
1080 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
 * Save the configured PHY address.
 * It seems the PHY address of PCIe controllers just
 * reflects the media jumper strapping status, so we assume
 * the internal PHY of PCIe controllers is at address 1.
1087 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
1088 sc->vge_phyaddr = 1;
1090 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
1092 /* Clear WOL and take hardware from powerdown. */
1094 vge_sysctl_node(sc);
1095 error = vge_dma_alloc(sc);
1099 ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
1101 device_printf(dev, "can not if_alloc()\n");
1106 vge_miipoll_start(sc);
1108 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
1109 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY,
1112 device_printf(dev, "attaching PHYs failed\n");
1117 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1118 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1119 ifp->if_ioctl = vge_ioctl;
1120 ifp->if_capabilities = IFCAP_VLAN_MTU;
1121 ifp->if_start = vge_start;
1122 ifp->if_hwassist = VGE_CSUM_FEATURES;
1123 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
1124 IFCAP_VLAN_HWTAGGING;
1125 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
1126 ifp->if_capabilities |= IFCAP_WOL;
1127 ifp->if_capenable = ifp->if_capabilities;
1128 #ifdef DEVICE_POLLING
1129 ifp->if_capabilities |= IFCAP_POLLING;
1131 ifp->if_init = vge_init;
1132 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
1133 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
1134 IFQ_SET_READY(&ifp->if_snd);
1137 * Call MI attach routine.
1139 ether_ifattach(ifp, eaddr);
1141 /* Tell the upper layer(s) we support long frames. */
1142 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1144 /* Hook interrupt last to avoid having to lock softc */
1145 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1146 NULL, vge_intr, sc, &sc->vge_intrhand);
1149 device_printf(dev, "couldn't set up irq\n");
1150 ether_ifdetach(ifp);
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
1169 vge_detach(device_t dev)
1171 struct vge_softc *sc;
1174 sc = device_get_softc(dev);
1175 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1178 #ifdef DEVICE_POLLING
1179 if (ifp->if_capenable & IFCAP_POLLING)
1180 ether_poll_deregister(ifp);
1183 /* These should only be active if attach succeeded */
1184 if (device_is_attached(dev)) {
1185 ether_ifdetach(ifp);
1189 callout_drain(&sc->vge_watchdog);
1192 device_delete_child(dev, sc->vge_miibus);
1193 bus_generic_detach(dev);
1195 if (sc->vge_intrhand)
1196 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1198 bus_release_resource(dev, SYS_RES_IRQ,
1199 sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
1200 if (sc->vge_flags & VGE_FLAG_MSI)
1201 pci_release_msi(dev);
1203 bus_release_resource(dev, SYS_RES_MEMORY,
1204 PCIR_BAR(1), sc->vge_res);
1209 mtx_destroy(&sc->vge_mtx);
1215 vge_discard_rxbuf(struct vge_softc *sc, int prod)
1217 struct vge_rxdesc *rxd;
1220 rxd = &sc->vge_cdata.vge_rxdesc[prod];
1221 rxd->rx_desc->vge_sts = 0;
1222 rxd->rx_desc->vge_ctl = 0;
 * Note: the manual fails to document the fact that for
 * proper operation, the driver needs to replenish the RX
 * DMA ring 4 descriptors at a time (rather than one at a
 * time, like most chips). We can allocate the new buffers
 * but we should not set the OWN bits until we're ready
 * to hand back 4 of them in one shot.
1232 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1233 for (i = VGE_RXCHUNK; i > 0; i--) {
1234 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1235 rxd = rxd->rxd_prev;
1237 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1242 vge_newbuf(struct vge_softc *sc, int prod)
1244 struct vge_rxdesc *rxd;
1246 bus_dma_segment_t segs[1];
1250 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 * This is part of an evil trick to deal with strict-alignment
 * architectures. The VIA chip requires RX buffers to be aligned
 * on 32-bit boundaries, but that will hose strict-alignment
 * architectures. To get around this, we leave some empty space
 * at the start of each buffer and, on strict-alignment hosts,
 * we copy the buffer back two bytes to achieve word alignment.
 * This is slightly more efficient than allocating a new buffer,
 * copying the contents, and discarding the old buffer.
1263 m->m_len = m->m_pkthdr.len = MCLBYTES;
1264 m_adj(m, VGE_RX_BUF_ALIGN);
1266 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
1267 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1271 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1273 rxd = &sc->vge_cdata.vge_rxdesc[prod];
1274 if (rxd->rx_m != NULL) {
1275 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1276 BUS_DMASYNC_POSTREAD);
1277 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
1279 map = rxd->rx_dmamap;
1280 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
1281 sc->vge_cdata.vge_rx_sparemap = map;
1282 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1283 BUS_DMASYNC_PREREAD);
1286 rxd->rx_desc->vge_sts = 0;
1287 rxd->rx_desc->vge_ctl = 0;
1288 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
1289 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
1290 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);
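
/*
 * Layout note for the writes above (inferred from the macros): the
 * low 16 bits of vge_addrhi carry address bits 32-47, the encoded
 * buffer length sits at bit 16 and up, and VGE_RXDESC_I is presumably
 * the per-descriptor interrupt request bit.
 */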
1293 * Note: the manual fails to document the fact that for
1294 * proper operation, the driver needs to replenish the RX
1295 * DMA ring 4 descriptors at a time (rather than one at a
1296 * time, like most chips). We can allocate the new buffers
1297 * but we should not set the OWN bits until we're ready
1298 * to hand back 4 of them in one shot.
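 *
 * For example, with VGE_RXCHUNK == 4 (as the text above implies),
 * descriptors 0 through 3 all get their OWN bits flipped on in one
 * pass, walking backwards through rxd_prev, only once descriptor 3
 * has been filled in.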
1300 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1301 for (i = VGE_RXCHUNK; i > 0; i--) {
1302 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1303 rxd = rxd->rxd_prev;
1305 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1312 vge_tx_list_init(struct vge_softc *sc)
1314 struct vge_ring_data *rd;
1315 struct vge_txdesc *txd;
1318 VGE_LOCK_ASSERT(sc);
1320 sc->vge_cdata.vge_tx_prodidx = 0;
1321 sc->vge_cdata.vge_tx_considx = 0;
1322 sc->vge_cdata.vge_tx_cnt = 0;
1324 rd = &sc->vge_rdata;
1325 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
1326 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1327 txd = &sc->vge_cdata.vge_txdesc[i];
1329 txd->tx_desc = &rd->vge_tx_ring[i];
1332 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1333 sc->vge_cdata.vge_tx_ring_map,
1334 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1340 vge_rx_list_init(struct vge_softc *sc)
1342 struct vge_ring_data *rd;
1343 struct vge_rxdesc *rxd;
1346 VGE_LOCK_ASSERT(sc);
1348 sc->vge_cdata.vge_rx_prodidx = 0;
1349 sc->vge_cdata.vge_head = NULL;
1350 sc->vge_cdata.vge_tail = NULL;
1351 sc->vge_cdata.vge_rx_commit = 0;
1353 rd = &sc->vge_rdata;
1354 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
1355 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1356 rxd = &sc->vge_cdata.vge_rxdesc[i];
1358 rxd->rx_desc = &rd->vge_rx_ring[i];
1361 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
1363 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
1364 if (vge_newbuf(sc, i) != 0)
1368 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1369 sc->vge_cdata.vge_rx_ring_map,
1370 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1372 sc->vge_cdata.vge_rx_commit = 0;
1378 vge_freebufs(struct vge_softc *sc)
1380 struct vge_txdesc *txd;
1381 struct vge_rxdesc *rxd;
1385 VGE_LOCK_ASSERT(sc);
1389 * Free RX and TX mbufs still in the queues.
1391 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1392 rxd = &sc->vge_cdata.vge_rxdesc[i];
1393 if (rxd->rx_m != NULL) {
1394 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
1395 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1396 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
1403 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1404 txd = &sc->vge_cdata.vge_txdesc[i];
1405 if (txd->tx_m != NULL) {
1406 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
1407 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1408 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
1412 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1417 #ifndef __NO_STRICT_ALIGNMENT
1418 static __inline void
1419 vge_fixup_rx(struct mbuf *m)
1422 uint16_t *src, *dst;
1424 src = mtod(m, uint16_t *);
1427 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1430 m->m_data -= ETHER_ALIGN;
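
/*
 * Net effect of the fixup above: the whole frame is copied back by one
 * uint16_t and m_data is rewound by ETHER_ALIGN (2 bytes), so the
 * 14-byte Ethernet header ends, and the IP header begins, on a 32-bit
 * boundary.
 */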
1435 * RX handler. We support the reception of jumbo frames that have
1436 * been fragmented across multiple 2K mbuf cluster buffers.
1439 vge_rxeof(struct vge_softc *sc, int count)
1443 int prod, prog, total_len;
1444 struct vge_rxdesc *rxd;
1445 struct vge_rx_desc *cur_rx;
1446 uint32_t rxstat, rxctl;
1448 VGE_LOCK_ASSERT(sc);
1452 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1453 sc->vge_cdata.vge_rx_ring_map,
1454 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1456 prod = sc->vge_cdata.vge_rx_prodidx;
1457 for (prog = 0; count > 0 &&
1458 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1459 VGE_RX_DESC_INC(prod)) {
1460 cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
1461 rxstat = le32toh(cur_rx->vge_sts);
1462 if ((rxstat & VGE_RDSTS_OWN) != 0)
1466 rxctl = le32toh(cur_rx->vge_ctl);
1467 total_len = VGE_RXBYTES(rxstat);
1468 rxd = &sc->vge_cdata.vge_rxdesc[prod];
1472 * If the 'start of frame' bit is set, this indicates
1473 * either the first fragment in a multi-fragment receive,
1474 * or an intermediate fragment. Either way, we want to
1475 * accumulate the buffers.
1477 if ((rxstat & VGE_RXPKT_SOF) != 0) {
1478 if (vge_newbuf(sc, prod) != 0) {
1479 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1480 VGE_CHAIN_RESET(sc);
1481 vge_discard_rxbuf(sc, prod);
1484 m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
1485 if (sc->vge_cdata.vge_head == NULL) {
1486 sc->vge_cdata.vge_head = m;
1487 sc->vge_cdata.vge_tail = m;
1489 m->m_flags &= ~M_PKTHDR;
1490 sc->vge_cdata.vge_tail->m_next = m;
1491 sc->vge_cdata.vge_tail = m;
1497 * Bad/error frames will have the RXOK bit cleared.
1498 * However, there's one error case we want to allow:
1499 * if a VLAN tagged frame arrives and the chip can't
1500 * match it against the CAM filter, it considers this
1501 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1502 * We don't want to drop the frame though: our VLAN
1503 * filtering is done in software.
 * We also want to receive frames with bad checksums and
 * frames with bad lengths.
1507 if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1508 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1509 VGE_RDSTS_CSUMERR)) == 0) {
1510 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1512 * If this is part of a multi-fragment packet,
1513 * discard all the pieces.
1515 VGE_CHAIN_RESET(sc);
1516 vge_discard_rxbuf(sc, prod);
1520 if (vge_newbuf(sc, prod) != 0) {
1521 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1522 VGE_CHAIN_RESET(sc);
1523 vge_discard_rxbuf(sc, prod);
1527 /* Chain received mbufs. */
1528 if (sc->vge_cdata.vge_head != NULL) {
1529 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
 * Special case: if there are 4 bytes or less
 * in this buffer, the mbuf can be discarded:
 * the last 4 bytes are the CRC, which we don't
 * care about anyway.
1536 if (m->m_len <= ETHER_CRC_LEN) {
1537 sc->vge_cdata.vge_tail->m_len -=
1538 (ETHER_CRC_LEN - m->m_len);
1541 m->m_len -= ETHER_CRC_LEN;
1542 m->m_flags &= ~M_PKTHDR;
1543 sc->vge_cdata.vge_tail->m_next = m;
1545 m = sc->vge_cdata.vge_head;
1546 m->m_flags |= M_PKTHDR;
1547 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1549 m->m_flags |= M_PKTHDR;
1550 m->m_pkthdr.len = m->m_len =
1551 (total_len - ETHER_CRC_LEN);
1554 #ifndef __NO_STRICT_ALIGNMENT
1557 m->m_pkthdr.rcvif = ifp;
1559 /* Do RX checksumming if enabled */
1560 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1561 (rxctl & VGE_RDCTL_FRAG) == 0) {
1562 /* Check IP header checksum */
1563 if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1564 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1565 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1566 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1568 /* Check TCP/UDP checksum */
1569 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1570 rxctl & VGE_RDCTL_PROTOCSUMOK) {
1571 m->m_pkthdr.csum_flags |=
1572 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1573 m->m_pkthdr.csum_data = 0xffff;
1577 if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1579 * The 32-bit rxctl register is stored in little-endian.
1580 * However, the 16-bit vlan tag is stored in big-endian,
1581 * so we have to byte swap it.
1583 m->m_pkthdr.ether_vtag =
1584 bswap16(rxctl & VGE_RDCTL_VLANID);
1585 m->m_flags |= M_VLANTAG;
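/*
 * Per the byte-order comment above: VLAN ID 5 (0x0005), for example,
 * shows up in the low 16 bits of rxctl as 0x0500 and byte-swaps back
 * to 0x0005 here.
 */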
1589 (*ifp->if_input)(ifp, m);
1591 sc->vge_cdata.vge_head = NULL;
1592 sc->vge_cdata.vge_tail = NULL;
1596 sc->vge_cdata.vge_rx_prodidx = prod;
1597 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1598 sc->vge_cdata.vge_rx_ring_map,
1599 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1600 /* Update residue counter. */
1601 if (sc->vge_cdata.vge_rx_commit != 0) {
1602 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1603 sc->vge_cdata.vge_rx_commit);
1604 sc->vge_cdata.vge_rx_commit = 0;
1611 vge_txeof(struct vge_softc *sc)
1614 struct vge_tx_desc *cur_tx;
1615 struct vge_txdesc *txd;
1619 VGE_LOCK_ASSERT(sc);
1623 if (sc->vge_cdata.vge_tx_cnt == 0)
1626 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1627 sc->vge_cdata.vge_tx_ring_map,
1628 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1631 * Go through our tx list and free mbufs for those
1632 * frames that have been transmitted.
1634 cons = sc->vge_cdata.vge_tx_considx;
1635 prod = sc->vge_cdata.vge_tx_prodidx;
1636 for (; cons != prod; VGE_TX_DESC_INC(cons)) {
1637 cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
1638 txstat = le32toh(cur_tx->vge_sts);
1639 if ((txstat & VGE_TDSTS_OWN) != 0)
1641 sc->vge_cdata.vge_tx_cnt--;
1642 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1644 txd = &sc->vge_cdata.vge_txdesc[cons];
1645 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1646 BUS_DMASYNC_POSTWRITE);
1647 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
1649 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1653 txd->tx_desc->vge_frag[0].vge_addrhi = 0;
1655 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1656 sc->vge_cdata.vge_tx_ring_map,
1657 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1658 sc->vge_cdata.vge_tx_considx = cons;
1659 if (sc->vge_cdata.vge_tx_cnt == 0)
1664 vge_link_statchg(void *xsc)
1666 struct vge_softc *sc;
1672 VGE_LOCK_ASSERT(sc);
1674 physts = CSR_READ_1(sc, VGE_PHYSTS0);
1675 if ((physts & VGE_PHYSTS_RESETSTS) == 0) {
1676 if ((physts & VGE_PHYSTS_LINK) == 0) {
1677 sc->vge_flags &= ~VGE_FLAG_LINK;
1678 if_link_state_change(sc->vge_ifp,
1681 sc->vge_flags |= VGE_FLAG_LINK;
1682 if_link_state_change(sc->vge_ifp,
1684 CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
1685 VGE_CR2_FDX_RXFLOWCTL_ENABLE);
1686 if ((physts & VGE_PHYSTS_FDX) != 0) {
1687 if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0)
1688 CSR_WRITE_1(sc, VGE_CRS2,
1689 VGE_CR2_FDX_TXFLOWCTL_ENABLE);
1690 if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0)
1691 CSR_WRITE_1(sc, VGE_CRS2,
1692 VGE_CR2_FDX_RXFLOWCTL_ENABLE);
1694 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1695 vge_start_locked(ifp);
 * Restart MII auto-polling, because the link state change interrupt
 * will have disabled it.
1702 vge_miipoll_start(sc);
1705 #ifdef DEVICE_POLLING
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1709 struct vge_softc *sc = ifp->if_softc;
1713 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1716 rx_npkts = vge_rxeof(sc, count);
1719 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1720 vge_start_locked(ifp);
1722 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1724 status = CSR_READ_4(sc, VGE_ISR);
1725 if (status == 0xFFFFFFFF)
1728 CSR_WRITE_4(sc, VGE_ISR, status);
1731 * XXX check behaviour on receiver stalls.
1734 if (status & VGE_ISR_TXDMA_STALL ||
1735 status & VGE_ISR_RXDMA_STALL) {
1736 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1737 vge_init_locked(sc);
1740 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1741 vge_rxeof(sc, count);
1742 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1743 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1750 #endif /* DEVICE_POLLING */
1755 struct vge_softc *sc;
1763 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
1764 (ifp->if_flags & IFF_UP) == 0) {
1769 #ifdef DEVICE_POLLING
1770 if (ifp->if_capenable & IFCAP_POLLING) {
1771 status = CSR_READ_4(sc, VGE_ISR);
1772 CSR_WRITE_4(sc, VGE_ISR, status);
1773 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0)
1774 vge_link_statchg(sc);
1780 /* Disable interrupts */
1781 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1782 status = CSR_READ_4(sc, VGE_ISR);
1783 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
/* If the card has gone away, the read returns 0xFFFFFFFF. */
1785 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
1787 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1788 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1789 vge_rxeof(sc, VGE_RX_DESC_CNT);
1790 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1791 vge_rxeof(sc, VGE_RX_DESC_CNT);
1792 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1793 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1796 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
1799 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
1800 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1801 vge_init_locked(sc);
1804 if (status & VGE_ISR_LINKSTS)
1805 vge_link_statchg(sc);
1808 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1809 /* Re-enable interrupts */
1810 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1812 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1813 vge_start_locked(ifp);
1819 vge_encap(struct vge_softc *sc, struct mbuf **m_head)
1821 struct vge_txdesc *txd;
1822 struct vge_tx_frag *frag;
1824 bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
1825 int error, i, nsegs, padlen;
1828 VGE_LOCK_ASSERT(sc);
1830 M_ASSERTPKTHDR((*m_head));
1832 /* Argh. This chip does not autopad short frames. */
1833 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1835 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
1836 if (M_WRITABLE(m) == 0) {
1837 /* Get a writable copy. */
1838 m = m_dup(*m_head, M_NOWAIT);
1846 if (M_TRAILINGSPACE(m) < padlen) {
1847 m = m_defrag(m, M_NOWAIT);
1855 * Manually pad short frames, and zero the pad space
1856 * to avoid leaking data.
1858 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1859 m->m_pkthdr.len += padlen;
1860 m->m_len = m->m_pkthdr.len;
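
/*
 * Worked example (assuming VGE_MIN_FRAMELEN is the usual 60-byte
 * minimum, i.e. ETHER_MIN_LEN less the 4-byte CRC): a 42-byte ARP
 * request gets 18 zero bytes appended here before transmission.
 */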
1864 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
1866 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1867 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1868 if (error == EFBIG) {
1869 m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS);
1876 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1877 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1883 } else if (error != 0)
1885 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1886 BUS_DMASYNC_PREWRITE);
1891 /* Configure checksum offload. */
1892 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1893 cflags |= VGE_TDCTL_IPCSUM;
1894 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1895 cflags |= VGE_TDCTL_TCPCSUM;
1896 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1897 cflags |= VGE_TDCTL_UDPCSUM;
1899 /* Configure VLAN. */
1900 if ((m->m_flags & M_VLANTAG) != 0)
1901 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
1902 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
 * The Velocity family seems to support TSO, but no information
 * for MSS configuration is available. Also, the number of
 * fragments supported by a descriptor is too small to hold an
 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build a
 * longer chain of buffers, but no additional information is
 * available.
 *
 * When telling the chip how many segments there are, we
 * must use nsegs + 1 instead of just nsegs. Darned if I
 * know why. This also means we can't use the last fragment
 * field of the Tx descriptor.
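 *
 * For example, a frame whose mbuf chain loaded into three DMA
 * segments is advertised to the chip as (3 + 1) << 28 in vge_ctl,
 * with only fragment slots 0-2 actually filled in below.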
1918 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
1920 for (i = 0; i < nsegs; i++) {
1921 frag = &txd->tx_desc->vge_frag[i];
1922 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
1923 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
1924 (VGE_BUFLEN(txsegs[i].ds_len) << 16));
1927 sc->vge_cdata.vge_tx_cnt++;
1928 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
1931 * Finally request interrupt and give the first descriptor
1932 * ownership to hardware.
1934 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
1935 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
1942 * Main transmit routine.
1946 vge_start(struct ifnet *ifp)
1948 struct vge_softc *sc;
1952 vge_start_locked(ifp);
1957 vge_start_locked(struct ifnet *ifp)
1959 struct vge_softc *sc;
1960 struct vge_txdesc *txd;
1961 struct mbuf *m_head;
1966 VGE_LOCK_ASSERT(sc);
1968 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
1969 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1973 idx = sc->vge_cdata.vge_tx_prodidx;
1974 VGE_TX_DESC_DEC(idx);
1975 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1976 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
1977 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1981 * Pack the data into the transmit ring. If we
1982 * don't have room, set the OACTIVE flag and wait
1983 * for the NIC to drain the ring.
1985 if (vge_encap(sc, &m_head)) {
1988 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1989 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1993 txd = &sc->vge_cdata.vge_txdesc[idx];
1994 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
1995 VGE_TX_DESC_INC(idx);
1999 * If there's a BPF listener, bounce a copy of this frame
2002 ETHER_BPF_MTAP(ifp, m_head);
2006 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
2007 sc->vge_cdata.vge_tx_ring_map,
2008 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2009 /* Issue a transmit command. */
2010 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
2012 * Set a timeout in case the chip goes out to lunch.
2021 struct vge_softc *sc = xsc;
2024 vge_init_locked(sc);
2029 vge_init_locked(struct vge_softc *sc)
2031 struct ifnet *ifp = sc->vge_ifp;
2034 VGE_LOCK_ASSERT(sc);
2036 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2040 * Cancel pending I/O and free all RX/TX buffers.
2044 vge_miipoll_start(sc);
2047 * Initialize the RX and TX descriptors and mbufs.
2050 error = vge_rx_list_init(sc);
2052 device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
2055 vge_tx_list_init(sc);
2056 /* Clear MAC statistics. */
2057 vge_stats_clear(sc);
2058 /* Set our station address */
2059 for (i = 0; i < ETHER_ADDR_LEN; i++)
2060 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
2063 * Set receive FIFO threshold. Also allow transmission and
2064 * reception of VLAN tagged frames.
2066 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2067 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
2069 /* Set DMA burst length */
2070 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2071 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2073 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2075 /* Set collision backoff algorithm */
2076 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2077 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2078 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2080 /* Disable LPSEL field in priority resolution */
2081 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2084 * Load the addresses of the DMA queues into the chip.
2085 * Note that we only use one transmit queue.
2088 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2089 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2090 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2091 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2092 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2094 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2095 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2096 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2097 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
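
/*
 * The residue count written above tells the chip how many RX
 * descriptors it owns; vge_rxeof() later tops it up in VGE_RXCHUNK
 * batches through vge_rx_commit (see the residue update there).
 */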
2099 /* Configure interrupt moderation. */
2100 vge_intr_holdoff(sc);
2102 /* Enable and wake up the RX descriptor queue */
2103 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2104 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2106 /* Enable the TX descriptor queue */
2107 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2109 /* Init the cam filter. */
2112 /* Set up receiver filter. */
2116 /* Initialize pause timer. */
2117 CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
2119 * Initialize flow control parameters.
2120 * TX XON high threshold : 48
2121 * TX pause low threshold : 24
 * Disable half-duplex flow control
2124 CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
2125 CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);
2127 /* Enable jumbo frame reception (if desired) */
2129 /* Start the MAC. */
2130 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2131 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2132 CSR_WRITE_1(sc, VGE_CRS0,
2133 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2135 #ifdef DEVICE_POLLING
2137 * Disable interrupts except link state change if we are polling.
2139 if (ifp->if_capenable & IFCAP_POLLING) {
2140 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
2141 } else /* otherwise ... */
2145 * Enable interrupts.
2147 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2149 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2150 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2152 sc->vge_flags &= ~VGE_FLAG_LINK;
2153 vge_ifmedia_upd_locked(sc);
2155 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2156 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2157 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2161 * Set media options.
2164 vge_ifmedia_upd(struct ifnet *ifp)
2166 struct vge_softc *sc;
2171 error = vge_ifmedia_upd_locked(sc);
2178 vge_ifmedia_upd_locked(struct vge_softc *sc)
2180 struct mii_data *mii;
2181 struct mii_softc *miisc;
2184 mii = device_get_softc(sc->vge_miibus);
2185 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2188 error = mii_mediachg(mii);
2194 * Report current media status.
2197 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2199 struct vge_softc *sc;
2200 struct mii_data *mii;
2203 mii = device_get_softc(sc->vge_miibus);
2206 if ((ifp->if_flags & IFF_UP) == 0) {
2211 ifmr->ifm_active = mii->mii_media_active;
2212 ifmr->ifm_status = mii->mii_media_status;
2217 vge_setmedia(struct vge_softc *sc)
2219 struct mii_data *mii;
2220 struct ifmedia_entry *ife;
2222 mii = device_get_softc(sc->vge_miibus);
2223 ife = mii->mii_media.ifm_cur;
2226 * If the user manually selects a media mode, we need to turn
2227 * on the forced MAC mode bit in the DIAGCTL register. If the
2228 * user happens to choose a full duplex mode, we also need to
2229 * set the 'force full duplex' bit. This applies only to
2230 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2231 * mode is disabled, and in 1000baseT mode, full duplex is
2232 * always implied, so we turn on the forced mode bit but leave
2233 * the FDX bit cleared.
2236 switch (IFM_SUBTYPE(ife->ifm_media)) {
2238 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2239 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2242 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2243 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2247 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2248 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2249 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2251 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2255 device_printf(sc->vge_dev, "unknown media type: %x\n",
2256 IFM_SUBTYPE(ife->ifm_media));

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		VGE_LOCK(sc);
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU &&
			    (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
		}
		VGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		VGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vge_rxfilter(sc);
			else
				vge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vge_stop(sc);
		sc->vge_if_flags = ifp->if_flags;
		VGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VGE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vge_rxfilter(sc);
		VGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		VGE_LOCK(sc);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			vge_setvlan(sc);
		}
		VGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
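
/*
 * The SIOCSIFCAP arm above is what runs for capability toggles from
 * userland, for example:
 *
 *	ifconfig vge0 -txcsum	(clear IFCAP_TXCSUM and drop
 *				 VGE_CSUM_FEATURES from if_hwassist)
 *	ifconfig vge0 wol_magic	(arm magic-packet wakeup for the next
 *				 suspend; see vge_setwol() below)
 */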

static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	VGE_LOCK_ASSERT(sc);
	vge_stats_update(sc);
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	vge_txeof(sc);
	vge_rxeof(sc, VGE_RX_DESC_CNT);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vge_init_locked(sc);
}
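
/*
 * sc->vge_timer is the usual deadman scheme: the transmit path arms
 * it when descriptors are handed to the chip and the completion path
 * disarms it, so the countdown above only reaches zero when a
 * transmission has been pending for several seconds without
 * completing.
 */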

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	vge_stats_update(sc);
	VGE_CHAIN_RESET(sc);
	vge_txeof(sc);
	vge_freebufs(sc);
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	VGE_LOCK(sc);
	vge_stop(sc);
	vge_setwol(sc);
	sc->vge_flags |= VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	VGE_LOCK(sc);
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->vge_dev,
		    sc->vge_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->vge_dev,
			    sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	vge_clrwol(sc);
	/* Restart MII auto-polling. */
	vge_miipoll_start(sc);
	ifp = sc->vge_ifp;
	/* Reinitialize interface if necessary. */
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vge_init_locked(sc);
	}
	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vge_shutdown(device_t dev)
{

	return (vge_suspend(dev));
}

#define	VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vge_sysctl_node(struct vge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vge_hw_stats *stats;

	stats = &sc->vge_stats;
	ctx = device_get_sysctl_ctx(sc->vge_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
	    CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
	    CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
	    CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");

	/* Pull in device tunables. */
	sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
	sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
	sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
	resource_int_value(device_get_name(sc->vge_dev),
	    device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VGE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->rx_frames, "Total frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
	    &stats->rx_runts_errs, "Too short frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 byte to max size frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
	    &stats->rx_pkts_1519_max_errs,
	    "1519 byte to max size frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->rx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
	    &stats->rx_crcerrs, "CRC errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
	    &stats->rx_nobufs, "Frames with no buffer event");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
	    &stats->rx_symerrs, "Frames with symbol errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->tx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
	    &stats->tx_colls, "Collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_latecolls, "Late collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause, "Pause frames");
#ifdef VGE_ENABLE_SQEERR
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
	    &stats->tx_sqeerrs, "SQE errors");
#endif
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
}

#undef VGE_SYSCTL_STAT_ADD32
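
/*
 * The knobs registered in vge_sysctl_node() can be seeded as device
 * hints, since resource_int_value(9) consults the hint namespace, and
 * changed at runtime via sysctl. For example (illustrative values):
 *
 *	hint.vge.0.int_holdoff="20"	(in /boot/device.hints)
 *	sysctl dev.vge.0.rx_coal_pkt=64	(at runtime)
 *
 * The accumulated MAC statistics appear under dev.vge.0.stats.
 */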

static void
vge_stats_clear(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vge_dev, "MIB clear timed out!\n");
	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
	    ~VGE_MIBCSR_FREEZE);
}
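
/*
 * The handshake above (freeze, request the clear, poll for the CLR
 * bit to self-clear, unfreeze) keeps the hardware from updating the
 * counters while they are being wiped; VGE_TIMEOUT bounds the wait so
 * a wedged MIB engine cannot hang the driver.
 */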

static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	struct ifnet *ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		val = CSR_READ_4(sc, VGE_MIBDATA);
		if (i != VGE_MIB_DATA_IDX(val)) {
			/* Reading interrupted. */
			goto reset_idx;
		}
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS]);
}
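
/*
 * Each VGE_MIBDATA read returns the counter's index alongside its
 * value; VGE_MIB_DATA_IDX() extracts the index so the dump loop above
 * can detect that the hardware restarted the sequence and retry from
 * index zero rather than attribute a sample to the wrong counter.
 */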

static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set the Tx interrupt suppression threshold.
	 * It would be possible to use the single-shot timer in the
	 * VGE_CRS1 register in the Tx path so that the driver could
	 * remove most Tx completion interrupts.  However, that would
	 * require an additional VGE_CRS1 access to reload the timer,
	 * in addition to activating the Tx kick command.  Another
	 * downside is that the right single-shot timer value isn't
	 * known in advance, so reclaiming transmitted mbufs could be
	 * delayed considerably, which in turn slows down Tx operation.
	 */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

	/* Set the Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}
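
/*
 * In short: with non-zero coalescing thresholds the chip raises at
 * most one interrupt per vge_rx_coal_pkt received (vge_tx_coal_pkt
 * transmitted) frames, and a non-zero vge_int_holdoff additionally
 * enforces a minimum delay between interrupts.  A threshold of zero
 * disables suppression for that direction, as programmed above.
 */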

static void
vge_setlinkspeed(struct vge_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	VGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vge_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	/* Clear forced MAC speed/duplex configuration. */
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until vge(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					return;
				default:
					break;
				}
			}
			VGE_UNLOCK(sc);
			pause("vgelnk", hz);
			VGE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->vge_dev, "establishing link failed, "
			    "WOL may not work!\n");
	}
	/*
	 * No link; force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
}
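
/*
 * Renegotiating down to 10/100 before suspend is the usual WOL
 * arrangement: keeping a 1000baseT link alive typically exceeds what
 * the standby power budget allows, and a sleeping host has no use for
 * gigabit anyway.  The forced 100Mbps full-duplex fallback above is a
 * best effort for link partners that refuse to renegotiate.
 */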

static void
vge_setwol(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
		/* No PME capability, PHY power down. */
		vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
		    BMCR_PDOWN);
		vge_miipoll_stop(sc);
		return;
	}

	ifp = sc->vge_ifp;

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		vge_setlinkspeed(sc);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
			val |= VGE_WOLCR1_UCAST;
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
			val |= VGE_WOLCR1_MAGIC;
		CSR_WRITE_1(sc, VGE_WOLCR1S, val);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
			val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
		CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
		/* Disable MII auto-polling. */
		vge_miipoll_stop(sc);
	}
	CSR_SETBIT_1(sc, VGE_DIAGCTL,
	    VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Put hardware into sleep. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
	    PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
	    pmstat, 2);
}

static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Disable WOL and clear power state indicator. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}