3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
45 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
46 * combines a tri-speed ethernet MAC and PHY, with the following
49 * o Jumbo frame support up to 16K
50 * o Transmit and receive flow control
51 * o IPv4 checksum offload
52 * o VLAN tag insertion and stripping
54 * o 64-bit multicast hash table filter
55 * o 64 entry CAM filter
56 * o 16K RX FIFO and 48K TX FIFO memory
57 * o Interrupt moderation
59 * The VT6122 supports up to four transmit DMA queues. The descriptors
60 * in the transmit ring can address up to 7 data fragments; frames which
61 * span more than 7 data buffers must be coalesced, but in general the
62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63 * long. The receive descriptors address only a single buffer.
65 * There are two peculiar design issues with the VT6122. One is that
66 * receive data buffers must be aligned on a 32-bit boundary. This is
67 * not a problem where the VT6122 is used as a LOM device in x86-based
68 * systems, but on architectures that generate unaligned access traps, we
69 * have to do some copying.
71 * The other issue has to do with the way 64-bit addresses are handled.
72 * The DMA descriptors only allow you to specify 48 bits of addressing
73 * information. The remaining 16 bits are specified using one of the
74 * I/O registers. If you only have a 32-bit system, then this isn't
75 * an issue, but if you have a 64-bit system and more than 4GB of
76 * memory, you must make sure your network data buffers reside
77 * in the same 48-bit 'segment.'
79 * Special thanks to Ryan Fu at VIA Networking for providing documentation
80 * and sample NICs for testing.
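/*
 * Illustrative sketch (not part of the original driver): the 48-bit
 * addressing rule above means two data buffers can be used together
 * only if they share the same upper 16 address bits, since those
 * bits come from an I/O register rather than the descriptor. A
 * hypothetical helper to check this might look like:
 */
#if 0
static __inline int
vge_same_48bit_seg(uint64_t busaddr_a, uint64_t busaddr_b)
{
	/* Only 48 bits fit in a descriptor; bits 63:48 must match. */
	return ((busaddr_a >> 48) == (busaddr_b >> 48));
}
#endif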
83 #ifdef HAVE_KERNEL_OPTION_HEADERS
84 #include "opt_device_polling.h"
87 #include <sys/param.h>
88 #include <sys/endian.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
92 #include <sys/malloc.h>
93 #include <sys/module.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/taskqueue.h>
99 #include <net/if_arp.h>
100 #include <net/ethernet.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_types.h>
104 #include <net/if_vlan_var.h>
108 #include <machine/bus.h>
109 #include <machine/resource.h>
111 #include <sys/rman.h>
113 #include <dev/mii/mii.h>
114 #include <dev/mii/miivar.h>
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
119 MODULE_DEPEND(vge, pci, 1, 1, 1);
120 MODULE_DEPEND(vge, ether, 1, 1, 1);
121 MODULE_DEPEND(vge, miibus, 1, 1, 1);
123 /* "device miibus" required. See GENERIC if you get errors here. */
124 #include "miibus_if.h"
126 #include <dev/vge/if_vgereg.h>
127 #include <dev/vge/if_vgevar.h>
129 #define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
132 * Various supported device vendors/types and their names.
134 static struct vge_type vge_devs[] = {
135 { VIA_VENDORID, VIA_DEVICEID_61XX,
136 "VIA Networking Gigabit Ethernet" },
140 static int vge_probe (device_t);
141 static int vge_attach (device_t);
142 static int vge_detach (device_t);
144 static int vge_encap (struct vge_softc *, struct mbuf *, int);
146 static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
147 static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int,
149 static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
151 static int vge_allocmem (device_t, struct vge_softc *);
152 static int vge_newbuf (struct vge_softc *, int, struct mbuf *);
153 static int vge_rx_list_init (struct vge_softc *);
154 static int vge_tx_list_init (struct vge_softc *);
156 static __inline void vge_fixup_rx
159 static int vge_rxeof (struct vge_softc *);
160 static void vge_txeof (struct vge_softc *);
161 static void vge_intr (void *);
162 static void vge_tick (void *);
163 static void vge_tx_task (void *, int);
164 static void vge_start (struct ifnet *);
165 static int vge_ioctl (struct ifnet *, u_long, caddr_t);
166 static void vge_init (void *);
167 static void vge_stop (struct vge_softc *);
168 static void vge_watchdog (struct ifnet *);
169 static int vge_suspend (device_t);
170 static int vge_resume (device_t);
171 static int vge_shutdown (device_t);
172 static int vge_ifmedia_upd (struct ifnet *);
173 static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
176 static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *);
178 static void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int);
180 static void vge_miipoll_start (struct vge_softc *);
181 static void vge_miipoll_stop (struct vge_softc *);
182 static int vge_miibus_readreg (device_t, int, int);
183 static int vge_miibus_writereg (device_t, int, int, int);
184 static void vge_miibus_statchg (device_t);
186 static void vge_cam_clear (struct vge_softc *);
187 static int vge_cam_set (struct vge_softc *, uint8_t *);
188 static void vge_setmulti (struct vge_softc *);
189 static void vge_reset (struct vge_softc *);
191 #define VGE_PCI_LOIO 0x10
192 #define VGE_PCI_LOMEM 0x14
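/*
 * These are the chip's PCI BARs: 0x10 maps the registers into I/O
 * space, 0x14 maps them into memory space.
 */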
194 static device_method_t vge_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, vge_probe),
197 DEVMETHOD(device_attach, vge_attach),
198 DEVMETHOD(device_detach, vge_detach),
199 DEVMETHOD(device_suspend, vge_suspend),
200 DEVMETHOD(device_resume, vge_resume),
201 DEVMETHOD(device_shutdown, vge_shutdown),
204 DEVMETHOD(bus_print_child, bus_generic_print_child),
205 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
208 DEVMETHOD(miibus_readreg, vge_miibus_readreg),
209 DEVMETHOD(miibus_writereg, vge_miibus_writereg),
210 DEVMETHOD(miibus_statchg, vge_miibus_statchg),
215 static driver_t vge_driver = {
218 sizeof(struct vge_softc)
221 static devclass_t vge_devclass;
223 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
224 DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
228 * Read a word of data stored in the EEPROM at address 'addr.'
231 vge_eeprom_getword(sc, addr, dest)
232 struct vge_softc *sc;
240 * Enter EEPROM embedded programming mode. In order to
241 * access the EEPROM at all, we first have to set the
242 * EELOAD bit in the CHIPCFG2 register.
244 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
245 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
247 /* Select the address of the word we want to read */
248 CSR_WRITE_1(sc, VGE_EEADDR, addr);
250 /* Issue read command */
251 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
253 /* Wait for the done bit to be set. */
254 for (i = 0; i < VGE_TIMEOUT; i++) {
255 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
259 if (i == VGE_TIMEOUT) {
260 device_printf(sc->vge_dev, "EEPROM read timed out\n");
265 /* Read the result */
266 word = CSR_READ_2(sc, VGE_EERDDAT);
268 /* Turn off EEPROM access mode. */
269 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
270 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
279 * Read a sequence of words from the EEPROM.
282 vge_read_eeprom(sc, dest, off, cnt, swap)
283 struct vge_softc *sc;
291 u_int16_t word = 0, *ptr;
293 for (i = 0; i < cnt; i++) {
294 vge_eeprom_getword(sc, off + i, &word);
295 ptr = (u_int16_t *)(dest + (i * 2));
296 if (swap)
297 *ptr = ntohs(word);
298 else
299 *ptr = word;
302 for (i = 0; i < ETHER_ADDR_LEN; i++)
303 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
309 struct vge_softc *sc;
313 CSR_WRITE_1(sc, VGE_MIICMD, 0);
315 for (i = 0; i < VGE_TIMEOUT; i++) {
317 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
321 if (i == VGE_TIMEOUT)
322 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
328 vge_miipoll_start(sc)
329 struct vge_softc *sc;
333 /* First, make sure we're idle. */
335 CSR_WRITE_1(sc, VGE_MIICMD, 0);
336 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
338 for (i = 0; i < VGE_TIMEOUT; i++) {
340 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
344 if (i == VGE_TIMEOUT) {
345 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
349 /* Now enable auto poll mode. */
351 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
353 /* And make sure it started. */
355 for (i = 0; i < VGE_TIMEOUT; i++) {
357 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
361 if (i == VGE_TIMEOUT)
362 device_printf(sc->vge_dev, "failed to start MII autopoll\n");
368 vge_miibus_readreg(dev, phy, reg)
372 struct vge_softc *sc;
376 sc = device_get_softc(dev);
378 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
382 vge_miipoll_stop(sc);
384 /* Specify the register we want to read. */
385 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
387 /* Issue read command. */
388 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
390 /* Wait for the read command bit to self-clear. */
391 for (i = 0; i < VGE_TIMEOUT; i++) {
393 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
397 if (i == VGE_TIMEOUT)
398 device_printf(sc->vge_dev, "MII read timed out\n");
400 rval = CSR_READ_2(sc, VGE_MIIDATA);
402 vge_miipoll_start(sc);
409 vge_miibus_writereg(dev, phy, reg, data)
413 struct vge_softc *sc;
416 sc = device_get_softc(dev);
418 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
422 vge_miipoll_stop(sc);
424 /* Specify the register we want to write. */
425 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
427 /* Specify the data we want to write. */
428 CSR_WRITE_2(sc, VGE_MIIDATA, data);
430 /* Issue write command. */
431 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
433 /* Wait for the write command bit to self-clear. */
434 for (i = 0; i < VGE_TIMEOUT; i++) {
436 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
440 if (i == VGE_TIMEOUT) {
441 device_printf(sc->vge_dev, "MII write timed out\n");
445 vge_miipoll_start(sc);
453 struct vge_softc *sc;
458 * Turn off all the mask bits. This tells the chip
459 * that none of the entries in the CAM filter are valid.
460 * Desired entries will be enabled as we fill the filter in.
463 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
464 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
465 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
466 for (i = 0; i < 8; i++)
467 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
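	/* 8 mask registers x 8 bits cover the 64 CAM entries. */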
469 /* Clear the VLAN filter too. */
471 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
472 for (i = 0; i < 8; i++)
473 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
475 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
476 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
477 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
485 vge_cam_set(sc, addr)
486 struct vge_softc *sc;
491 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
494 /* Select the CAM data page. */
495 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
496 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
498 /* Set the filter entry we want to update and enable writing. */
499 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
501 /* Write the address to the CAM registers */
502 for (i = 0; i < ETHER_ADDR_LEN; i++)
503 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
505 /* Issue a write command. */
506 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
508 /* Wait for it to clear. */
509 for (i = 0; i < VGE_TIMEOUT; i++) {
511 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
515 if (i == VGE_TIMEOUT) {
516 device_printf(sc->vge_dev, "setting CAM filter failed\n");
521 /* Select the CAM mask page. */
522 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
523 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
525 /* Set the mask bit that enables this filter. */
526 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
527 1<<(sc->vge_camidx & 7));
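	/* e.g. camidx 10: register VGE_CAM0 + 1, bit 1 << (10 & 7) == 1 << 2. */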
532 /* Turn off access to CAM. */
533 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
534 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
535 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
541 * Program the multicast filter. We use the 64-entry CAM filter
542 * for perfect filtering. If there are more than 64 multicast addresses,
543 * we use the hash filter instead.
547 struct vge_softc *sc;
550 int error = 0/*, h = 0*/;
551 struct ifmultiaddr *ifma;
552 u_int32_t h, hashes[2] = { 0, 0 };
556 /* First, zot all the multicast entries. */
558 CSR_WRITE_4(sc, VGE_MAR0, 0);
559 CSR_WRITE_4(sc, VGE_MAR1, 0);
562 * If the user wants allmulti or promisc mode, enable reception
563 * of all multicast frames.
565 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
566 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
567 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
571 /* Now program new ones */
573 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
574 if (ifma->ifma_addr->sa_family != AF_LINK)
576 error = vge_cam_set(sc,
577 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
582 /* If there were too many addresses, use the hash filter. */
586 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
587 if (ifma->ifma_addr->sa_family != AF_LINK)
589 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
590 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
592 hashes[0] |= (1 << h);
594 hashes[1] |= (1 << (h - 32));
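	/* e.g. h == 40 sets bit 40 - 32 == 8 in hashes[1] (VGE_MAR1). */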
597 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
598 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
600 if_maddr_runlock(ifp);
607 struct vge_softc *sc;
611 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
613 for (i = 0; i < VGE_TIMEOUT; i++) {
615 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
619 if (i == VGE_TIMEOUT) {
620 device_printf(sc->vge_dev, "soft reset timed out\n");
621 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
627 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
629 for (i = 0; i < VGE_TIMEOUT; i++) {
631 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
635 if (i == VGE_TIMEOUT) {
636 device_printf(sc->vge_dev, "EEPROM reload timed out\n");
640 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
646 * Probe for a VIA gigabit chip. Check the PCI vendor and device
647 * IDs against our list and return a device name if we find a match.
657 while (t->vge_name != NULL) {
658 if ((pci_get_vendor(dev) == t->vge_vid) &&
659 (pci_get_device(dev) == t->vge_did)) {
660 device_set_desc(dev, t->vge_name);
661 return (BUS_PROBE_DEFAULT);
670 vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
672 bus_dma_segment_t *segs;
678 struct vge_dmaload_arg *ctx;
679 struct vge_rx_desc *d = NULL;
686 /* Signal error to caller if there are too many segments */
687 if (nseg > ctx->vge_maxsegs) {
688 ctx->vge_maxsegs = 0;
693 * Map the segment array into descriptors.
696 d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];
698 /* If this descriptor is still owned by the chip, bail. */
700 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
701 device_printf(ctx->sc->vge_dev,
702 "tried to map busy descriptor\n");
703 ctx->vge_maxsegs = 0;
707 d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
708 d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
709 d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
713 ctx->vge_maxsegs = 1;
719 vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
721 bus_dma_segment_t *segs;
726 struct vge_dmaload_arg *ctx;
727 struct vge_tx_desc *d = NULL;
728 struct vge_tx_frag *f;
736 /* Signal error to caller if there are too many segments */
737 if (nseg > ctx->vge_maxsegs) {
738 ctx->vge_maxsegs = 0;
742 /* Map the segment array into descriptors. */
744 d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];
746 /* If this descriptor is still owned by the chip, bail. */
748 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
749 ctx->vge_maxsegs = 0;
753 for (i = 0; i < nseg; i++) {
755 f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
756 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
757 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
760 /* Argh. This chip does not autopad short frames */
762 if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
764 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
765 ctx->vge_m0->m_pkthdr.len));
766 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
767 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
768 ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
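	/* The pad fragment re-reads bytes from the start of the frame as filler. */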
773 * When telling the chip how many segments there are, we
774 * must use nsegs + 1 instead of just nsegs. Darned if I
775 * know why.
779 d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
780 d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;
782 if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
783 d->vge_ctl |= VGE_TDCTL_JUMBO;
785 ctx->vge_maxsegs = nseg;
791 * Map a single buffer address.
795 vge_dma_map_addr(arg, segs, nseg, error)
797 bus_dma_segment_t *segs;
806 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
808 *addr = segs->ds_addr;
814 vge_allocmem(dev, sc)
816 struct vge_softc *sc;
823 * Allocate map for RX mbufs.
826 error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
827 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
828 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
829 NULL, NULL, &sc->vge_ldata.vge_mtag);
831 device_printf(dev, "could not allocate dma tag\n");
836 * Allocate map for TX descriptor list.
838 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
839 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
840 NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
841 NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
843 device_printf(dev, "could not allocate dma tag\n");
847 /* Allocate DMA'able memory for the TX ring */
849 error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
850 (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
851 &sc->vge_ldata.vge_tx_list_map);
855 /* Load the map for the TX ring. */
857 error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
858 sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
859 VGE_TX_LIST_SZ, vge_dma_map_addr,
860 &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);
862 /* Create DMA maps for TX buffers */
864 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
865 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
866 &sc->vge_ldata.vge_tx_dmamap[i]);
868 device_printf(dev, "can't create DMA map for TX\n");
874 * Allocate map for RX descriptor list.
876 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
877 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
878 NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
879 NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
881 device_printf(dev, "could not allocate dma tag\n");
885 /* Allocate DMA'able memory for the RX ring */
887 error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
888 (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
889 &sc->vge_ldata.vge_rx_list_map);
893 /* Load the map for the RX ring. */
895 error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
896 sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
897 VGE_RX_LIST_SZ, vge_dma_map_addr,
898 &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);
900 /* Create DMA maps for RX buffers */
902 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
903 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
904 &sc->vge_ldata.vge_rx_dmamap[i]);
906 device_printf(dev, "can't create DMA map for RX\n");
915 * Attach the interface. Allocate softc structures, do ifmedia
916 * setup and ethernet/BPF attach.
922 u_char eaddr[ETHER_ADDR_LEN];
923 struct vge_softc *sc;
925 int unit, error = 0, rid;
927 sc = device_get_softc(dev);
928 unit = device_get_unit(dev);
931 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
932 MTX_DEF | MTX_RECURSE);
934 * Map control/status registers.
936 pci_enable_busmaster(dev);
939 sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
940 0, ~0, 1, RF_ACTIVE);
942 if (sc->vge_res == NULL) {
943 printf ("vge%d: couldn't map ports/memory\n", unit);
948 sc->vge_btag = rman_get_bustag(sc->vge_res);
949 sc->vge_bhandle = rman_get_bushandle(sc->vge_res);
951 /* Allocate interrupt */
953 sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
954 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
956 if (sc->vge_irq == NULL) {
957 printf("vge%d: couldn't map interrupt\n", unit);
962 /* Reset the adapter. */
966 * Get station address from the EEPROM.
968 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
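	/* 3 EEPROM words == ETHER_ADDR_LEN (6) bytes of station address. */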
973 * Allocate the parent bus DMA tag appropriate for PCI.
975 #define VGE_NSEG_NEW 32
976 error = bus_dma_tag_create(NULL, /* parent */
977 1, 0, /* alignment, boundary */
978 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
979 BUS_SPACE_MAXADDR, /* highaddr */
980 NULL, NULL, /* filter, filterarg */
981 MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */
982 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
983 BUS_DMA_ALLOCNOW, /* flags */
984 NULL, NULL, /* lockfunc, lockarg */
985 &sc->vge_parent_tag);
989 error = vge_allocmem(dev, sc);
994 ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
996 printf("vge%d: can not if_alloc()\n", sc->vge_unit);
1002 if (mii_phy_probe(dev, &sc->vge_miibus,
1003 vge_ifmedia_upd, vge_ifmedia_sts)) {
1004 printf("vge%d: MII without any phy!\n", sc->vge_unit);
1010 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1011 ifp->if_mtu = ETHERMTU;
1012 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1013 ifp->if_ioctl = vge_ioctl;
1014 ifp->if_capabilities = IFCAP_VLAN_MTU;
1015 ifp->if_start = vge_start;
1016 ifp->if_hwassist = VGE_CSUM_FEATURES;
1017 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1018 ifp->if_capenable = ifp->if_capabilities;
1019 #ifdef DEVICE_POLLING
1020 ifp->if_capabilities |= IFCAP_POLLING;
1022 ifp->if_watchdog = vge_watchdog;
1023 ifp->if_init = vge_init;
1024 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
1025 ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN;
1026 IFQ_SET_READY(&ifp->if_snd);
1028 TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);
1031 * Call MI attach routine.
1033 ether_ifattach(ifp, eaddr);
1035 /* Hook interrupt last to avoid having to lock softc */
1036 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1037 NULL, vge_intr, sc, &sc->vge_intrhand);
1040 printf("vge%d: couldn't set up irq\n", unit);
1041 ether_ifdetach(ifp);
1053 * Shutdown hardware and free up resources. This can be called any
1054 * time after the mutex has been initialized. It is called in both
1055 * the error case in attach and the normal detach case so it needs
1056 * to be careful about only freeing resources that have actually been
1063 struct vge_softc *sc;
1067 sc = device_get_softc(dev);
1068 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1071 #ifdef DEVICE_POLLING
1072 if (ifp->if_capenable & IFCAP_POLLING)
1073 ether_poll_deregister(ifp);
1076 /* These should only be active if attach succeeded */
1077 if (device_is_attached(dev)) {
1080 * Force off the IFF_UP flag here, in case someone
1081 * still had a BPF descriptor attached to this
1082 * interface. If they do, ether_ifdetach() will cause
1083 * the BPF code to try and clear the promisc mode
1084 * flag, which will bubble down to vge_ioctl(),
1085 * which will try to call vge_init() again. This will
1086 * turn the NIC back on and restart the MII ticker,
1087 * which will panic the system when the kernel tries
1088 * to invoke the vge_tick() function that isn't there
1091 ifp->if_flags &= ~IFF_UP;
1092 ether_ifdetach(ifp);
1095 device_delete_child(dev, sc->vge_miibus);
1096 bus_generic_detach(dev);
1098 if (sc->vge_intrhand)
1099 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1101 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
1103 bus_release_resource(dev, SYS_RES_MEMORY,
1104 VGE_PCI_LOMEM, sc->vge_res);
1108 /* Unload and free the RX DMA ring memory and map */
1110 if (sc->vge_ldata.vge_rx_list_tag) {
1111 bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
1112 sc->vge_ldata.vge_rx_list_map);
1113 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
1114 sc->vge_ldata.vge_rx_list,
1115 sc->vge_ldata.vge_rx_list_map);
1116 bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
1119 /* Unload and free the TX DMA ring memory and map */
1121 if (sc->vge_ldata.vge_tx_list_tag) {
1122 bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
1123 sc->vge_ldata.vge_tx_list_map);
1124 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
1125 sc->vge_ldata.vge_tx_list,
1126 sc->vge_ldata.vge_tx_list_map);
1127 bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
1130 /* Destroy all the RX and TX buffer maps */
1132 if (sc->vge_ldata.vge_mtag) {
1133 for (i = 0; i < VGE_TX_DESC_CNT; i++)
1134 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
1135 sc->vge_ldata.vge_tx_dmamap[i]);
1136 for (i = 0; i < VGE_RX_DESC_CNT; i++)
1137 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
1138 sc->vge_ldata.vge_rx_dmamap[i]);
1139 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
1142 if (sc->vge_parent_tag)
1143 bus_dma_tag_destroy(sc->vge_parent_tag);
1145 mtx_destroy(&sc->vge_mtx);
1151 vge_newbuf(sc, idx, m)
1152 struct vge_softc *sc;
1156 struct vge_dmaload_arg arg;
1157 struct mbuf *n = NULL;
1161 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1166 m->m_data = m->m_ext.ext_buf;
1171 * This is part of an evil trick to deal with non-x86 platforms.
1172 * The VIA chip requires RX buffers to be aligned on 32-bit
1173 * boundaries, but that will hose non-x86 machines. To get around
1174 * this, we leave some empty space at the start of each buffer
1175 * and for non-x86 hosts, we copy the buffer back two bytes
1176 * to achieve word alignment. This is slightly more efficient
1177 * than allocating a new buffer, copying the contents, and
1178 * discarding the old buffer.
1180 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
1181 m_adj(m, VGE_ETHER_ALIGN);
1183 m->m_len = m->m_pkthdr.len = MCLBYTES;
1188 arg.vge_maxsegs = 1;
1191 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
1192 sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
1193 &arg, BUS_DMA_NOWAIT);
1194 if (error || arg.vge_maxsegs != 1) {
1201 * Note: the manual fails to document the fact that for
1202 * proper operation, the driver needs to replenish the RX
1203 * DMA ring 4 descriptors at a time (rather than one at a
1204 * time, like most chips). We can allocate the new buffers
1205 * but we should not set the OWN bits until we're ready
1206 * to hand back 4 of them in one shot.
1209 #define VGE_RXCHUNK 4
1210 sc->vge_rx_consumed++;
1211 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
1212 for (i = idx; i != idx - sc->vge_rx_consumed; i--)
1213 sc->vge_ldata.vge_rx_list[i].vge_sts |=
1214 htole32(VGE_RDSTS_OWN);
1215 sc->vge_rx_consumed = 0;
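		/* e.g. idx == 7: descriptors 7, 6, 5 and 4 are handed back in one shot. */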
1218 sc->vge_ldata.vge_rx_mbuf[idx] = m;
1220 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1221 sc->vge_ldata.vge_rx_dmamap[idx],
1222 BUS_DMASYNC_PREREAD);
1228 vge_tx_list_init(sc)
1229 struct vge_softc *sc;
1231 bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
1232 bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
1233 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1235 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1236 sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
1237 sc->vge_ldata.vge_tx_prodidx = 0;
1238 sc->vge_ldata.vge_tx_considx = 0;
1239 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1245 vge_rx_list_init(sc)
1246 struct vge_softc *sc;
1250 bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
1251 bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
1252 (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
1254 sc->vge_rx_consumed = 0;
1256 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1257 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1261 /* Flush the RX descriptors */
1263 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1264 sc->vge_ldata.vge_rx_list_map,
1265 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1267 sc->vge_ldata.vge_rx_prodidx = 0;
1268 sc->vge_rx_consumed = 0;
1269 sc->vge_head = sc->vge_tail = NULL;
1275 static __inline void
1280 uint16_t *src, *dst;
1282 src = mtod(m, uint16_t *);
1283 dst = src - 1;
1285 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1286 *dst++ = *src++;
1288 m->m_data -= ETHER_ALIGN;
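	/* The whole frame has been copied back two bytes, which word-aligns the payload for the stack. */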
1295 * RX handler. We support the reception of jumbo frames that have
1296 * been fragmented across multiple 2K mbuf cluster buffers.
1300 struct vge_softc *sc;
1306 struct vge_rx_desc *cur_rx;
1307 u_int32_t rxstat, rxctl;
1309 VGE_LOCK_ASSERT(sc);
1311 i = sc->vge_ldata.vge_rx_prodidx;
1313 /* Invalidate the descriptor memory */
1315 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1316 sc->vge_ldata.vge_rx_list_map,
1317 BUS_DMASYNC_POSTREAD);
1319 while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
1321 #ifdef DEVICE_POLLING
1322 if (ifp->if_capenable & IFCAP_POLLING) {
1323 if (sc->rxcycles <= 0)
1329 cur_rx = &sc->vge_ldata.vge_rx_list[i];
1330 m = sc->vge_ldata.vge_rx_mbuf[i];
1331 total_len = VGE_RXBYTES(cur_rx);
1332 rxstat = le32toh(cur_rx->vge_sts);
1333 rxctl = le32toh(cur_rx->vge_ctl);
1335 /* Invalidate the RX mbuf and unload its map */
1337 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1338 sc->vge_ldata.vge_rx_dmamap[i],
1339 BUS_DMASYNC_POSTWRITE);
1340 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1341 sc->vge_ldata.vge_rx_dmamap[i]);
1344 * If the 'start of frame' bit is set, this indicates
1345 * either the first fragment in a multi-fragment receive,
1346 * or an intermediate fragment. Either way, we want to
1347 * accumulate the buffers.
1349 if (rxstat & VGE_RXPKT_SOF) {
1350 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1351 if (sc->vge_head == NULL)
1352 sc->vge_head = sc->vge_tail = m;
1354 m->m_flags &= ~M_PKTHDR;
1355 sc->vge_tail->m_next = m;
1358 vge_newbuf(sc, i, NULL);
1364 * Bad/error frames will have the RXOK bit cleared.
1365 * However, there's one error case we want to allow:
1366 * if a VLAN tagged frame arrives and the chip can't
1367 * match it against the CAM filter, it considers this
1368 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1369 * We don't want to drop the frame though: our VLAN
1370 * filtering is done in software.
1372 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
1373 && !(rxstat & VGE_RDSTS_CSUMERR)) {
1376 * If this is part of a multi-fragment packet,
1377 * discard all the pieces.
1379 if (sc->vge_head != NULL) {
1380 m_freem(sc->vge_head);
1381 sc->vge_head = sc->vge_tail = NULL;
1383 vge_newbuf(sc, i, m);
1389 * If allocating a replacement mbuf fails,
1390 * reload the current one.
1393 if (vge_newbuf(sc, i, NULL)) {
1395 if (sc->vge_head != NULL) {
1396 m_freem(sc->vge_head);
1397 sc->vge_head = sc->vge_tail = NULL;
1399 vge_newbuf(sc, i, m);
1406 if (sc->vge_head != NULL) {
1407 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1409 * Special case: if there are 4 bytes or fewer
1410 * in this buffer, the mbuf can be discarded:
1411 * the last 4 bytes are the CRC, which we don't
1412 * care about anyway.
1414 if (m->m_len <= ETHER_CRC_LEN) {
1415 sc->vge_tail->m_len -=
1416 (ETHER_CRC_LEN - m->m_len);
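				/* e.g. m_len == 1: drop this mbuf and trim the remaining 3 CRC bytes from the tail. */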
1419 m->m_len -= ETHER_CRC_LEN;
1420 m->m_flags &= ~M_PKTHDR;
1421 sc->vge_tail->m_next = m;
1424 sc->vge_head = sc->vge_tail = NULL;
1425 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1427 m->m_pkthdr.len = m->m_len =
1428 (total_len - ETHER_CRC_LEN);
1434 m->m_pkthdr.rcvif = ifp;
1436 /* Do RX checksumming if enabled */
1437 if (ifp->if_capenable & IFCAP_RXCSUM) {
1439 /* Check IP header checksum */
1440 if (rxctl & VGE_RDCTL_IPPKT)
1441 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1442 if (rxctl & VGE_RDCTL_IPCSUMOK)
1443 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1445 /* Check TCP/UDP checksum */
1446 if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
1447 rxctl & VGE_RDCTL_PROTOCSUMOK) {
1448 m->m_pkthdr.csum_flags |=
1449 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1450 m->m_pkthdr.csum_data = 0xffff;
1454 if (rxstat & VGE_RDSTS_VTAG) {
1456 * The 32-bit rxctl register is stored in little-endian.
1457 * However, the 16-bit vlan tag is stored in big-endian,
1458 * so we have to byte swap it.
1460 m->m_pkthdr.ether_vtag =
1461 bswap16(rxctl & VGE_RDCTL_VLANID);
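			/* e.g. a tag read back as 0x2301 byte-swaps to host order 0x0123. */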
1462 m->m_flags |= M_VLANTAG;
1466 (*ifp->if_input)(ifp, m);
1470 if (lim == VGE_RX_DESC_CNT)
1475 /* Flush the RX DMA ring */
1477 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1478 sc->vge_ldata.vge_rx_list_map,
1479 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1481 sc->vge_ldata.vge_rx_prodidx = i;
1482 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1490 struct vge_softc *sc;
1497 idx = sc->vge_ldata.vge_tx_considx;
1499 /* Invalidate the TX descriptor list */
1501 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1502 sc->vge_ldata.vge_tx_list_map,
1503 BUS_DMASYNC_POSTREAD);
1505 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1507 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1508 if (txstat & VGE_TDSTS_OWN)
1511 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1512 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1513 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1514 sc->vge_ldata.vge_tx_dmamap[idx]);
1515 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1516 ifp->if_collisions++;
1517 if (txstat & VGE_TDSTS_TXERR)
1522 sc->vge_ldata.vge_tx_free++;
1523 VGE_TX_DESC_INC(idx);
1526 /* No changes made to the TX ring, so no flush needed */
1528 if (idx != sc->vge_ldata.vge_tx_considx) {
1529 sc->vge_ldata.vge_tx_considx = idx;
1530 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1535 * If not all descriptors have been reaped yet,
1536 * reload the timer so that we will eventually get another
1537 * interrupt that will cause us to re-enter this routine.
1538 * This is done in case the transmitter has gone idle.
1540 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
1541 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1551 struct vge_softc *sc;
1553 struct mii_data *mii;
1558 mii = device_get_softc(sc->vge_miibus);
1562 if (!(mii->mii_media_status & IFM_ACTIVE)) {
1564 if_link_state_change(sc->vge_ifp,
1568 if (mii->mii_media_status & IFM_ACTIVE &&
1569 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1571 if_link_state_change(sc->vge_ifp,
1573 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1574 taskqueue_enqueue(taskqueue_swi,
1584 #ifdef DEVICE_POLLING
1586 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1588 struct vge_softc *sc = ifp->if_softc;
1592 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1595 sc->rxcycles = count;
1596 rx_npkts = vge_rxeof(sc);
1599 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1600 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1602 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1604 status = CSR_READ_4(sc, VGE_ISR);
1605 if (status == 0xFFFFFFFF)
1608 CSR_WRITE_4(sc, VGE_ISR, status);
1611 * XXX check behaviour on receiver stalls.
1614 if (status & VGE_ISR_TXDMA_STALL ||
1615 status & VGE_ISR_RXDMA_STALL)
1618 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1621 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1622 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1629 #endif /* DEVICE_POLLING */
1635 struct vge_softc *sc;
1641 if (sc->suspended) {
1648 if (!(ifp->if_flags & IFF_UP)) {
1653 #ifdef DEVICE_POLLING
1654 if (ifp->if_capenable & IFCAP_POLLING) {
1660 /* Disable interrupts */
1661 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1665 status = CSR_READ_4(sc, VGE_ISR);
1666 /* If the card has gone away the read returns 0xffffffff. */
1667 if (status == 0xFFFFFFFF)
1671 CSR_WRITE_4(sc, VGE_ISR, status);
1673 if ((status & VGE_INTRS) == 0)
1676 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1679 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1681 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1682 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1685 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1688 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1691 if (status & VGE_ISR_LINKSTS)
1695 /* Re-enable interrupts */
1696 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1700 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1701 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
1707 vge_encap(sc, m_head, idx)
1708 struct vge_softc *sc;
1709 struct mbuf *m_head;
1712 struct mbuf *m_new = NULL;
1713 struct vge_dmaload_arg arg;
1717 if (sc->vge_ldata.vge_tx_free <= 2)
1722 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1723 arg.vge_flags |= VGE_TDCTL_IPCSUM;
1724 if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1725 arg.vge_flags |= VGE_TDCTL_TCPCSUM;
1726 if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1727 arg.vge_flags |= VGE_TDCTL_UDPCSUM;
1731 arg.vge_m0 = m_head;
1732 arg.vge_maxsegs = VGE_TX_FRAGS;
1734 map = sc->vge_ldata.vge_tx_dmamap[idx];
1735 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
1736 m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
1738 if (error && error != EFBIG) {
1739 printf("vge%d: can't map mbuf (error %d)\n",
1740 sc->vge_unit, error);
1744 /* Too many segments to map, coalesce into a single mbuf */
1746 if (error || arg.vge_maxsegs == 0) {
1747 m_new = m_defrag(m_head, M_DONTWAIT);
1754 arg.vge_m0 = m_head;
1756 arg.vge_maxsegs = 1;
1758 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
1759 m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
1761 printf("vge%d: can't map mbuf (error %d)\n",
1762 sc->vge_unit, error);
1767 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1768 sc->vge_ldata.vge_tx_free--;
1771 * Set up hardware VLAN tagging.
1774 if (m_head->m_flags & M_VLANTAG)
1775 sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
1776 htole32(m_head->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG);
1778 sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
1784 vge_tx_task(arg, npending)
1797 * Main transmit routine.
1804 struct vge_softc *sc;
1805 struct mbuf *m_head = NULL;
1811 if (!sc->vge_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1816 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1821 idx = sc->vge_ldata.vge_tx_prodidx;
1825 pidx = VGE_TX_DESC_CNT - 1;
1828 while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
1829 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1833 if (vge_encap(sc, m_head, idx)) {
1834 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1835 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1839 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1840 htole16(VGE_TXDESC_Q);
1843 VGE_TX_DESC_INC(idx);
1846 * If there's a BPF listener, bounce a copy of this frame
1849 ETHER_BPF_MTAP(ifp, m_head);
1852 if (idx == sc->vge_ldata.vge_tx_prodidx) {
1857 /* Flush the TX descriptors */
1859 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1860 sc->vge_ldata.vge_tx_list_map,
1861 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1863 /* Issue a transmit command. */
1864 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1866 sc->vge_ldata.vge_tx_prodidx = idx;
1869 * Use the countdown timer for interrupt moderation.
1870 * 'TX done' interrupts are disabled. Instead, we reset the
1871 * countdown timer, which will begin counting until it hits
1872 * the value in the SSTIMER register, and then trigger an
1873 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1874 * timer count is reloaded. Only when the transmitter
1875 * is idle will the timer hit 0 and an interrupt fire.
1877 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1882 * Set a timeout in case the chip goes out to lunch.
1893 struct vge_softc *sc = xsc;
1894 struct ifnet *ifp = sc->vge_ifp;
1895 struct mii_data *mii;
1899 mii = device_get_softc(sc->vge_miibus);
1902 * Cancel pending I/O and free all RX/TX buffers.
1908 * Initialize the RX and TX descriptors and mbufs.
1911 vge_rx_list_init(sc);
1912 vge_tx_list_init(sc);
1914 /* Set our station address */
1915 for (i = 0; i < ETHER_ADDR_LEN; i++)
1916 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
1919 * Set receive FIFO threshold. Also allow transmission and
1920 * reception of VLAN tagged frames.
1922 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1923 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1925 /* Set DMA burst length */
1926 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1927 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1929 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1931 /* Set collision backoff algorithm */
1932 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1933 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1934 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1936 /* Disable LPSEL field in priority resolution */
1937 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1940 * Load the addresses of the DMA queues into the chip.
1941 * Note that we only use one transmit queue.
1944 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1945 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
1946 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1948 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1949 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
1950 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1951 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1953 /* Enable and wake up the RX descriptor queue */
1954 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1955 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1957 /* Enable the TX descriptor queue */
1958 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1960 /* Set up the receive filter -- allow large frames for VLANs. */
1961 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1963 /* If we want promiscuous mode, set the allframes bit. */
1964 if (ifp->if_flags & IFF_PROMISC) {
1965 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1968 /* Set capture broadcast bit to capture broadcast frames. */
1969 if (ifp->if_flags & IFF_BROADCAST) {
1970 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1973 /* Set multicast bit to capture multicast frames. */
1974 if (ifp->if_flags & IFF_MULTICAST) {
1975 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1978 /* Init the cam filter. */
1981 /* Init the multicast filter. */
1984 /* Enable flow control */
1986 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1988 /* Enable jumbo frame reception (if desired) */
1990 /* Start the MAC. */
1991 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1992 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1993 CSR_WRITE_1(sc, VGE_CRS0,
1994 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1997 * Configure one-shot timer for microsecond
1998 * resolution and load it for 400 usecs.
2000 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
2001 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
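	/* At microsecond resolution, a load value of 400 == 400 usecs. */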
2004 * Configure interrupt moderation for receive. Enable
2005 * the holdoff counter and load it, and set the RX
2006 * suppression count to the number of descriptors we
2007 * want to allow before triggering an interrupt.
2008 * The holdoff timer is in units of 20 usecs.
2012 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
2013 /* Select the interrupt holdoff timer page. */
2014 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2015 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2016 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
2018 /* Enable use of the holdoff timer. */
2019 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2020 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
2022 /* Select the RX suppression threshold page. */
2023 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2024 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2025 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
2027 /* Restore the page select bits. */
2028 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
2029 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
2032 #ifdef DEVICE_POLLING
2034 * Disable interrupts if we are polling.
2036 if (ifp->if_capenable & IFCAP_POLLING) {
2037 CSR_WRITE_4(sc, VGE_IMR, 0);
2038 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2039 } else /* otherwise ... */
2043 * Enable interrupts.
2045 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2046 CSR_WRITE_4(sc, VGE_ISR, 0);
2047 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2052 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2053 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2055 sc->vge_if_flags = 0;
2064 * Set media options.
2067 vge_ifmedia_upd(ifp)
2070 struct vge_softc *sc;
2071 struct mii_data *mii;
2075 mii = device_get_softc(sc->vge_miibus);
2083 * Report current media status.
2086 vge_ifmedia_sts(ifp, ifmr)
2088 struct ifmediareq *ifmr;
2090 struct vge_softc *sc;
2091 struct mii_data *mii;
2094 mii = device_get_softc(sc->vge_miibus);
2097 ifmr->ifm_active = mii->mii_media_active;
2098 ifmr->ifm_status = mii->mii_media_status;
2104 vge_miibus_statchg(dev)
2107 struct vge_softc *sc;
2108 struct mii_data *mii;
2109 struct ifmedia_entry *ife;
2111 sc = device_get_softc(dev);
2112 mii = device_get_softc(sc->vge_miibus);
2113 ife = mii->mii_media.ifm_cur;
2116 * If the user manually selects a media mode, we need to turn
2117 * on the forced MAC mode bit in the DIAGCTL register. If the
2118 * user happens to choose a full duplex mode, we also need to
2119 * set the 'force full duplex' bit. This applies only to
2120 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2121 * mode is disabled, and in 1000baseT mode, full duplex is
2122 * always implied, so we turn on the forced mode bit but leave
2123 * the FDX bit cleared.
2126 switch (IFM_SUBTYPE(ife->ifm_media)) {
2128 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2129 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2132 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2133 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2137 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2138 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2139 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2141 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2145 device_printf(dev, "unknown media type: %x\n",
2146 IFM_SUBTYPE(ife->ifm_media));
2154 vge_ioctl(ifp, command, data)
2159 struct vge_softc *sc = ifp->if_softc;
2160 struct ifreq *ifr = (struct ifreq *) data;
2161 struct mii_data *mii;
2166 if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2168 ifp->if_mtu = ifr->ifr_mtu;
2171 if (ifp->if_flags & IFF_UP) {
2172 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2173 ifp->if_flags & IFF_PROMISC &&
2174 !(sc->vge_if_flags & IFF_PROMISC)) {
2175 CSR_SETBIT_1(sc, VGE_RXCTL,
2176 VGE_RXCTL_RX_PROMISC);
2178 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2179 !(ifp->if_flags & IFF_PROMISC) &&
2180 sc->vge_if_flags & IFF_PROMISC) {
2181 CSR_CLRBIT_1(sc, VGE_RXCTL,
2182 VGE_RXCTL_RX_PROMISC);
2187 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2190 sc->vge_if_flags = ifp->if_flags;
2198 mii = device_get_softc(sc->vge_miibus);
2199 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2203 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2204 #ifdef DEVICE_POLLING
2205 if (mask & IFCAP_POLLING) {
2206 if (ifr->ifr_reqcap & IFCAP_POLLING) {
2207 error = ether_poll_register(vge_poll, ifp);
2211 /* Disable interrupts */
2212 CSR_WRITE_4(sc, VGE_IMR, 0);
2213 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2214 ifp->if_capenable |= IFCAP_POLLING;
2217 error = ether_poll_deregister(ifp);
2218 /* Enable interrupts. */
2220 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2221 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2222 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2223 ifp->if_capenable &= ~IFCAP_POLLING;
2227 #endif /* DEVICE_POLLING */
2228 if ((mask & IFCAP_TXCSUM) != 0 &&
2229 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2230 ifp->if_capenable ^= IFCAP_TXCSUM;
2231 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2232 ifp->if_hwassist |= VGE_CSUM_FEATURES;
2234 ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
2236 if ((mask & IFCAP_RXCSUM) != 0 &&
2237 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
2238 ifp->if_capenable ^= IFCAP_RXCSUM;
2242 error = ether_ioctl(ifp, command, data);
2253 struct vge_softc *sc;
2257 printf("vge%d: watchdog timeout\n", sc->vge_unit);
2271 * Stop the adapter and free any mbufs allocated to the
2276 struct vge_softc *sc;
2285 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2287 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2288 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2289 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2290 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2291 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2292 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2294 if (sc->vge_head != NULL) {
2295 m_freem(sc->vge_head);
2296 sc->vge_head = sc->vge_tail = NULL;
2299 /* Free the TX list buffers. */
2301 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2302 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2303 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2304 sc->vge_ldata.vge_tx_dmamap[i]);
2305 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2306 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2310 /* Free the RX list buffers. */
2312 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2313 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2314 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2315 sc->vge_ldata.vge_rx_dmamap[i]);
2316 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2317 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2327 * Device suspend routine. Stop the interface and save some PCI
2328 * settings in case the BIOS doesn't restore them properly on
2335 struct vge_softc *sc;
2337 sc = device_get_softc(dev);
2347 * Device resume routine. Restore some PCI settings in case the BIOS
2348 * doesn't, re-enable busmastering, and restart the interface if
2355 struct vge_softc *sc;
2358 sc = device_get_softc(dev);
2361 /* reenable busmastering */
2362 pci_enable_busmaster(dev);
2363 pci_enable_io(dev, SYS_RES_MEMORY);
2365 /* reinitialize interface if necessary */
2366 if (ifp->if_flags & IFF_UP)
2375 * Stop all chip I/O so that the kernel's probe routines don't
2376 * get confused by errant DMAs when rebooting.
2382 struct vge_softc *sc;
2384 sc = device_get_softc(dev);