/*-
 * Copyright (c) 2004
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *      o Jumbo frame support up to 16K
 *      o Transmit and receive flow control
 *      o IPv4 checksum offload
 *      o VLAN tag insertion and stripping
 *      o TCP large send
 *      o 64-bit multicast hash table filter
 *      o 64 entry CAM filter
 *      o 16K RX FIFO and 48K TX FIFO memory
 *      o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames longer than 2 or 3 fragments.
 * The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);
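/*
 * hw.vge.msi_disable is fetched from the kernel environment when the
 * driver initializes; setting it to a non-zero value (e.g. in
 * /boot/loader.conf) makes the driver fall back to a legacy INTx
 * interrupt instead of allocating an MSI vector.
 */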

/*
 * The SQE error counter in the MIB seems to report bogus values.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
        { VIA_VENDORID, VIA_DEVICEID_61XX,
                "VIA Networking Velocity Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int      vge_attach(device_t);
static int      vge_detach(device_t);
static int      vge_probe(device_t);
static int      vge_resume(device_t);
static int      vge_shutdown(device_t);
static int      vge_suspend(device_t);

static void     vge_cam_clear(struct vge_softc *);
static int      vge_cam_set(struct vge_softc *, uint8_t *);
static void     vge_clrwol(struct vge_softc *);
static void     vge_discard_rxbuf(struct vge_softc *, int);
static int      vge_dma_alloc(struct vge_softc *);
static void     vge_dma_free(struct vge_softc *);
static void     vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
#ifdef VGE_EEPROM
static void     vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static int      vge_encap(struct vge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
                vge_fixup_rx(struct mbuf *);
#endif
static void     vge_freebufs(struct vge_softc *);
static void     vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int      vge_ifmedia_upd(struct ifnet *);
static void     vge_init(void *);
static void     vge_init_locked(struct vge_softc *);
static void     vge_intr(void *);
static void     vge_intr_holdoff(struct vge_softc *);
static int      vge_ioctl(struct ifnet *, u_long, caddr_t);
static void     vge_link_statchg(void *);
static int      vge_miibus_readreg(device_t, int, int);
static void     vge_miibus_statchg(device_t);
static int      vge_miibus_writereg(device_t, int, int, int);
static void     vge_miipoll_start(struct vge_softc *);
static void     vge_miipoll_stop(struct vge_softc *);
static int      vge_newbuf(struct vge_softc *, int);
static void     vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
static void     vge_reset(struct vge_softc *);
static int      vge_rx_list_init(struct vge_softc *);
static int      vge_rxeof(struct vge_softc *, int);
static void     vge_rxfilter(struct vge_softc *);
static void     vge_setvlan(struct vge_softc *);
static void     vge_setwol(struct vge_softc *);
static void     vge_start(struct ifnet *);
static void     vge_start_locked(struct ifnet *);
static void     vge_stats_clear(struct vge_softc *);
static void     vge_stats_update(struct vge_softc *);
static void     vge_stop(struct vge_softc *);
static void     vge_sysctl_node(struct vge_softc *);
static int      vge_tx_list_init(struct vge_softc *);
static void     vge_txeof(struct vge_softc *);
static void     vge_watchdog(void *);

static device_method_t vge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         vge_probe),
        DEVMETHOD(device_attach,        vge_attach),
        DEVMETHOD(device_detach,        vge_detach),
        DEVMETHOD(device_suspend,       vge_suspend),
        DEVMETHOD(device_resume,        vge_resume),
        DEVMETHOD(device_shutdown,      vge_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       vge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      vge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       vge_miibus_statchg),

        { 0, 0 }
};

static driver_t vge_driver = {
        "vge",
        vge_methods,
        sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
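/*
 * With these declarations the driver can either be compiled into the
 * kernel ("device vge" together with "device miibus" in the kernel
 * configuration) or loaded at runtime with "kldload if_vge".
 */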

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
        int i;
        uint16_t word = 0;

        /*
         * Enter EEPROM embedded programming mode. In order to
         * access the EEPROM at all, we first have to set the
         * EELOAD bit in the CHIPCFG2 register.
         */
        CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
        CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

        /* Select the address of the word we want to read */
        CSR_WRITE_1(sc, VGE_EEADDR, addr);

        /* Issue read command */
        CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

        /* Wait for the done bit to be set. */
        for (i = 0; i < VGE_TIMEOUT; i++) {
                if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
                        break;
        }

        if (i == VGE_TIMEOUT) {
                device_printf(sc->vge_dev, "EEPROM read timed out\n");
                *dest = 0;
                return;
        }

        /* Read the result */
        word = CSR_READ_2(sc, VGE_EERDDAT);

        /* Turn off EEPROM access mode. */
        CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
        CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

        *dest = word;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
        int i;
#ifdef VGE_EEPROM
        uint16_t word = 0, *ptr;

        for (i = 0; i < cnt; i++) {
                vge_eeprom_getword(sc, off + i, &word);
                ptr = (uint16_t *)(dest + (i * 2));
                if (swap)
                        *ptr = ntohs(word);
                else
                        *ptr = word;
        }
#else
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(struct vge_softc *sc)
{
        int i;

        CSR_WRITE_1(sc, VGE_MIICMD, 0);

        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(1);
                if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
                        break;
        }

        if (i == VGE_TIMEOUT)
                device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
        int i;

        /* First, make sure we're idle. */

        CSR_WRITE_1(sc, VGE_MIICMD, 0);
        CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(1);
                if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
                        break;
        }

        if (i == VGE_TIMEOUT) {
                device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
                return;
        }

        /* Now enable auto poll mode. */

        CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

        /* And make sure it started. */

        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(1);
                if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
                        break;
        }

        if (i == VGE_TIMEOUT)
                device_printf(sc->vge_dev, "failed to start MII autopoll\n");
}

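/*
 * Manual MII access: autopolling must be stopped first, then the
 * register number is written to VGE_MIIADDR, a read or write command
 * is issued through VGE_MIICMD, and the command bit is polled until
 * the chip clears it, after which autopolling is restarted.
 */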
static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct vge_softc *sc;
        int i;
        uint16_t rval = 0;

        sc = device_get_softc(dev);

        vge_miipoll_stop(sc);

        /* Specify the register we want to read. */
        CSR_WRITE_1(sc, VGE_MIIADDR, reg);

        /* Issue read command. */
        CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

        /* Wait for the read command bit to self-clear. */
        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(1);
                if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
                        break;
        }

        if (i == VGE_TIMEOUT)
                device_printf(sc->vge_dev, "MII read timed out\n");
        else
                rval = CSR_READ_2(sc, VGE_MIIDATA);

        vge_miipoll_start(sc);

        return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
        struct vge_softc *sc;
        int i, rval = 0;

        sc = device_get_softc(dev);

        vge_miipoll_stop(sc);

        /* Specify the register we want to write. */
        CSR_WRITE_1(sc, VGE_MIIADDR, reg);

        /* Specify the data we want to write. */
        CSR_WRITE_2(sc, VGE_MIIDATA, data);

        /* Issue write command. */
        CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

        /* Wait for the write command bit to self-clear. */
        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(1);
                if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
                        break;
        }

        if (i == VGE_TIMEOUT) {
                device_printf(sc->vge_dev, "MII write timed out\n");
                rval = EIO;
        }

        vge_miipoll_start(sc);

        return (rval);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
        int i;

        /*
         * Turn off all the mask bits. This tells the chip
         * that none of the entries in the CAM filter are valid.
         * Desired entries will be enabled as we fill the filter in.
         */

        CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
        CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
        CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
        for (i = 0; i < 8; i++)
                CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

        /* Clear the VLAN filter too. */

        CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
        for (i = 0; i < 8; i++)
                CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

        CSR_WRITE_1(sc, VGE_CAMADDR, 0);
        CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
        CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

        sc->vge_camidx = 0;
}

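/*
 * Add one perfect-filter entry to the CAM: select the CAM data page,
 * write the six address bytes into entry vge_camidx, commit it with
 * the WRITE bit, then set the entry's valid bit in the mask page,
 * where entry N lives in mask register N / 8, bit N % 8.
 */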
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
        int i, error = 0;

        if (sc->vge_camidx == VGE_CAM_MAXADDRS)
                return (ENOSPC);

        /* Select the CAM data page. */
        CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
        CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

        /* Set the filter entry we want to update and enable writing. */
        CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

        /* Write the address to the CAM registers */
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

        /* Issue a write command. */
        CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

        /* Wait for it to clear. */
        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(1);
                if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
                        break;
        }

        if (i == VGE_TIMEOUT) {
                device_printf(sc->vge_dev, "setting CAM filter failed\n");
                error = EIO;
                goto fail;
        }

        /* Select the CAM mask page. */
        CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
        CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

        /* Set the mask bit that enables this filter. */
        CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
            1<<(sc->vge_camidx & 7));

        sc->vge_camidx++;

fail:
        /* Turn off access to CAM. */
        CSR_WRITE_1(sc, VGE_CAMADDR, 0);
        CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
        CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

        return (error);
}

static void
vge_setvlan(struct vge_softc *sc)
{
        struct ifnet *ifp;
        uint8_t cfg;

        VGE_LOCK_ASSERT(sc);

        ifp = sc->vge_ifp;
        cfg = CSR_READ_1(sc, VGE_RXCFG);
        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
                cfg |= VGE_VTAG_OPT2;
        else
                cfg &= ~VGE_VTAG_OPT2;
        CSR_WRITE_1(sc, VGE_RXCFG, cfg);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast
 * addresses, we use the hash filter instead.
 */
static void
vge_rxfilter(struct vge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t h, hashes[2];
        uint8_t rxcfg;
        int error = 0;

        VGE_LOCK_ASSERT(sc);

        /* First, zot all the multicast entries. */
        hashes[0] = 0;
        hashes[1] = 0;

        rxcfg = CSR_READ_1(sc, VGE_RXCTL);
        rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
            VGE_RXCTL_RX_PROMISC);
        /*
         * Always allow VLAN oversized frames and frames for
         * this host.
         */
        rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

        ifp = sc->vge_ifp;
        if ((ifp->if_flags & IFF_BROADCAST) != 0)
                rxcfg |= VGE_RXCTL_RX_BCAST;
        if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                if ((ifp->if_flags & IFF_PROMISC) != 0)
                        rxcfg |= VGE_RXCTL_RX_PROMISC;
                if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                        hashes[0] = 0xFFFFFFFF;
                        hashes[1] = 0xFFFFFFFF;
                }
                goto done;
        }

        vge_cam_clear(sc);
        /* Now program new ones */
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                error = vge_cam_set(sc,
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                if (error)
                        break;
        }

        /* If there were too many addresses, use the hash filter. */
        if (error) {
                vge_cam_clear(sc);

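                /*
                 * The hash filter works like the classic multicast hash
                 * on other MACs: the upper 6 bits of the big-endian CRC32
                 * of each address select one of the 64 bits spread across
                 * the two 32-bit MAR registers.
                 */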
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                            ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
                        if (h < 32)
                                hashes[0] |= (1 << h);
                        else
                                hashes[1] |= (1 << (h - 32));
                }
        }
        if_maddr_runlock(ifp);

done:
        if (hashes[0] != 0 || hashes[1] != 0)
                rxcfg |= VGE_RXCTL_RX_MCAST;
        CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
        CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
        CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
}

static void
vge_reset(struct vge_softc *sc)
{
        int i;

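        /* Request a soft reset; the bit self-clears when the reset is done. */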
        CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(5);
                if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
                        break;
        }

        if (i == VGE_TIMEOUT) {
                device_printf(sc->vge_dev, "soft reset timed out\n");
                CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
                DELAY(2000);
        }

        DELAY(5000);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
        struct vge_type *t;

        t = vge_devs;

        while (t->vge_name != NULL) {
                if ((pci_get_vendor(dev) == t->vge_vid) &&
                    (pci_get_device(dev) == t->vge_did)) {
                        device_set_desc(dev, t->vge_name);
                        return (BUS_PROBE_DEFAULT);
                }
                t++;
        }

        return (ENXIO);
}

/*
 * Map a single buffer address.
 */

struct vge_dmamap_arg {
        bus_addr_t      vge_busaddr;
};

static void
vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct vge_dmamap_arg *ctx;

        if (error != 0)
                return;

        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        ctx = (struct vge_dmamap_arg *)arg;
        ctx->vge_busaddr = segs[0].ds_addr;
}

static int
vge_dma_alloc(struct vge_softc *sc)
{
        struct vge_dmamap_arg ctx;
        struct vge_txdesc *txd;
        struct vge_rxdesc *rxd;
        bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
        int error, i;

        /*
         * It seems old PCI controllers do not support DAC.  DAC
         * configuration can be enabled by accessing the VGE_CHIPCFG3
         * register, but we honor the EEPROM configuration instead of
         * blindly overriding it.  PCIe based controllers are supposed
         * to support 64bit DMA, so enable 64bit DMA on these
         * controllers.
         */
        if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
                lowaddr = BUS_SPACE_MAXADDR;
        else
                lowaddr = BUS_SPACE_MAXADDR_32BIT;

again:
        /* Create parent ring tag. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
            1, 0,                       /* algnmnt, boundary */
            lowaddr,                    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            0,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->vge_cdata.vge_ring_tag);
        if (error != 0) {
                device_printf(sc->vge_dev,
                    "could not create parent DMA tag.\n");
                goto fail;
        }

        /* Create tag for Tx ring. */
        error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
            VGE_TX_RING_ALIGN, 0,       /* algnmnt, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            VGE_TX_LIST_SZ,             /* maxsize */
            1,                          /* nsegments */
            VGE_TX_LIST_SZ,             /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->vge_cdata.vge_tx_ring_tag);
        if (error != 0) {
                device_printf(sc->vge_dev,
                    "could not allocate Tx ring DMA tag.\n");
                goto fail;
        }

        /* Create tag for Rx ring. */
        error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
            VGE_RX_RING_ALIGN, 0,       /* algnmnt, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            VGE_RX_LIST_SZ,             /* maxsize */
            1,                          /* nsegments */
            VGE_RX_LIST_SZ,             /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->vge_cdata.vge_rx_ring_tag);
        if (error != 0) {
                device_printf(sc->vge_dev,
                    "could not allocate Rx ring DMA tag.\n");
                goto fail;
        }

        /* Allocate DMA'able memory and load the DMA map for Tx ring. */
        error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
            (void **)&sc->vge_rdata.vge_tx_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->vge_cdata.vge_tx_ring_map);
        if (error != 0) {
                device_printf(sc->vge_dev,
                    "could not allocate DMA'able memory for Tx ring.\n");
                goto fail;
        }

        ctx.vge_busaddr = 0;
        error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
            sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
            VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
        if (error != 0 || ctx.vge_busaddr == 0) {
                device_printf(sc->vge_dev,
                    "could not load DMA'able memory for Tx ring.\n");
                goto fail;
        }
        sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;

        /* Allocate DMA'able memory and load the DMA map for Rx ring. */
        error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
            (void **)&sc->vge_rdata.vge_rx_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->vge_cdata.vge_rx_ring_map);
        if (error != 0) {
                device_printf(sc->vge_dev,
                    "could not allocate DMA'able memory for Rx ring.\n");
                goto fail;
        }

        ctx.vge_busaddr = 0;
        error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
            sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
            VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
        if (error != 0 || ctx.vge_busaddr == 0) {
                device_printf(sc->vge_dev,
                    "could not load DMA'able memory for Rx ring.\n");
                goto fail;
        }
        sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;

        /* Tx/Rx descriptor queue should reside within 4GB boundary. */
        tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
        rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
        if ((VGE_ADDR_HI(tx_ring_end) !=
            VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
            (VGE_ADDR_HI(rx_ring_end) !=
            VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
            VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
                device_printf(sc->vge_dev, "4GB boundary crossed, "
                    "switching to 32bit DMA address mode.\n");
                vge_dma_free(sc);
                /* Limit DMA address space to 32bit and try again. */
                lowaddr = BUS_SPACE_MAXADDR_32BIT;
                goto again;
        }

        if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
                lowaddr = VGE_BUF_DMA_MAXADDR;
        else
                lowaddr = BUS_SPACE_MAXADDR_32BIT;
        /* Create parent buffer tag. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
            1, 0,                       /* algnmnt, boundary */
            lowaddr,                    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            0,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->vge_cdata.vge_buffer_tag);
        if (error != 0) {
                device_printf(sc->vge_dev,
                    "could not create parent buffer DMA tag.\n");
                goto fail;
        }

        /* Create tag for Tx buffers. */
        error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
            1, 0,                       /* algnmnt, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES * VGE_MAXTXSEGS,   /* maxsize */
            VGE_MAXTXSEGS,              /* nsegments */
            MCLBYTES,                   /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->vge_cdata.vge_tx_tag);
        if (error != 0) {
                device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
                goto fail;
        }

        /* Create tag for Rx buffers. */
        error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
            VGE_RX_BUF_ALIGN, 0,        /* algnmnt, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES,                   /* maxsize */
            1,                          /* nsegments */
            MCLBYTES,                   /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->vge_cdata.vge_rx_tag);
        if (error != 0) {
                device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
                goto fail;
        }

        /* Create DMA maps for Tx buffers. */
        for (i = 0; i < VGE_TX_DESC_CNT; i++) {
                txd = &sc->vge_cdata.vge_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_dmamap = NULL;
                error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
                    &txd->tx_dmamap);
                if (error != 0) {
                        device_printf(sc->vge_dev,
                            "could not create Tx dmamap.\n");
                        goto fail;
                }
        }
        /* Create DMA maps for Rx buffers. */
        if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
            &sc->vge_cdata.vge_rx_sparemap)) != 0) {
                device_printf(sc->vge_dev,
                    "could not create spare Rx dmamap.\n");
                goto fail;
        }
        for (i = 0; i < VGE_RX_DESC_CNT; i++) {
                rxd = &sc->vge_cdata.vge_rxdesc[i];
                rxd->rx_m = NULL;
                rxd->rx_dmamap = NULL;
                error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
                    &rxd->rx_dmamap);
                if (error != 0) {
                        device_printf(sc->vge_dev,
                            "could not create Rx dmamap.\n");
                        goto fail;
                }
        }

fail:
        return (error);
}

static void
vge_dma_free(struct vge_softc *sc)
{
        struct vge_txdesc *txd;
        struct vge_rxdesc *rxd;
        int i;

        /* Tx ring. */
        if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
                if (sc->vge_cdata.vge_tx_ring_map)
                        bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
                            sc->vge_cdata.vge_tx_ring_map);
                if (sc->vge_cdata.vge_tx_ring_map &&
                    sc->vge_rdata.vge_tx_ring)
                        bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
                            sc->vge_rdata.vge_tx_ring,
                            sc->vge_cdata.vge_tx_ring_map);
                sc->vge_rdata.vge_tx_ring = NULL;
                sc->vge_cdata.vge_tx_ring_map = NULL;
                bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
                sc->vge_cdata.vge_tx_ring_tag = NULL;
        }
        /* Rx ring. */
        if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
                if (sc->vge_cdata.vge_rx_ring_map)
                        bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
                            sc->vge_cdata.vge_rx_ring_map);
                if (sc->vge_cdata.vge_rx_ring_map &&
                    sc->vge_rdata.vge_rx_ring)
                        bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
                            sc->vge_rdata.vge_rx_ring,
                            sc->vge_cdata.vge_rx_ring_map);
                sc->vge_rdata.vge_rx_ring = NULL;
                sc->vge_cdata.vge_rx_ring_map = NULL;
                bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
                sc->vge_cdata.vge_rx_ring_tag = NULL;
        }
        /* Tx buffers. */
        if (sc->vge_cdata.vge_tx_tag != NULL) {
                for (i = 0; i < VGE_TX_DESC_CNT; i++) {
                        txd = &sc->vge_cdata.vge_txdesc[i];
                        if (txd->tx_dmamap != NULL) {
                                bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
                                    txd->tx_dmamap);
                                txd->tx_dmamap = NULL;
                        }
                }
                bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
                sc->vge_cdata.vge_tx_tag = NULL;
        }
        /* Rx buffers. */
        if (sc->vge_cdata.vge_rx_tag != NULL) {
                for (i = 0; i < VGE_RX_DESC_CNT; i++) {
                        rxd = &sc->vge_cdata.vge_rxdesc[i];
                        if (rxd->rx_dmamap != NULL) {
                                bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
                                    rxd->rx_dmamap);
                                rxd->rx_dmamap = NULL;
                        }
                }
                if (sc->vge_cdata.vge_rx_sparemap != NULL) {
                        bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
                            sc->vge_cdata.vge_rx_sparemap);
                        sc->vge_cdata.vge_rx_sparemap = NULL;
                }
                bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
                sc->vge_cdata.vge_rx_tag = NULL;
        }

        if (sc->vge_cdata.vge_buffer_tag != NULL) {
                bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
                sc->vge_cdata.vge_buffer_tag = NULL;
        }
        if (sc->vge_cdata.vge_ring_tag != NULL) {
                bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
                sc->vge_cdata.vge_ring_tag = NULL;
        }
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
        u_char eaddr[ETHER_ADDR_LEN];
        struct vge_softc *sc;
        struct ifnet *ifp;
        int error = 0, cap, i, msic, rid;

        sc = device_get_softc(dev);
        sc->vge_dev = dev;

        mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        rid = PCIR_BAR(1);
        sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);

        if (sc->vge_res == NULL) {
                device_printf(dev, "couldn't map ports/memory\n");
                error = ENXIO;
                goto fail;
        }

        if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
                sc->vge_flags |= VGE_FLAG_PCIE;
                sc->vge_expcap = cap;
        } else
                sc->vge_flags |= VGE_FLAG_JUMBO;
        if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
                sc->vge_flags |= VGE_FLAG_PMCAP;
                sc->vge_pmcap = cap;
        }
        rid = 0;
        msic = pci_msi_count(dev);
        if (msi_disable == 0 && msic > 0) {
                msic = 1;
                if (pci_alloc_msi(dev, &msic) == 0) {
                        if (msic == 1) {
                                sc->vge_flags |= VGE_FLAG_MSI;
                                device_printf(dev, "Using %d MSI message\n",
                                    msic);
                                rid = 1;
                        } else
                                pci_release_msi(dev);
                }
        }

        /* Allocate interrupt */
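        /*
         * With MSI the interrupt resource lives at rid 1 (rid 0 is the
         * legacy INTx line), and MSI vectors are never shared, hence the
         * conditional RF_SHAREABLE below.
         */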
        sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
        if (sc->vge_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* Reset the adapter. */
        vge_reset(sc);
        /* Reload EEPROM. */
        CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
        for (i = 0; i < VGE_TIMEOUT; i++) {
                DELAY(5);
                if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
                        break;
        }
        if (i == VGE_TIMEOUT)
                device_printf(dev, "EEPROM reload timed out\n");
        /*
         * Clear PACPI as the EEPROM reload will set the bit. Otherwise
         * the MAC will receive magic packets, which in turn confuses the
         * controller.
         */
        CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

        /*
         * Get station address from the EEPROM.
         */
        vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
        /*
         * Save the configured PHY address.
         * The PHY address of PCIe controllers seems to just reflect
         * the media jump strapping status, so we assume the internal
         * PHY of PCIe controllers is at address 1.
         */
        if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
                sc->vge_phyaddr = 1;
        else
                sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
                    VGE_MIICFG_PHYADDR;
        /* Clear WOL and take hardware from powerdown. */
        vge_clrwol(sc);
        vge_sysctl_node(sc);
        error = vge_dma_alloc(sc);
        if (error)
                goto fail;

        ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "cannot if_alloc()\n");
                error = ENOSPC;
                goto fail;
        }

        /* Do MII setup */
        error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
            vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY,
            0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }

        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = vge_ioctl;
        ifp->if_capabilities = IFCAP_VLAN_MTU;
        ifp->if_start = vge_start;
        ifp->if_hwassist = VGE_CSUM_FEATURES;
        ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
            IFCAP_VLAN_HWTAGGING;
        if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
                ifp->if_capabilities |= IFCAP_WOL;
        ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif
        ifp->if_init = vge_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
        ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
        IFQ_SET_READY(&ifp->if_snd);

        /*
         * Call MI attach routine.
         */
        ether_ifattach(ifp, eaddr);

        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        /* Hook interrupt last to avoid having to lock softc */
        error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
            NULL, vge_intr, sc, &sc->vge_intrhand);

        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        if (error)
                vge_detach(dev);

        return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)
{
        struct vge_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
        ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
        if (ifp->if_capenable & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif

        /* These should only be active if attach succeeded */
        if (device_is_attached(dev)) {
                ether_ifdetach(ifp);
                VGE_LOCK(sc);
                vge_stop(sc);
                VGE_UNLOCK(sc);
                callout_drain(&sc->vge_watchdog);
        }
        if (sc->vge_miibus)
                device_delete_child(dev, sc->vge_miibus);
        bus_generic_detach(dev);

        if (sc->vge_intrhand)
                bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
        if (sc->vge_irq)
                bus_release_resource(dev, SYS_RES_IRQ,
                    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
        if (sc->vge_flags & VGE_FLAG_MSI)
                pci_release_msi(dev);
        if (sc->vge_res)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(1), sc->vge_res);
        if (ifp)
                if_free(ifp);

        vge_dma_free(sc);
        mtx_destroy(&sc->vge_mtx);

        return (0);
}

static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
        struct vge_rxdesc *rxd;
        int i;

        rxd = &sc->vge_cdata.vge_rxdesc[prod];
        rxd->rx_desc->vge_sts = 0;
        rxd->rx_desc->vge_ctl = 0;

        /*
         * Note: the manual fails to document the fact that for
         * proper operation, the driver needs to replenish the RX
         * DMA ring 4 descriptors at a time (rather than one at a
         * time, like most chips). We can allocate the new buffers
         * but we should not set the OWN bits until we're ready
         * to hand back 4 of them in one shot.
         */
        if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
                for (i = VGE_RXCHUNK; i > 0; i--) {
                        rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
                        rxd = rxd->rxd_prev;
                }
                sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
        }
}

static int
vge_newbuf(struct vge_softc *sc, int prod)
{
        struct vge_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int i, nsegs;

        m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        /*
         * This is part of an evil trick to deal with strict-alignment
         * architectures. The VIA chip requires RX buffers to be aligned
         * on 32-bit boundaries, but that will hose strict-alignment
         * architectures. To get around this, we leave some empty space
         * at the start of each buffer and, on strict-alignment hosts,
         * we copy the buffer back two bytes to achieve word alignment.
         * This is slightly more efficient than allocating a new buffer,
         * copying the contents, and discarding the old buffer.
         */
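        /*
         * mbuf clusters are already 32-bit aligned, so trimming
         * VGE_RX_BUF_ALIGN bytes from the front keeps the DMA address
         * 32-bit aligned while reserving the slack that vge_fixup_rx()
         * later consumes on strict-alignment machines.
         */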
        m->m_len = m->m_pkthdr.len = MCLBYTES;
        m_adj(m, VGE_RX_BUF_ALIGN);

        if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
            sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        rxd = &sc->vge_cdata.vge_rxdesc[prod];
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
        sc->vge_cdata.vge_rx_sparemap = map;
        bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;

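        /*
         * Program the descriptor: the low word takes the low 32 address
         * bits; the high word packs the upper address bits together with
         * the buffer length and the per-descriptor interrupt bit.
         */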
        rxd->rx_desc->vge_sts = 0;
        rxd->rx_desc->vge_ctl = 0;
        rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
        rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
            (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);

        /*
         * Note: the manual fails to document the fact that for
         * proper operation, the driver needs to replenish the RX
         * DMA ring 4 descriptors at a time (rather than one at a
         * time, like most chips). We can allocate the new buffers
         * but we should not set the OWN bits until we're ready
         * to hand back 4 of them in one shot.
         */
        if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
                for (i = VGE_RXCHUNK; i > 0; i--) {
                        rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
                        rxd = rxd->rxd_prev;
                }
                sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
        }

        return (0);
}

static int
vge_tx_list_init(struct vge_softc *sc)
{
        struct vge_ring_data *rd;
        struct vge_txdesc *txd;
        int i;

        VGE_LOCK_ASSERT(sc);

        sc->vge_cdata.vge_tx_prodidx = 0;
        sc->vge_cdata.vge_tx_considx = 0;
        sc->vge_cdata.vge_tx_cnt = 0;

        rd = &sc->vge_rdata;
        bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
        for (i = 0; i < VGE_TX_DESC_CNT; i++) {
                txd = &sc->vge_cdata.vge_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_desc = &rd->vge_tx_ring[i];
        }

        bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
            sc->vge_cdata.vge_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
        struct vge_ring_data *rd;
        struct vge_rxdesc *rxd;
        int i;

        VGE_LOCK_ASSERT(sc);

        sc->vge_cdata.vge_rx_prodidx = 0;
        sc->vge_cdata.vge_head = NULL;
        sc->vge_cdata.vge_tail = NULL;
        sc->vge_cdata.vge_rx_commit = 0;

        rd = &sc->vge_rdata;
        bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
        for (i = 0; i < VGE_RX_DESC_CNT; i++) {
                rxd = &sc->vge_cdata.vge_rxdesc[i];
                rxd->rx_m = NULL;
                rxd->rx_desc = &rd->vge_rx_ring[i];
                if (i == 0)
                        rxd->rxd_prev =
                            &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
                else
                        rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
                if (vge_newbuf(sc, i) != 0)
                        return (ENOBUFS);
        }

        bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
            sc->vge_cdata.vge_rx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        sc->vge_cdata.vge_rx_commit = 0;

        return (0);
}

static void
vge_freebufs(struct vge_softc *sc)
{
        struct vge_txdesc *txd;
        struct vge_rxdesc *rxd;
        struct ifnet *ifp;
        int i;

        VGE_LOCK_ASSERT(sc);

        ifp = sc->vge_ifp;
        /*
         * Free RX and TX mbufs still in the queues.
         */
        for (i = 0; i < VGE_RX_DESC_CNT; i++) {
                rxd = &sc->vge_cdata.vge_rxdesc[i];
                if (rxd->rx_m != NULL) {
                        bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
                            rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
                            rxd->rx_dmamap);
                        m_freem(rxd->rx_m);
                        rxd->rx_m = NULL;
                }
        }

        for (i = 0; i < VGE_TX_DESC_CNT; i++) {
                txd = &sc->vge_cdata.vge_txdesc[i];
                if (txd->tx_m != NULL) {
                        bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
                            txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
                            txd->tx_dmamap);
                        m_freem(txd->tx_m);
                        txd->tx_m = NULL;
                        ifp->if_oerrors++;
                }
        }
}

#ifndef __NO_STRICT_ALIGNMENT
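/*
 * Shift the received payload back ETHER_ALIGN (2) bytes so that the IP
 * header becomes 32-bit aligned. The 16-bit copy walks forward into an
 * overlapping destination, which is safe because dst always trails src.
 */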
static __inline void
vge_fixup_rx(struct mbuf *m)
{
        int i;
        uint16_t *src, *dst;

        src = mtod(m, uint16_t *);
        dst = src - 1;

        for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
                *dst++ = *src++;

        m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
        struct mbuf *m;
        struct ifnet *ifp;
        int prod, prog, total_len;
        struct vge_rxdesc *rxd;
        struct vge_rx_desc *cur_rx;
        uint32_t rxstat, rxctl;

        VGE_LOCK_ASSERT(sc);

        ifp = sc->vge_ifp;

        bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
            sc->vge_cdata.vge_rx_ring_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        prod = sc->vge_cdata.vge_rx_prodidx;
        for (prog = 0; count > 0 &&
            (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
            VGE_RX_DESC_INC(prod)) {
                cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
                rxstat = le32toh(cur_rx->vge_sts);
                if ((rxstat & VGE_RDSTS_OWN) != 0)
                        break;
                count--;
                prog++;
                rxctl = le32toh(cur_rx->vge_ctl);
                total_len = VGE_RXBYTES(rxstat);
                rxd = &sc->vge_cdata.vge_rxdesc[prod];
                m = rxd->rx_m;

                /*
                 * If the 'start of frame' bit is set, this indicates
                 * either the first fragment in a multi-fragment receive,
                 * or an intermediate fragment. Either way, we want to
                 * accumulate the buffers.
                 */
                if ((rxstat & VGE_RXPKT_SOF) != 0) {
                        if (vge_newbuf(sc, prod) != 0) {
                                ifp->if_iqdrops++;
                                VGE_CHAIN_RESET(sc);
                                vge_discard_rxbuf(sc, prod);
                                continue;
                        }
                        m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
                        if (sc->vge_cdata.vge_head == NULL) {
                                sc->vge_cdata.vge_head = m;
                                sc->vge_cdata.vge_tail = m;
                        } else {
                                m->m_flags &= ~M_PKTHDR;
                                sc->vge_cdata.vge_tail->m_next = m;
                                sc->vge_cdata.vge_tail = m;
                        }
                        continue;
                }

1491                 /*
1492                  * Bad/error frames will have the RXOK bit cleared.
1493                  * However, there's one error case we want to allow:
1494                  * if a VLAN tagged frame arrives and the chip can't
1495                  * match it against the CAM filter, it considers this
1496                  * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1497                  * We don't want to drop the frame though: our VLAN
1498                  * filtering is done in software.
1499                  * We also want to receive frames with bad checksums and
1500                  * frames with bad lengths.
1501                  */
1502                 if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1503                     (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1504                     VGE_RDSTS_CSUMERR)) == 0) {
1505                         ifp->if_ierrors++;
1506                         /*
1507                          * If this is part of a multi-fragment packet,
1508                          * discard all the pieces.
1509                          */
1510                         VGE_CHAIN_RESET(sc);
1511                         vge_discard_rxbuf(sc, prod);
1512                         continue;
1513                 }
1514
1515                 if (vge_newbuf(sc, prod) != 0) {
1516                         ifp->if_iqdrops++;
1517                         VGE_CHAIN_RESET(sc);
1518                         vge_discard_rxbuf(sc, prod);
1519                         continue;
1520                 }
1521
1522                 /* Chain received mbufs. */
1523                 if (sc->vge_cdata.vge_head != NULL) {
1524                         m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
1525                         /*
1526                          * Special case: if there are 4 bytes or fewer
1527                          * in this buffer, the mbuf can be discarded:
1528                          * the last 4 bytes are the CRC, which we don't
1529                          * care about anyway.
1530                          */
1531                         if (m->m_len <= ETHER_CRC_LEN) {
1532                                 sc->vge_cdata.vge_tail->m_len -=
1533                                     (ETHER_CRC_LEN - m->m_len);
1534                                 m_freem(m);
1535                         } else {
1536                                 m->m_len -= ETHER_CRC_LEN;
1537                                 m->m_flags &= ~M_PKTHDR;
1538                                 sc->vge_cdata.vge_tail->m_next = m;
1539                         }
1540                         m = sc->vge_cdata.vge_head;
1541                         m->m_flags |= M_PKTHDR;
1542                         m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1543                 } else {
1544                         m->m_flags |= M_PKTHDR;
1545                         m->m_pkthdr.len = m->m_len =
1546                             (total_len - ETHER_CRC_LEN);
1547                 }
1548
1549 #ifndef __NO_STRICT_ALIGNMENT
1550                 vge_fixup_rx(m);
1551 #endif
1552                 m->m_pkthdr.rcvif = ifp;
1553
1554                 /* Do RX checksumming if enabled */
1555                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1556                     (rxctl & VGE_RDCTL_FRAG) == 0) {
1557                         /* Check IP header checksum */
1558                         if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1559                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1560                         if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1561                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1562
1563                         /* Check TCP/UDP checksum */
1564                         if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1565                             rxctl & VGE_RDCTL_PROTOCSUMOK) {
1566                                 m->m_pkthdr.csum_flags |=
1567                                     CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1568                                 m->m_pkthdr.csum_data = 0xffff;
1569                         }
1570                 }
1571
1572                 if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1573                         /*
1574                          * The 32-bit rxctl register is stored in little-endian.
1575                          * However, the 16-bit vlan tag is stored in big-endian,
1576                          * so we have to byte swap it.
1577                          */
1578                         m->m_pkthdr.ether_vtag =
1579                             bswap16(rxctl & VGE_RDCTL_VLANID);
1580                         m->m_flags |= M_VLANTAG;
1581                 }
1582
1583                 VGE_UNLOCK(sc);
1584                 (*ifp->if_input)(ifp, m);
1585                 VGE_LOCK(sc);
1586                 sc->vge_cdata.vge_head = NULL;
1587                 sc->vge_cdata.vge_tail = NULL;
1588         }
1589
1590         if (prog > 0) {
1591                 sc->vge_cdata.vge_rx_prodidx = prod;
1592                 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1593                     sc->vge_cdata.vge_rx_ring_map,
1594                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1595                 /* Update residue counter. */
1596                 if (sc->vge_cdata.vge_rx_commit != 0) {
1597                         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1598                             sc->vge_cdata.vge_rx_commit);
1599                         sc->vge_cdata.vge_rx_commit = 0;
1600                 }
1601         }
1602         return (prog);
1603 }
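
/*
 * Worked example of the chaining arithmetic above, assuming MCLBYTES is
 * 2048 and VGE_RX_BUF_ALIGN is 4, so each chained buffer carries
 * 2048 - 4 = 2044 bytes: a 5000-byte frame arrives as two full buffers
 * plus a tail buffer holding 5000 % 2044 = 912 bytes. The tail is
 * trimmed by ETHER_CRC_LEN to 908 bytes, and the reassembled packet
 * header length becomes 5000 - 4 = 4996 = 2044 + 2044 + 908.
 */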
1604
1605 static void
1606 vge_txeof(struct vge_softc *sc)
1607 {
1608         struct ifnet *ifp;
1609         struct vge_tx_desc *cur_tx;
1610         struct vge_txdesc *txd;
1611         uint32_t txstat;
1612         int cons, prod;
1613
1614         VGE_LOCK_ASSERT(sc);
1615
1616         ifp = sc->vge_ifp;
1617
1618         if (sc->vge_cdata.vge_tx_cnt == 0)
1619                 return;
1620
1621         bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1622             sc->vge_cdata.vge_tx_ring_map,
1623             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1624
1625         /*
1626          * Go through our tx list and free mbufs for those
1627          * frames that have been transmitted.
1628          */
1629         cons = sc->vge_cdata.vge_tx_considx;
1630         prod = sc->vge_cdata.vge_tx_prodidx;
1631         for (; cons != prod; VGE_TX_DESC_INC(cons)) {
1632                 cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
1633                 txstat = le32toh(cur_tx->vge_sts);
1634                 if ((txstat & VGE_TDSTS_OWN) != 0)
1635                         break;
1636                 sc->vge_cdata.vge_tx_cnt--;
1637                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1638
1639                 txd = &sc->vge_cdata.vge_txdesc[cons];
1640                 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1641                     BUS_DMASYNC_POSTWRITE);
1642                 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
1643
1644                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1645                     __func__));
1646                 m_freem(txd->tx_m);
1647                 txd->tx_m = NULL;
1648                 txd->tx_desc->vge_frag[0].vge_addrhi = 0;
1649         }
1650         bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1651             sc->vge_cdata.vge_tx_ring_map,
1652             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1653         sc->vge_cdata.vge_tx_considx = cons;
1654         if (sc->vge_cdata.vge_tx_cnt == 0)
1655                 sc->vge_timer = 0;
1656 }
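
/*
 * Sketch of the reclaim walk above with hypothetical indices, assuming
 * a 256-entry Tx ring: if vge_tx_considx is 254 and vge_tx_prodidx is
 * 2, VGE_TX_DESC_INC() steps the consumer through 254, 255, 0, 1,
 * stopping early at any descriptor whose VGE_TDSTS_OWN bit is still
 * set (the NIC has not completed it yet). Only descriptors the
 * hardware has handed back get their DMA maps unloaded and their
 * mbufs freed.
 */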
1657
1658 static void
1659 vge_link_statchg(void *xsc)
1660 {
1661         struct vge_softc *sc;
1662         struct ifnet *ifp;
1663         struct mii_data *mii;
1664
1665         sc = xsc;
1666         ifp = sc->vge_ifp;
1667         VGE_LOCK_ASSERT(sc);
1668         mii = device_get_softc(sc->vge_miibus);
1669
1670         mii_pollstat(mii);
1671         if ((sc->vge_flags & VGE_FLAG_LINK) != 0) {
1672                 if (!(mii->mii_media_status & IFM_ACTIVE)) {
1673                         sc->vge_flags &= ~VGE_FLAG_LINK;
1674                         if_link_state_change(sc->vge_ifp,
1675                             LINK_STATE_DOWN);
1676                 }
1677         } else {
1678                 if (mii->mii_media_status & IFM_ACTIVE &&
1679                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1680                         sc->vge_flags |= VGE_FLAG_LINK;
1681                         if_link_state_change(sc->vge_ifp,
1682                             LINK_STATE_UP);
1683                         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1684                                 vge_start_locked(ifp);
1685                 }
1686         }
1687 }
1688
1689 #ifdef DEVICE_POLLING
1690 static int
1691 vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1692 {
1693         struct vge_softc *sc = ifp->if_softc;
1694         int rx_npkts = 0;
1695
1696         VGE_LOCK(sc);
1697         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1698                 goto done;
1699
1700         rx_npkts = vge_rxeof(sc, count);
1701         vge_txeof(sc);
1702
1703         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1704                 vge_start_locked(ifp);
1705
1706         if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1707                 uint32_t       status;
1708                 status = CSR_READ_4(sc, VGE_ISR);
1709                 if (status == 0xFFFFFFFF)
1710                         goto done;
1711                 if (status)
1712                         CSR_WRITE_4(sc, VGE_ISR, status);
1713
1714                 /*
1715                  * XXX check behaviour on receiver stalls.
1716                  */
1717
1718                 if (status & VGE_ISR_TXDMA_STALL ||
1719                     status & VGE_ISR_RXDMA_STALL) {
1720                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1721                         vge_init_locked(sc);
1722                 }
1723
1724                 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1725                         vge_rxeof(sc, count);
1726                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1727                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1728                 }
1729         }
1730 done:
1731         VGE_UNLOCK(sc);
1732         return (rx_npkts);
1733 }
1734 #endif /* DEVICE_POLLING */
1735
1736 static void
1737 vge_intr(void *arg)
1738 {
1739         struct vge_softc *sc;
1740         struct ifnet *ifp;
1741         uint32_t status;
1742
1743         sc = arg;
1744         VGE_LOCK(sc);
1745
1746         ifp = sc->vge_ifp;
1747         if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
1748             (ifp->if_flags & IFF_UP) == 0) {
1749                 VGE_UNLOCK(sc);
1750                 return;
1751         }
1752
1753 #ifdef DEVICE_POLLING
1754         if (ifp->if_capenable & IFCAP_POLLING) {
1755                 status = CSR_READ_4(sc, VGE_ISR);
1756                 CSR_WRITE_4(sc, VGE_ISR, status);
1757                 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0)
1758                         vge_link_statchg(sc);
1759                 VGE_UNLOCK(sc);
1760                 return;
1761         }
1762 #endif
1763
1764         /* Disable interrupts */
1765         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1766         status = CSR_READ_4(sc, VGE_ISR);
1767         CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
1768         /* If the card has gone away, the read returns 0xffffffff. */
1769         if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
1770                 goto done;
1771         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1772                 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1773                         vge_rxeof(sc, VGE_RX_DESC_CNT);
1774                 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1775                         vge_rxeof(sc, VGE_RX_DESC_CNT);
1776                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1777                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1778                 }
1779
1780                 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
1781                         vge_txeof(sc);
1782
1783                 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
1784                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1785                         vge_init_locked(sc);
1786                 }
1787
1788                 if (status & VGE_ISR_LINKSTS)
1789                         vge_link_statchg(sc);
1790         }
1791 done:
1792         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1793                 /* Re-enable interrupts */
1794                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1795
1796                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1797                         vge_start_locked(ifp);
1798         }
1799         VGE_UNLOCK(sc);
1800 }
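
/*
 * The handler above follows a mask/ack/service/unmask pattern: the
 * global interrupt mask is cleared (the VGE_CRC3 write), the latched
 * causes are read from VGE_ISR and acknowledged by writing them back,
 * the Rx/Tx rings are serviced, and the mask is restored through
 * VGE_CRS3 only if the interface is still marked running. Any pending
 * output is then kicked off while the softc lock is still held.
 */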
1801
1802 static int
1803 vge_encap(struct vge_softc *sc, struct mbuf **m_head)
1804 {
1805         struct vge_txdesc *txd;
1806         struct vge_tx_frag *frag;
1807         struct mbuf *m;
1808         bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
1809         int error, i, nsegs, padlen;
1810         uint32_t cflags;
1811
1812         VGE_LOCK_ASSERT(sc);
1813
1814         M_ASSERTPKTHDR((*m_head));
1815
1816         /* Argh. This chip does not autopad short frames. */
1817         if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1818                 m = *m_head;
1819                 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
1820                 if (M_WRITABLE(m) == 0) {
1821                         /* Get a writable copy. */
1822                         m = m_dup(*m_head, M_DONTWAIT);
1823                         m_freem(*m_head);
1824                         if (m == NULL) {
1825                                 *m_head = NULL;
1826                                 return (ENOBUFS);
1827                         }
1828                         *m_head = m;
1829                 }
1830                 if (M_TRAILINGSPACE(m) < padlen) {
1831                         m = m_defrag(m, M_DONTWAIT);
1832                         if (m == NULL) {
1833                                 m_freem(*m_head);
1834                                 *m_head = NULL;
1835                                 return (ENOBUFS);
1836                         }
1837                 }
1838                 /*
1839                  * Manually pad short frames, and zero the pad space
1840                  * to avoid leaking data.
1841                  */
1842                 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1843                 m->m_pkthdr.len += padlen;
1844                 m->m_len = m->m_pkthdr.len;
1845                 *m_head = m;
1846         }
1847
1848         txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
1849
1850         error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1851             txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1852         if (error == EFBIG) {
1853                 m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS);
1854                 if (m == NULL) {
1855                         m_freem(*m_head);
1856                         *m_head = NULL;
1857                         return (ENOMEM);
1858                 }
1859                 *m_head = m;
1860                 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1861                     txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1862                 if (error != 0) {
1863                         m_freem(*m_head);
1864                         *m_head = NULL;
1865                         return (error);
1866                 }
1867         } else if (error != 0)
1868                 return (error);
1869         bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1870             BUS_DMASYNC_PREWRITE);
1871
1872         m = *m_head;
1873         cflags = 0;
1874
1875         /* Configure checksum offload. */
1876         if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1877                 cflags |= VGE_TDCTL_IPCSUM;
1878         if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1879                 cflags |= VGE_TDCTL_TCPCSUM;
1880         if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1881                 cflags |= VGE_TDCTL_UDPCSUM;
1882
1883         /* Configure VLAN. */
1884         if ((m->m_flags & M_VLANTAG) != 0)
1885                 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
1886         txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
1887         /*
1888          * XXX
1889          * The Velocity family seems to support TSO, but no information
1890          * on MSS configuration is available. Also, the number of
1891          * fragments supported by a descriptor is too small to hold an
1892          * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
1893          * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build a
1894          * longer chain of buffers, but no additional information is
1895          * available.
1896          *
1897          * When telling the chip how many segments there are, we
1898          * must use nsegs + 1 instead of just nsegs. Darned if I
1899          * know why. This also means we can't use the last fragment
1900          * field of the Tx descriptor.
1901          */
1902         txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
1903             VGE_TD_LS_NORM);
1904         for (i = 0; i < nsegs; i++) {
1905                 frag = &txd->tx_desc->vge_frag[i];
1906                 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
1907                 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
1908                     (VGE_BUFLEN(txsegs[i].ds_len) << 16));
1909         }
1910
1911         sc->vge_cdata.vge_tx_cnt++;
1912         VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
1913
1914         /*
1915          * Finally request interrupt and give the first descriptor
1916          * ownership to hardware.
1917          */
1918         txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
1919         txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
1920         txd->tx_m = m;
1921
1922         return (0);
1923 }
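
/*
 * Sketch of the descriptor words vge_encap() builds, using hypothetical
 * values: for a 1000-byte frame mapped into 3 DMA segments with
 * CSUM_TCP requested, vge_sts starts as htole32(1000 << 16) and
 * vge_ctl as htole32(VGE_TDCTL_TCPCSUM | (4 << 28) | VGE_TD_LS_NORM),
 * where the segment-count field holds nsegs + 1 = 4 per the workaround
 * noted above. Each vge_frag[i] then carries the low 32 address bits,
 * plus the high address bits with the fragment length packed in at bit
 * 16. VGE_TDCTL_TIC and VGE_TDSTS_OWN are OR'd in last, so the chip
 * never sees a partially built descriptor marked as its own.
 */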
1924
1925 /*
1926  * Main transmit routine.
1927  */
1928
1929 static void
1930 vge_start(struct ifnet *ifp)
1931 {
1932         struct vge_softc *sc;
1933
1934         sc = ifp->if_softc;
1935         VGE_LOCK(sc);
1936         vge_start_locked(ifp);
1937         VGE_UNLOCK(sc);
1938 }
1939
1940
1941 static void
1942 vge_start_locked(struct ifnet *ifp)
1943 {
1944         struct vge_softc *sc;
1945         struct vge_txdesc *txd;
1946         struct mbuf *m_head;
1947         int enq, idx;
1948
1949         sc = ifp->if_softc;
1950
1951         VGE_LOCK_ASSERT(sc);
1952
1953         if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
1954             (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1955             IFF_DRV_RUNNING)
1956                 return;
1957
1958         idx = sc->vge_cdata.vge_tx_prodidx;
1959         VGE_TX_DESC_DEC(idx);
1960         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1961             sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
1962                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1963                 if (m_head == NULL)
1964                         break;
1965                 /*
1966                  * Pack the data into the transmit ring. If we
1967                  * don't have room, set the OACTIVE flag and wait
1968                  * for the NIC to drain the ring.
1969                  */
1970                 if (vge_encap(sc, &m_head)) {
1971                         if (m_head == NULL)
1972                                 break;
1973                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1974                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1975                         break;
1976                 }
1977
1978                 txd = &sc->vge_cdata.vge_txdesc[idx];
1979                 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
1980                 VGE_TX_DESC_INC(idx);
1981
1982                 enq++;
1983                 /*
1984                  * If there's a BPF listener, bounce a copy of this frame
1985                  * to him.
1986                  */
1987                 ETHER_BPF_MTAP(ifp, m_head);
1988         }
1989
1990         if (enq > 0) {
1991                 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1992                     sc->vge_cdata.vge_tx_ring_map,
1993                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1994                 /* Issue a transmit command. */
1995                 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1996                 /*
1997                  * Set a timeout in case the chip goes out to lunch.
1998                  */
1999                 sc->vge_timer = 5;
2000         }
2001 }
2002
2003 static void
2004 vge_init(void *xsc)
2005 {
2006         struct vge_softc *sc = xsc;
2007
2008         VGE_LOCK(sc);
2009         vge_init_locked(sc);
2010         VGE_UNLOCK(sc);
2011 }
2012
2013 static void
2014 vge_init_locked(struct vge_softc *sc)
2015 {
2016         struct ifnet *ifp = sc->vge_ifp;
2017         struct mii_data *mii;
2018         int error, i;
2019
2020         VGE_LOCK_ASSERT(sc);
2021         mii = device_get_softc(sc->vge_miibus);
2022
2023         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2024                 return;
2025
2026         /*
2027          * Cancel pending I/O and free all RX/TX buffers.
2028          */
2029         vge_stop(sc);
2030         vge_reset(sc);
2031
2032         /*
2033          * Initialize the RX and TX descriptors and mbufs.
2034          */
2035
2036         error = vge_rx_list_init(sc);
2037         if (error != 0) {
2038                 device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
2039                 return;
2040         }
2041         vge_tx_list_init(sc);
2042         /* Clear MAC statistics. */
2043         vge_stats_clear(sc);
2044         /* Set our station address */
2045         for (i = 0; i < ETHER_ADDR_LEN; i++)
2046                 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
2047
2048         /*
2049          * Set receive FIFO threshold. Also allow transmission and
2050          * reception of VLAN tagged frames.
2051          */
2052         CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2053         CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
2054
2055         /* Set DMA burst length */
2056         CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2057         CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2058
2059         CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2060
2061         /* Set collision backoff algorithm */
2062         CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2063             VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2064         CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2065
2066         /* Disable LPSEL field in priority resolution */
2067         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2068
2069         /*
2070          * Load the addresses of the DMA queues into the chip.
2071          * Note that we only use one transmit queue.
2072          */
2073
2074         CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2075             VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2076         CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2077             VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2078         CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2079
2080         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2081             VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2082         CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2083         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
2084
2085         /* Configure interrupt moderation. */
2086         vge_intr_holdoff(sc);
2087
2088         /* Enable and wake up the RX descriptor queue */
2089         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2090         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2091
2092         /* Enable the TX descriptor queue */
2093         CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2094
2095         /* Init the cam filter. */
2096         vge_cam_clear(sc);
2097
2098         /* Set up receiver filter. */
2099         vge_rxfilter(sc);
2100         vge_setvlan(sc);
2101
2102         /* Enable flow control */
2103
2104         CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
2105
2106         /* Enable jumbo frame reception (if desired) */
2107
2108         /* Start the MAC. */
2109         CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2110         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2111         CSR_WRITE_1(sc, VGE_CRS0,
2112             VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2113
2114 #ifdef DEVICE_POLLING
2115         /*
2116          * When polling, disable all interrupts except link state changes.
2117          */
2118         if (ifp->if_capenable & IFCAP_POLLING) {
2119                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
2120         } else /* otherwise ... */
2121 #endif
2122         {
2123                 /*
2124                  * Enable interrupts.
2125                  */
2126                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2127         }
2128         CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2129         CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2130
2131         sc->vge_flags &= ~VGE_FLAG_LINK;
2132         mii_mediachg(mii);
2133
2134         ifp->if_drv_flags |= IFF_DRV_RUNNING;
2135         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2136         callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2137 }
2138
2139 /*
2140  * Set media options.
2141  */
2142 static int
2143 vge_ifmedia_upd(struct ifnet *ifp)
2144 {
2145         struct vge_softc *sc;
2146         struct mii_data *mii;
2147         int error;
2148
2149         sc = ifp->if_softc;
2150         VGE_LOCK(sc);
2151         mii = device_get_softc(sc->vge_miibus);
2152         error = mii_mediachg(mii);
2153         VGE_UNLOCK(sc);
2154
2155         return (error);
2156 }
2157
2158 /*
2159  * Report current media status.
2160  */
2161 static void
2162 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2163 {
2164         struct vge_softc *sc;
2165         struct mii_data *mii;
2166
2167         sc = ifp->if_softc;
2168         mii = device_get_softc(sc->vge_miibus);
2169
2170         VGE_LOCK(sc);
2171         if ((ifp->if_flags & IFF_UP) == 0) {
2172                 VGE_UNLOCK(sc);
2173                 return;
2174         }
2175         mii_pollstat(mii);
2176         VGE_UNLOCK(sc);
2177         ifmr->ifm_active = mii->mii_media_active;
2178         ifmr->ifm_status = mii->mii_media_status;
2179 }
2180
2181 static void
2182 vge_miibus_statchg(device_t dev)
2183 {
2184         struct vge_softc *sc;
2185         struct mii_data *mii;
2186         struct ifmedia_entry *ife;
2187
2188         sc = device_get_softc(dev);
2189         mii = device_get_softc(sc->vge_miibus);
2190         ife = mii->mii_media.ifm_cur;
2191
2192         /*
2193          * If the user manually selects a media mode, we need to turn
2194          * on the forced MAC mode bit in the DIAGCTL register. If the
2195          * user happens to choose a full duplex mode, we also need to
2196          * set the 'force full duplex' bit. This applies only to
2197          * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2198          * mode is disabled, and in 1000baseT mode, full duplex is
2199          * always implied, so we turn on the forced mode bit but leave
2200          * the FDX bit cleared.
2201          */
2202
2203         switch (IFM_SUBTYPE(ife->ifm_media)) {
2204         case IFM_AUTO:
2205                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2206                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2207                 break;
2208         case IFM_1000_T:
2209                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2210                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2211                 break;
2212         case IFM_100_TX:
2213         case IFM_10_T:
2214                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2215                 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2216                         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2217                 } else {
2218                         CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2219                 }
2220                 break;
2221         default:
2222                 device_printf(dev, "unknown media type: %x\n",
2223                     IFM_SUBTYPE(ife->ifm_media));
2224                 break;
2225         }
2226 }
2227
2228 static int
2229 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2230 {
2231         struct vge_softc *sc = ifp->if_softc;
2232         struct ifreq *ifr = (struct ifreq *) data;
2233         struct mii_data *mii;
2234         int error = 0, mask;
2235
2236         switch (command) {
2237         case SIOCSIFMTU:
2238                 VGE_LOCK(sc);
2239                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
2240                         error = EINVAL;
2241                 else if (ifp->if_mtu != ifr->ifr_mtu) {
2242                         if (ifr->ifr_mtu > ETHERMTU &&
2243                             (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
2244                                 error = EINVAL;
2245                         else
2246                                 ifp->if_mtu = ifr->ifr_mtu;
2247                 }
2248                 VGE_UNLOCK(sc);
2249                 break;
2250         case SIOCSIFFLAGS:
2251                 VGE_LOCK(sc);
2252                 if ((ifp->if_flags & IFF_UP) != 0) {
2253                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2254                             ((ifp->if_flags ^ sc->vge_if_flags) &
2255                             (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2256                                 vge_rxfilter(sc);
2257                         else
2258                                 vge_init_locked(sc);
2259                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2260                         vge_stop(sc);
2261                 sc->vge_if_flags = ifp->if_flags;
2262                 VGE_UNLOCK(sc);
2263                 break;
2264         case SIOCADDMULTI:
2265         case SIOCDELMULTI:
2266                 VGE_LOCK(sc);
2267                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2268                         vge_rxfilter(sc);
2269                 VGE_UNLOCK(sc);
2270                 break;
2271         case SIOCGIFMEDIA:
2272         case SIOCSIFMEDIA:
2273                 mii = device_get_softc(sc->vge_miibus);
2274                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2275                 break;
2276         case SIOCSIFCAP:
2277                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2278 #ifdef DEVICE_POLLING
2279                 if (mask & IFCAP_POLLING) {
2280                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
2281                                 error = ether_poll_register(vge_poll, ifp);
2282                                 if (error)
2283                                         return (error);
2284                                 VGE_LOCK(sc);
2285                                 /* Disable interrupts. */
2286                                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
2287                                 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2288                                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2289                                 ifp->if_capenable |= IFCAP_POLLING;
2290                                 VGE_UNLOCK(sc);
2291                         } else {
2292                                 error = ether_poll_deregister(ifp);
2293                                 /* Enable interrupts. */
2294                                 VGE_LOCK(sc);
2295                                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2296                                 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2297                                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2298                                 ifp->if_capenable &= ~IFCAP_POLLING;
2299                                 VGE_UNLOCK(sc);
2300                         }
2301                 }
2302 #endif /* DEVICE_POLLING */
2303                 VGE_LOCK(sc);
2304                 if ((mask & IFCAP_TXCSUM) != 0 &&
2305                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2306                         ifp->if_capenable ^= IFCAP_TXCSUM;
2307                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2308                                 ifp->if_hwassist |= VGE_CSUM_FEATURES;
2309                         else
2310                                 ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
2311                 }
2312                 if ((mask & IFCAP_RXCSUM) != 0 &&
2313                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
2314                         ifp->if_capenable ^= IFCAP_RXCSUM;
2315                 if ((mask & IFCAP_WOL_UCAST) != 0 &&
2316                     (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
2317                         ifp->if_capenable ^= IFCAP_WOL_UCAST;
2318                 if ((mask & IFCAP_WOL_MCAST) != 0 &&
2319                     (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2320                         ifp->if_capenable ^= IFCAP_WOL_MCAST;
2321                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2322                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2323                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2324                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2325                     (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2326                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2327                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2328                     (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
2329                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2330                         vge_setvlan(sc);
2331                 }
2332                 VGE_UNLOCK(sc);
2333                 VLAN_CAPABILITIES(ifp);
2334                 break;
2335         default:
2336                 error = ether_ioctl(ifp, command, data);
2337                 break;
2338         }
2339
2340         return (error);
2341 }
2342
2343 static void
2344 vge_watchdog(void *arg)
2345 {
2346         struct vge_softc *sc;
2347         struct ifnet *ifp;
2348
2349         sc = arg;
2350         VGE_LOCK_ASSERT(sc);
2351         vge_stats_update(sc);
2352         callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2353         if (sc->vge_timer == 0 || --sc->vge_timer > 0)
2354                 return;
2355
2356         ifp = sc->vge_ifp;
2357         if_printf(ifp, "watchdog timeout\n");
2358         ifp->if_oerrors++;
2359
2360         vge_txeof(sc);
2361         vge_rxeof(sc, VGE_RX_DESC_CNT);
2362
2363         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2364         vge_init_locked(sc);
2365 }
2366
2367 /*
2368  * Stop the adapter and free any mbufs allocated to the
2369  * RX and TX lists.
2370  */
2371 static void
2372 vge_stop(struct vge_softc *sc)
2373 {
2374         struct ifnet *ifp;
2375
2376         VGE_LOCK_ASSERT(sc);
2377         ifp = sc->vge_ifp;
2378         sc->vge_timer = 0;
2379         callout_stop(&sc->vge_watchdog);
2380
2381         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2382
2383         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2384         CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2385         CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2386         CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2387         CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2388         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2389
2390         vge_stats_update(sc);
2391         VGE_CHAIN_RESET(sc);
2392         vge_txeof(sc);
2393         vge_freebufs(sc);
2394 }
2395
2396 /*
2397  * Device suspend routine.  Stop the interface and save some PCI
2398  * settings in case the BIOS doesn't restore them properly on
2399  * resume.
2400  */
2401 static int
2402 vge_suspend(device_t dev)
2403 {
2404         struct vge_softc *sc;
2405
2406         sc = device_get_softc(dev);
2407
2408         VGE_LOCK(sc);
2409         vge_stop(sc);
2410         vge_setwol(sc);
2411         sc->vge_flags |= VGE_FLAG_SUSPENDED;
2412         VGE_UNLOCK(sc);
2413
2414         return (0);
2415 }
2416
2417 /*
2418  * Device resume routine.  Restore some PCI settings in case the BIOS
2419  * doesn't, re-enable busmastering, and restart the interface if
2420  * appropriate.
2421  */
2422 static int
2423 vge_resume(device_t dev)
2424 {
2425         struct vge_softc *sc;
2426         struct ifnet *ifp;
2427         uint16_t pmstat;
2428
2429         sc = device_get_softc(dev);
2430         VGE_LOCK(sc);
2431         if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
2432                 /* Disable PME and clear PME status. */
2433                 pmstat = pci_read_config(sc->vge_dev,
2434                     sc->vge_pmcap + PCIR_POWER_STATUS, 2);
2435                 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
2436                         pmstat &= ~PCIM_PSTAT_PMEENABLE;
2437                         pci_write_config(sc->vge_dev,
2438                             sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
2439                 }
2440         }
2441         vge_clrwol(sc);
2442         /* Restart MII auto-polling. */
2443         vge_miipoll_start(sc);
2444         ifp = sc->vge_ifp;
2445         /* Reinitialize interface if necessary. */
2446         if ((ifp->if_flags & IFF_UP) != 0) {
2447                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2448                 vge_init_locked(sc);
2449         }
2450         sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
2451         VGE_UNLOCK(sc);
2452
2453         return (0);
2454 }
2455
2456 /*
2457  * Stop all chip I/O so that the kernel's probe routines don't
2458  * get confused by errant DMAs when rebooting.
2459  */
2460 static int
2461 vge_shutdown(device_t dev)
2462 {
2463
2464         return (vge_suspend(dev));
2465 }
2466
2467 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)    \
2468             SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2469
2470 static void
2471 vge_sysctl_node(struct vge_softc *sc)
2472 {
2473         struct sysctl_ctx_list *ctx;
2474         struct sysctl_oid_list *child, *parent;
2475         struct sysctl_oid *tree;
2476         struct vge_hw_stats *stats;
2477
2478         stats = &sc->vge_stats;
2479         ctx = device_get_sysctl_ctx(sc->vge_dev);
2480         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
2481
2482         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
2483             CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
2484         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
2485             CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "Rx coalescing packet count");
2486         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
2487             CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "Tx coalescing packet count");
2488
2489         /* Pull in device tunables. */
2490         sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
2491         resource_int_value(device_get_name(sc->vge_dev),
2492             device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
2493         sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
2494         resource_int_value(device_get_name(sc->vge_dev),
2495             device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
2496         sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
2497         resource_int_value(device_get_name(sc->vge_dev),
2498             device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
2499
2500         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2501             NULL, "VGE statistics");
2502         parent = SYSCTL_CHILDREN(tree);
2503
2504         /* Rx statistics. */
2505         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2506             NULL, "RX MAC statistics");
2507         child = SYSCTL_CHILDREN(tree);
2508         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2509             &stats->rx_frames, "frames");
2510         VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2511             &stats->rx_good_frames, "Good frames");
2512         VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2513             &stats->rx_fifo_oflows, "FIFO overflows");
2514         VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2515             &stats->rx_runts, "Too short frames");
2516         VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2517             &stats->rx_runts_errs, "Too short frames with errors");
2518         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2519             &stats->rx_pkts_64, "64 bytes frames");
2520         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2521             &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2522         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2523             &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2524         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2525             &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2526         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2527             &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2528         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2529             &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2530         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2531             &stats->rx_pkts_1519_max, "1519 to max frames");
2532         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2533             &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors");
2534         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2535             &stats->rx_jumbos, "Jumbo frames");
2536         VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2537             &stats->rx_crcerrs, "CRC errors");
2538         VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2539             &stats->rx_pause_frames, "Pause frames");
2540         VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2541             &stats->rx_alignerrs, "Alignment errors");
2542         VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2543             &stats->rx_nobufs, "Frames with no buffer event");
2544         VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2545             &stats->rx_symerrs, "Frames with symbol errors");
2546         VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2547             &stats->rx_lenerrs, "Frames with mismatched length");
2548
2549         /* Tx statistics. */
2550         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2551             NULL, "TX MAC statistics");
2552         child = SYSCTL_CHILDREN(tree);
2553         VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2554             &stats->tx_good_frames, "Good frames");
2555         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2556             &stats->tx_pkts_64, "64 bytes frames");
2557         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2558             &stats->tx_pkts_65_127, "65 to 127 bytes frames");
2559         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2560             &stats->tx_pkts_128_255, "128 to 255 bytes frames");
2561         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2562             &stats->tx_pkts_256_511, "256 to 511 bytes frames");
2563         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2564             &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
2565         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2566             &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
2567         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2568             &stats->tx_jumbos, "Jumbo frames");
2569         VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
2570             &stats->tx_colls, "Collisions");
2571         VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2572             &stats->tx_latecolls, "Late collisions");
2573         VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2574             &stats->tx_pause, "Pause frames");
2575 #ifdef VGE_ENABLE_SQEERR
2576         VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
2577             &stats->tx_sqeerrs, "SQE errors");
2578 #endif
2579         /* Clear MAC statistics. */
2580         vge_stats_clear(sc);
2581 }
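
/*
 * The resource_int_value() calls above let all three knobs be seeded
 * from device hints and then adjusted at runtime through the
 * dev.vge.<unit> sysctl tree. A hypothetical example for unit 0, with
 * values chosen purely for illustration:
 *
 *      # /boot/device.hints
 *      hint.vge.0.int_holdoff="20"
 *      hint.vge.0.rx_coal_pkt="64"
 *
 *      # at runtime
 *      sysctl dev.vge.0.tx_coal_pkt=32
 */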
2582
2583 #undef  VGE_SYSCTL_STAT_ADD32
2584
2585 static void
2586 vge_stats_clear(struct vge_softc *sc)
2587 {
2588         int i;
2589
2590         CSR_WRITE_1(sc, VGE_MIBCSR,
2591             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
2592         CSR_WRITE_1(sc, VGE_MIBCSR,
2593             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
2594         for (i = VGE_TIMEOUT; i > 0; i--) {
2595                 DELAY(1);
2596                 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
2597                         break;
2598         }
2599         if (i == 0)
2600                 device_printf(sc->vge_dev, "MIB clear timed out!\n");
2601         CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
2602             ~VGE_MIBCSR_FREEZE);
2603 }
2604
2605 static void
2606 vge_stats_update(struct vge_softc *sc)
2607 {
2608         struct vge_hw_stats *stats;
2609         struct ifnet *ifp;
2610         uint32_t mib[VGE_MIB_CNT], val;
2611         int i;
2612
2613         VGE_LOCK_ASSERT(sc);
2614
2615         stats = &sc->vge_stats;
2616         ifp = sc->vge_ifp;
2617
2618         CSR_WRITE_1(sc, VGE_MIBCSR,
2619             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
2620         for (i = VGE_TIMEOUT; i > 0; i--) {
2621                 DELAY(1);
2622                 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
2623                         break;
2624         }
2625         if (i == 0) {
2626                 device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
2627                 vge_stats_clear(sc);
2628                 return;
2629         }
2630
2631         bzero(mib, sizeof(mib));
2632 reset_idx:
2633         /* Set MIB read index to 0. */
2634         CSR_WRITE_1(sc, VGE_MIBCSR,
2635             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
2636         for (i = 0; i < VGE_MIB_CNT; i++) {
2637                 val = CSR_READ_4(sc, VGE_MIBDATA);
2638                 if (i != VGE_MIB_DATA_IDX(val)) {
2639                         /* Reading interrupted. */
2640                         goto reset_idx;
2641                 }
2642                 mib[i] = val & VGE_MIB_DATA_MASK;
2643         }
2644
2645         /* Rx stats. */
2646         stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
2647         stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
2648         stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
2649         stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
2650         stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
2651         stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
2652         stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
2653         stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
2654         stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
2655         stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
2656         stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
2657         stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
2658         stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
2659         stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
2660         stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
2661         stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
2662         stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
2663         stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
2664         stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
2665         stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];
2666
2667         /* Tx stats. */
2668         stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
2669         stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
2670         stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
2671         stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
2672         stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
2673         stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
2674         stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
2675         stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
2676         stats->tx_colls += mib[VGE_MIB_TX_COLLS];
2677         stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
2678 #ifdef VGE_ENABLE_SQEERR
2679         stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
2680 #endif
2681         stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];
2682
2683         /* Update counters in ifnet. */
2684         ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];
2685
2686         ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
2687             mib[VGE_MIB_TX_LATECOLLS];
2688
2689         ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
2690             mib[VGE_MIB_TX_LATECOLLS];
2691
2692         ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];
2693
2694         ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
2695             mib[VGE_MIB_RX_RUNTS] +
2696             mib[VGE_MIB_RX_RUNTS_ERRS] +
2697             mib[VGE_MIB_RX_CRCERRS] +
2698             mib[VGE_MIB_RX_ALIGNERRS] +
2699             mib[VGE_MIB_RX_NOBUFS] +
2700             mib[VGE_MIB_RX_SYMERRS] +
2701             mib[VGE_MIB_RX_LENERRS];
2702 }
2703
2704 static void
2705 vge_intr_holdoff(struct vge_softc *sc)
2706 {
2707         uint8_t intctl;
2708
2709         VGE_LOCK_ASSERT(sc);
2710
2711         /*
2712          * Set the Tx interrupt suppression threshold.
2713          * It's possible to use the single-shot timer in the VGE_CRS1
2714          * register in the Tx path so that the driver could eliminate
2715          * most Tx completion interrupts. However, this requires an
2716          * additional access to the VGE_CRS1 register to reload the timer
2717          * in addition to activating the Tx kick command. Another downside
2718          * is that we don't know in advance what single-shot timer value
2719          * to use, so reclaiming transmitted mbufs could be delayed
2720          * considerably, which in turn slows down Tx operation.
2721          */
2722         CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
2723         CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);
2724
2725         /* Set Rx interrupt suppression threshold. */
2726         CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2727         CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);
2728
2729         intctl = CSR_READ_1(sc, VGE_INTCTL1);
2730         intctl &= ~VGE_INTCTL_SC_RELOAD;
2731         intctl |= VGE_INTCTL_HC_RELOAD;
2732         if (sc->vge_tx_coal_pkt <= 0)
2733                 intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
2734         else
2735                 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
2736         if (sc->vge_rx_coal_pkt <= 0)
2737                 intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
2738         else
2739                 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
2740         CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
2741         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
2742         if (sc->vge_int_holdoff > 0) {
2743                 /* Set interrupt holdoff timer. */
2744                 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2745                 CSR_WRITE_1(sc, VGE_INTHOLDOFF,
2746                     VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
2747                 /* Enable holdoff timer. */
2748                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2749         }
2750 }
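
/*
 * A rough model of the knobs above, assuming they behave like typical
 * NIC interrupt coalescing (numbers hypothetical): with vge_rx_coal_pkt
 * at 64, the chip suppresses the Rx completion interrupt until roughly
 * 64 frames have arrived, so a 1000-frame burst costs on the order of
 * 16 interrupts instead of up to 1000, while vge_int_holdoff caps how
 * long a partially filled batch may be delayed before an interrupt is
 * raised anyway.
 */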
2751
2752 static void
2753 vge_setlinkspeed(struct vge_softc *sc)
2754 {
2755         struct mii_data *mii;
2756         int aneg, i;
2757
2758         VGE_LOCK_ASSERT(sc);
2759
2760         mii = device_get_softc(sc->vge_miibus);
2761         mii_pollstat(mii);
2762         aneg = 0;
2763         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2764             (IFM_ACTIVE | IFM_AVALID)) {
2765                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2766                 case IFM_10_T:
2767                 case IFM_100_TX:
2768                         return;
2769                 case IFM_1000_T:
2770                         aneg++;
2771                 default:
2772                         break;
2773                 }
2774         }
2775         vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
2776         vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
2777             ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2778         vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2779             BMCR_AUTOEN | BMCR_STARTNEG);
2780         DELAY(1000);
2781         if (aneg != 0) {
2782                 /* Poll link state until vge(4) gets a 10/100 link. */
2783                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2784                         mii_pollstat(mii);
2785                         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2786                             == (IFM_ACTIVE | IFM_AVALID)) {
2787                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2788                                 case IFM_10_T:
2789                                 case IFM_100_TX:
2790                                         return;
2791                                 default:
2792                                         break;
2793                                 }
2794                         }
2795                         VGE_UNLOCK(sc);
2796                         pause("vgelnk", hz);
2797                         VGE_LOCK(sc);
2798                 }
2799                 if (i == MII_ANEGTICKS_GIGE)
2800                         device_printf(sc->vge_dev, "establishing link failed, "
2801                             "WOL may not work!\n");
2802         }
2803         /*
2804          * No link; force the MAC to a 100Mbps, full-duplex link.
2805          * This is a last resort and may or may not work.
2806          */
2807         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2808         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2809 }
2810
2811 static void
2812 vge_setwol(struct vge_softc *sc)
2813 {
2814         struct ifnet *ifp;
2815         uint16_t pmstat;
2816         uint8_t val;
2817
2818         VGE_LOCK_ASSERT(sc);
2819
2820         if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
2821                 /* No PME capability, PHY power down. */
2822                 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2823                     BMCR_PDOWN);
2824                 vge_miipoll_stop(sc);
2825                 return;
2826         }
2827
2828         ifp = sc->vge_ifp;
2829
2830         /* Clear WOL on pattern match. */
2831         CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
2832         /* Disable WOL on magic/unicast packet. */
2833         CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
2834         CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
2835             VGE_WOLCFG_PMEOVR);
2836         if ((ifp->if_capenable & IFCAP_WOL) != 0) {
2837                 vge_setlinkspeed(sc);
2838                 val = 0;
2839                 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2840                         val |= VGE_WOLCR1_UCAST;
2841                 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2842                         val |= VGE_WOLCR1_MAGIC;
2843                 CSR_WRITE_1(sc, VGE_WOLCR1S, val);
2844                 val = 0;
2845                 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
2846                         val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
2847                 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
2848                 /* Disable MII auto-polling. */
2849                 vge_miipoll_stop(sc);
2850         }
2851         CSR_SETBIT_1(sc, VGE_DIAGCTL,
2852             VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
2853         CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
2854
2855         /* Clear WOL status on pattern match. */
2856         CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
2857         CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
2858
2859         val = CSR_READ_1(sc, VGE_PWRSTAT);
2860         val |= VGE_STICKHW_SWPTAG;
2861         CSR_WRITE_1(sc, VGE_PWRSTAT, val);
2862         /* Put hardware into sleep. */
2863         val = CSR_READ_1(sc, VGE_PWRSTAT);
2864         val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
2865         CSR_WRITE_1(sc, VGE_PWRSTAT, val);
2866         /* Request PME if WOL is requested. */
2867         pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
2868             PCIR_POWER_STATUS, 2);
2869         pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2870         if ((ifp->if_capenable & IFCAP_WOL) != 0)
2871                 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2872         pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
2873             pmstat, 2);
2874 }
2875
2876 static void
2877 vge_clrwol(struct vge_softc *sc)
2878 {
2879         uint8_t val;
2880
2881         val = CSR_READ_1(sc, VGE_PWRSTAT);
2882         val &= ~VGE_STICKHW_SWPTAG;
2883         CSR_WRITE_1(sc, VGE_PWRSTAT, val);
2884         /* Disable WOL and clear power state indicator. */
2885         val = CSR_READ_1(sc, VGE_PWRSTAT);
2886         val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
2887         CSR_WRITE_1(sc, VGE_PWRSTAT, val);
2888
2889         CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
2890         CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2891
2892         /* Clear WOL on pattern match. */
2893         CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
2894         /* Disable WOL on magic/unicast packet. */
2895         CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
2896         CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
2897             VGE_WOLCFG_PMEOVR);
2898         /* Clear WOL status on pattern match. */
2899         CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
2900         CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
2901 }