/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/clock.h>      /* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#include "opt_bge.h"

#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
#define ETHER_MIN_NOPAD         (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX         64      /* Maximum device description length */

static struct bge_type bge_devs[] = {
        { ALT_VENDORID, ALT_DEVICEID_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { ALT_VENDORID, ALT_DEVICEID_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
                "Broadcom BCM5700 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
                "Broadcom BCM5701 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
                "Broadcom BCM5702 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
                "Broadcom BCM5702X Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
                "Broadcom BCM5703 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
                "Broadcom BCM5703X Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
                "Broadcom BCM5704C Dual Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
                "Broadcom BCM5704S Dual Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
                "Broadcom BCM5705 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
                "Broadcom BCM5705K Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
                "Broadcom BCM5705M Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
                "Broadcom BCM5714C Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
                "Broadcom BCM5721 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
                "Broadcom BCM5750 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
                "Broadcom BCM5750M Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
                "Broadcom BCM5751 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
                "Broadcom BCM5751M Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
                "Broadcom BCM5752 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
                "Broadcom BCM5782 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
                "Broadcom BCM5788 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
                "Broadcom BCM5789 Gigabit Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
                "Broadcom BCM5901 Fast Ethernet" },
        { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
                "Broadcom BCM5901A2 Fast Ethernet" },
        { SK_VENDORID, SK_DEVICEID_ALTIMA,
                "SysKonnect Gigabit Ethernet" },
        { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
                "Altima AC1000 Gigabit Ethernet" },
        { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
                "Altima AC1002 Gigabit Ethernet" },
        { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
                "Altima AC9100 Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int bge_probe            (device_t);
static int bge_attach           (device_t);
static int bge_detach           (device_t);
static int bge_suspend          (device_t);
static int bge_resume           (device_t);
static void bge_release_resources
                                (struct bge_softc *);
static void bge_dma_map_addr    (void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc        (device_t);
static void bge_dma_free        (struct bge_softc *);

static void bge_txeof           (struct bge_softc *);
static void bge_rxeof           (struct bge_softc *);

static void bge_tick_locked     (struct bge_softc *);
static void bge_tick            (void *);
static void bge_stats_update    (struct bge_softc *);
static void bge_stats_update_regs
                                (struct bge_softc *);
static int bge_encap            (struct bge_softc *, struct mbuf *,
                                        u_int32_t *);

static void bge_intr            (void *);
static void bge_start_locked    (struct ifnet *);
static void bge_start           (struct ifnet *);
static int bge_ioctl            (struct ifnet *, u_long, caddr_t);
static void bge_init_locked     (struct bge_softc *);
static void bge_init            (void *);
static void bge_stop            (struct bge_softc *);
static void bge_watchdog                (struct ifnet *);
static void bge_shutdown                (device_t);
static int bge_ifmedia_upd      (struct ifnet *);
static void bge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);

static u_int8_t bge_eeprom_getbyte      (struct bge_softc *, int, u_int8_t *);
static int bge_read_eeprom      (struct bge_softc *, caddr_t, int, int);

static void bge_setmulti        (struct bge_softc *);

static int bge_newbuf_std       (struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo     (struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std (struct bge_softc *);
static void bge_free_rx_ring_std        (struct bge_softc *);
static int bge_init_rx_ring_jumbo       (struct bge_softc *);
static void bge_free_rx_ring_jumbo      (struct bge_softc *);
static void bge_free_tx_ring    (struct bge_softc *);
static int bge_init_tx_ring     (struct bge_softc *);

static int bge_chipinit         (struct bge_softc *);
static int bge_blockinit        (struct bge_softc *);

#ifdef notdef
static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
static void bge_vpd_read_res    (struct bge_softc *, struct vpd_res *, int);
static void bge_vpd_read        (struct bge_softc *);
#endif

static u_int32_t bge_readmem_ind
                                (struct bge_softc *, int);
static void bge_writemem_ind    (struct bge_softc *, int, int);
#ifdef notdef
static u_int32_t bge_readreg_ind
                                (struct bge_softc *, int);
#endif
static void bge_writereg_ind    (struct bge_softc *, int, int);

static int bge_miibus_readreg   (device_t, int, int);
static int bge_miibus_writereg  (device_t, int, int, int);
static void bge_miibus_statchg  (device_t);
#ifdef DEVICE_POLLING
static void bge_poll            (struct ifnet *ifp, enum poll_cmd cmd,
                                    int count);
static void bge_poll_locked     (struct ifnet *ifp, enum poll_cmd cmd,
                                    int count);
#endif

static void bge_reset           (struct bge_softc *);
static void bge_link_upd        (struct bge_softc *);

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),
        DEVMETHOD(device_suspend,       bge_suspend),
        DEVMETHOD(device_resume,        bge_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static driver_t bge_driver = {
        "bge",
        bge_methods,
        sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

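/*
 * The chip's internal memory and registers can be reached indirectly
 * through pairs of window registers in PCI config space: an offset is
 * written to the BASEADDR register and the data is then read or written
 * through the corresponding DATA register.  The helpers below wrap this
 * two-step access pattern.
 */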
static u_int32_t
bge_readmem_ind(sc, off)
        struct bge_softc *sc;
        int off;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
        struct bge_softc *sc;
        int off, val;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

        return;
}

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
        struct bge_softc *sc;
        int off;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
        struct bge_softc *sc;
        int off, val;
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

        return;
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(arg, segs, nseg, error)
        void *arg;
        bus_dma_segment_t *segs;
        int nseg;
        int error;
{
        struct bge_dmamap_arg *ctx;

        if (error)
                return;

        ctx = arg;

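        /*
         * The caller passes the maximum number of segments it can accept
         * in bge_maxsegs; report an over-long mapping back by zeroing it.
         */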
        if (nseg > ctx->bge_maxsegs) {
                ctx->bge_maxsegs = 0;
                return;
        }

        ctx->bge_busaddr = segs->ds_addr;

        return;
}

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
        struct bge_softc *sc;
        int addr;
{
        int i;
        device_t dev;
        u_int32_t val;

        dev = sc->bge_dev;
        pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                device_printf(sc->bge_dev, "VPD read timed out\n");
                return(0);
        }

        val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

        return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
        struct bge_softc *sc;
        struct vpd_res *res;
        int addr;
{
        int i;
        u_int8_t *ptr;

        ptr = (u_int8_t *)res;
        for (i = 0; i < sizeof(struct vpd_res); i++)
                ptr[i] = bge_vpd_readbyte(sc, i + addr);

        return;
}

static void
bge_vpd_read(sc)
        struct bge_softc *sc;
{
        int pos = 0, i;
        struct vpd_res res;

        if (sc->bge_vpd_prodname != NULL)
                free(sc->bge_vpd_prodname, M_DEVBUF);
        if (sc->bge_vpd_readonly != NULL)
                free(sc->bge_vpd_readonly, M_DEVBUF);
        sc->bge_vpd_prodname = NULL;
        sc->bge_vpd_readonly = NULL;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_ID) {
                device_printf(sc->bge_dev,
                    "bad VPD resource id: expected %x got %x\n", VPD_RES_ID,
                    res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
        sc->bge_vpd_prodname[i] = '\0';
        pos += i;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_READ) {
                device_printf(sc->bge_dev,
                    "bad VPD resource id: expected %x got %x\n", VPD_RES_READ,
                    res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

        return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
        struct bge_softc *sc;
        int addr;
        u_int8_t *dest;
{
        int i;
        u_int32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for(i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                device_printf(sc->bge_dev, "EEPROM read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

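        /* The data register holds a 32-bit word; extract the addressed byte. */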
        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
        struct bge_softc *sc;
        caddr_t dest;
        int off;
        int cnt;
{
        int err = 0, i;
        u_int8_t byte = 0;

        for (i = 0; i < cnt; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return(err ? 1 : 0);
}

static int
bge_miibus_readreg(dev, phy, reg)
        device_t dev;
        int phy, reg;
{
        struct bge_softc *sc;
        u_int32_t val, autopoll;
        int i;

        sc = device_get_softc(dev);

        /*
         * Broadcom's own driver always assumes the internal
         * PHY is at GMII address 1. On some chips, the PHY responds
         * to accesses at all addresses, which could cause us to
         * bogusly attach the PHY 32 times at probe time. Always
         * restricting the lookup to address 1 is simpler than
         * trying to figure out which chip revisions should be
         * special-cased.
         */
        if (phy != 1)
                return(0);

        /* Reading with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

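        /*
         * Kick off the read, then poll the MI communication register
         * until the BUSY bit clears or we give up.
         */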
        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                if_printf(sc->bge_ifp, "PHY read timed out\n");
                val = 0;
                goto done;
        }

        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
        device_t dev;
        int phy, reg, val;
{
        struct bge_softc *sc;
        u_int32_t autopoll;
        int i;

        sc = device_get_softc(dev);

        /* Writing with autopolling on may trigger PCI errors */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(sc->bge_ifp, "PHY write timed out\n");
                return(0);
        }

        return(0);
}

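/*
 * Callback from the MII layer whenever the negotiated media changes.
 * Program the MAC's port mode (GMII vs. MII) and duplex to match.
 */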
static void
bge_miibus_statchg(dev)
        device_t dev;
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
        }

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        } else {
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        }

        return;
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
        struct bge_softc        *sc;
        int                     i;
        struct mbuf             *m;
{
        struct mbuf             *m_new = NULL;
        struct bge_rx_bd        *r;
        struct bge_dmamap_arg   ctx;
        int                     error;

        if (m == NULL) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        return(ENOBUFS);
                }

                MCLGET(m_new, M_DONTWAIT);
                if (!(m_new->m_flags & M_EXT)) {
                        m_freem(m_new);
                        return(ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);
        sc->bge_cdata.bge_rx_std_chain[i] = m_new;
        r = &sc->bge_ldata.bge_rx_std_ring[i];
        ctx.bge_maxsegs = 1;
        ctx.sc = sc;
        error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
            sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
            m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
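        /*
         * The load may fail outright, or the callback may have flagged
         * too many segments by zeroing bge_maxsegs (see bge_dma_map_addr()).
         */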
        if (error || ctx.bge_maxsegs == 0) {
                if (m == NULL) {
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                        m_freem(m_new);
                }
                return(ENOMEM);
        }
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
        r->bge_flags = BGE_RXBDFLAG_END;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
            sc->bge_cdata.bge_rx_std_dmamap[i],
            BUS_DMASYNC_PREREAD);

        return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
        struct bge_softc *sc;
        int i;
        struct mbuf *m;
{
        bus_dma_segment_t segs[BGE_NSEG_JUMBO];
        struct bge_extrx_bd *r;
        struct mbuf *m_new = NULL;
        int nsegs;
        int error;

        if (m == NULL) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return(ENOBUFS);

                m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
                if (!(m_new->m_flags & M_EXT)) {
                        m_freem(m_new);
                        return(ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);

        error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
            sc->bge_cdata.bge_rx_jumbo_dmamap[i],
            m_new, segs, &nsegs, BUS_DMA_NOWAIT);
        if (error) {
                if (m == NULL)
                        m_freem(m_new);
                return(error);
        }
        KASSERT(nsegs == BGE_NSEG_JUMBO, ("%s: %d segments", __func__, nsegs));

        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

        /*
         * Fill in the extended RX buffer descriptor.
         */
        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
        r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
        r->bge_len0 = segs[0].ds_len;
        r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
        r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
        r->bge_len1 = segs[1].ds_len;
        r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
        r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
        r->bge_len2 = segs[2].ds_len;
        r->bge_len3 = 0;
        r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
        r->bge_idx = i;

        bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
            sc->bge_cdata.bge_rx_jumbo_dmamap[i],
            BUS_DMASYNC_PREREAD);

        return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_SSLOTS; i++) {
                if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        sc->bge_std = i - 1;
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return(0);
}

static void
bge_free_rx_ring_std(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_rx_std_dmamap[i],
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_rx_std_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }

        return;
}

static int
bge_init_rx_ring_jumbo(sc)
        struct bge_softc *sc;
{
        struct bge_rcb *rcb;
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                        return(ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
            sc->bge_cdata.bge_rx_jumbo_ring_map,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        sc->bge_jumbo = i - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
                                    BGE_RCB_FLAG_USE_EXT_RX_BD);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return(0);
}

static void
bge_free_rx_ring_jumbo(sc)
        struct bge_softc *sc;
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
                            sc->bge_cdata.bge_rx_jumbo_dmamap[i],
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
                            sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_extrx_bd));
        }

        return;
}

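/*
 * Unload the DMA maps and free any mbufs still attached to the TX ring,
 * then clear the ring descriptors themselves.
 */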
static void
bge_free_tx_ring(sc)
        struct bge_softc *sc;
{
        int i;

        if (sc->bge_ldata.bge_tx_ring == NULL)
                return;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_tx_dmamap[i],
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_tx_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }

        return;
}

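/*
 * Reset the software TX bookkeeping and push a zero producer index
 * to both the host-memory and NIC-memory send ring mailboxes.
 */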
static int
bge_init_tx_ring(sc)
        struct bge_softc *sc;
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        sc->bge_tx_prodidx = 0;
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* NIC-memory send ring not used; initialize to zero. */
        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return(0);
}

static void
bge_setmulti(sc)
        struct bge_softc *sc;
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        u_int32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        BGE_LOCK_ASSERT(sc);

        ifp = sc->bge_ifp;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
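                /*
                 * The hash is the low 7 bits of the little-endian CRC32
                 * of the address: bits 6:5 select one of the four BGE_MAR
                 * registers and bits 4:0 select the bit within it.
                 */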
                h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
                    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }
        IF_ADDR_UNLOCK(ifp);

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

        return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
        struct bge_softc *sc;
{
        int                     i;
        u_int32_t               dma_rw_ctl;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

        /*
         * Check the 'ROM failed' bit on the RX CPU to see if
         * self-tests passed.
         */
        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
                return(ENODEV);
        }

        /* Clear the MAC control register */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        /* Set up the PCI DMA control register. */
        if (sc->bge_pcie) {
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
            BGE_PCISTATE_PCI_BUSMODE) {
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);
        } else {
                /* PCI-X bus */
                /*
                 * The 5704 uses a different encoding of read/write
                 * watermarks.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                else
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        u_int32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        }

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
        /*
         * Must ensure that we do not cross an 8K (bytes) boundary
         * for DMA reads.  Our highest limit is 1K bytes.  This is a
         * restriction on some ALPHA platforms with early revision
         * 21174 PCI chipsets, such as the AlphaPC 164lx
         */
        PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
            BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

        /* Set the timer prescaler (always 66MHz) */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

        return(0);
}

static int
bge_blockinit(sc)
        struct bge_softc *sc;
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                /* Configure mbuf memory pool */
                if (sc->bge_extram) {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_EXT_SSRAM);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                } else {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_BUFFPOOL_1);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                }

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        }
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

                /* Poll for buffer manager start indication */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                                break;
                        DELAY(10);
                }

                if (i == BGE_TIMEOUT) {
                        device_printf(sc->bge_dev,
                            "buffer manager failed to start\n");
                        return(ENXIO);
                }
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                device_printf(sc->bge_dev, "flow-through queue init failed\n");
                return(ENXIO);
        }

        /* Initialize the standard RX ring control block */
        rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
        if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5750)
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        else
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

                rcb->bge_hostaddr.bge_addr_lo =
                    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                rcb->bge_hostaddr.bge_addr_hi =
                    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                    sc->bge_cdata.bge_rx_jumbo_ring_map,
                    BUS_DMASYNC_PREREAD);
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
                    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
                if (sc->bge_extram)
                        rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
                else
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);

                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

                /* Set up dummy disabled mini ring RCB */
                rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
        }

        /*
         * Set the BD ring replenish thresholds. The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

        /*
         * Disable all unused send rings by setting the 'ring disabled'
         * bit in the flags field of all the TX send ring control blocks.
         * These are located in NIC memory.
         */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Configure TX RCB 0 (we use only the first ring) */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr,
            BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750)
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

        /* Disable all unused RX return rings */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
                    BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                    (i * (sizeof(u_int64_t))), 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Initialize RX ring indexes */
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

        /*
         * Set up RX return ring 0
         * Note that the NIC address for RX return rings is 0x00000000.
         * The return rings live entirely within the host, so the
         * nicaddr field in the RCB isn't used.
         */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
            BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

        /* Set random backoff seed for TX */
        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
            (IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
            IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
            IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5]) &
            BGE_TX_BACKOFF_SEED_MASK);

        /* Set inter-packet gap */
        CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

        /*
         * Specify which ring to use for packets that don't match
         * any RX rules.
         */
        CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

        /*
         * Configure number of RX lists. One interrupt distribution
         * list, sixteen active lists, one bad frames class.
         */
        CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

        /* Initialize RX list placement stats mask. */
        CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

        /* Disable host coalescing until we get it set up */
        CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

        /* Poll to make sure it's shut down. */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                device_printf(sc->bge_dev,
                    "host coalescing engine failed to idle\n");
                return(ENXIO);
        }

        /* Set up host coalescing defaults */
        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
                CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
        }
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

        /* Set up address of statistics block */
        if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
            sc->bge_asicrev != BGE_ASICREV_BCM5750) {
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
                    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
                    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
                CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
        }

        /* Set up address of status block */
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
            BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
            BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
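        /* Start with clean producer/consumer indices in the host status block. */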
1422         sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1423         sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1424
1425         /* Turn on host coalescing state machine */
1426         CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1427
1428         /* Turn on RX BD completion state machine and enable attentions */
1429         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1430             BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1431
1432         /* Turn on RX list placement state machine */
1433         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1434
1435         /* Turn on RX list selector state machine. */
1436         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1437             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1438                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1439
1440         /* Turn on DMA, clear stats */
1441         CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1442             BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1443             BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1444             BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1445             (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1446
1447         /* Set misc. local control, enable interrupts on attentions */
1448         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1449
1450 #ifdef notdef
1451         /* Assert GPIO pins for PHY reset */
1452         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1453             BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1454         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1455             BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1456 #endif
1457
1458         /* Turn on DMA completion state machine */
1459         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1460             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1461                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1462
1463         /* Turn on write DMA state machine */
1464         CSR_WRITE_4(sc, BGE_WDMA_MODE,
1465             BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1466
1467         /* Turn on read DMA state machine */
1468         CSR_WRITE_4(sc, BGE_RDMA_MODE,
1469             BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1470
1471         /* Turn on RX data completion state machine */
1472         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1473
1474         /* Turn on RX BD initiator state machine */
1475         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1476
1477         /* Turn on RX data and RX BD initiator state machine */
1478         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1479
1480         /* Turn on Mbuf cluster free state machine */
1481         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1482             sc->bge_asicrev != BGE_ASICREV_BCM5750)
1483                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1484
1485         /* Turn on send BD completion state machine */
1486         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1487
1488         /* Turn on send data completion state machine */
1489         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1490
1491         /* Turn on send data initiator state machine */
1492         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1493
1494         /* Turn on send BD initiator state machine */
1495         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1496
1497         /* Turn on send BD selector state machine */
1498         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1499
1500         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1501         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1502             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1503
1504         /* ack/clear link change events */
1505         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1506             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1507             BGE_MACSTAT_LINK_CHANGED);
1508         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1509
1510         /* Enable PHY auto polling (for MII/GMII only) */
1511         if (sc->bge_tbi) {
1512                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1513         } else {
1514                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|(10<<16));
1515                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1516                     sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
1517                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1518                             BGE_EVTENB_MI_INTERRUPT);
1519         }
1520
1521         /*
1522          * Clear any pending link state attention.
1523          * Otherwise some link state change events may be lost until attention
1524          * is cleared by bge_intr() -> bge_link_upd() sequence.
1525          * It's not necessary on newer BCM chips - perhaps enabling link
1526          * state change attentions implies clearing pending attention.
1527          */
1528         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1529             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1530             BGE_MACSTAT_LINK_CHANGED);
1531
1532         /* Enable link state change attentions. */
1533         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1534
1535         return(0);
1536 }
1537
1538 /*
1539  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1540  * against our list and return its name if we find a match. Note
1541  * that since the Broadcom controller contains VPD support, we
1542  * can get the device name string from the controller itself instead
1543  * of the compiled-in string. This is a little slow, but it guarantees
1544  * we'll always announce the right product name.
1545  */
1546 static int
1547 bge_probe(dev)
1548         device_t dev;
1549 {
1550         struct bge_type *t;
1551         struct bge_softc *sc;
1552         char *descbuf;
1553
1554         t = bge_devs;
1555
1556         sc = device_get_softc(dev);
1557         bzero(sc, sizeof(struct bge_softc));
1558         sc->bge_dev = dev;
1559
1560         while (t->bge_name != NULL) {
1561                 if ((pci_get_vendor(dev) == t->bge_vid) &&
1562                     (pci_get_device(dev) == t->bge_did)) {
1563 #ifdef notdef
1564                         bge_vpd_read(sc);
1565                         device_set_desc(dev, sc->bge_vpd_prodname);
1566 #endif
1567                         descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1568                         if (descbuf == NULL)
1569                                 return(ENOMEM);
1570                         snprintf(descbuf, BGE_DEVDESC_MAX,
1571                             "%s, ASIC rev. %#04x", t->bge_name,
1572                             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1573                         device_set_desc_copy(dev, descbuf);
1574                         if (pci_get_subvendor(dev) == DELL_VENDORID)
1575                                 sc->bge_no_3_led = 1;
1576                         free(descbuf, M_TEMP);
1577                         return(0);
1578                 }
1579                 t++;
1580         }
1581
1582         return(ENXIO);
1583 }
1584
1585 static void
1586 bge_dma_free(sc)
1587         struct bge_softc *sc;
1588 {
1589         int i;
1590
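             /*
              * Teardown mirrors bge_dma_alloc(): unload each DMA map, free
              * the DMA'able memory, then destroy the tag.  The parent tag
              * goes last.
              */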
1591
1592         /* Destroy DMA maps for RX buffers */
1593
1594         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1595                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1596                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1597                             sc->bge_cdata.bge_rx_std_dmamap[i]);
1598         }
1599
1600         /* Destroy DMA maps for jumbo RX buffers */
1601
1602         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1603                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1604                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1605                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1606         }
1607
1608         /* Destroy DMA maps for TX buffers */
1609
1610         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1611                 if (sc->bge_cdata.bge_tx_dmamap[i])
1612                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1613                             sc->bge_cdata.bge_tx_dmamap[i]);
1614         }
1615
1616         if (sc->bge_cdata.bge_mtag)
1617                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1618
1619
1620         /* Destroy standard RX ring */
1621
1622         if (sc->bge_cdata.bge_rx_std_ring_map)
1623                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1624                     sc->bge_cdata.bge_rx_std_ring_map);
1625         if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1626                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1627                     sc->bge_ldata.bge_rx_std_ring,
1628                     sc->bge_cdata.bge_rx_std_ring_map);
1629
1630         if (sc->bge_cdata.bge_rx_std_ring_tag)
1631                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1632
1633         /* Destroy jumbo RX ring */
1634
1635         if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1636                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1637                     sc->bge_cdata.bge_rx_jumbo_ring_map);
1638
1639         if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1640             sc->bge_ldata.bge_rx_jumbo_ring)
1641                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1642                     sc->bge_ldata.bge_rx_jumbo_ring,
1643                     sc->bge_cdata.bge_rx_jumbo_ring_map);
1644
1645         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1646                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1647
1648         /* Destroy RX return ring */
1649
1650         if (sc->bge_cdata.bge_rx_return_ring_map)
1651                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1652                     sc->bge_cdata.bge_rx_return_ring_map);
1653
1654         if (sc->bge_cdata.bge_rx_return_ring_map &&
1655             sc->bge_ldata.bge_rx_return_ring)
1656                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1657                     sc->bge_ldata.bge_rx_return_ring,
1658                     sc->bge_cdata.bge_rx_return_ring_map);
1659
1660         if (sc->bge_cdata.bge_rx_return_ring_tag)
1661                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1662
1663         /* Destroy TX ring */
1664
1665         if (sc->bge_cdata.bge_tx_ring_map)
1666                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1667                     sc->bge_cdata.bge_tx_ring_map);
1668
1669         if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1670                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1671                     sc->bge_ldata.bge_tx_ring,
1672                     sc->bge_cdata.bge_tx_ring_map);
1673
1674         if (sc->bge_cdata.bge_tx_ring_tag)
1675                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1676
1677         /* Destroy status block */
1678
1679         if (sc->bge_cdata.bge_status_map)
1680                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1681                     sc->bge_cdata.bge_status_map);
1682
1683         if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1684                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1685                     sc->bge_ldata.bge_status_block,
1686                     sc->bge_cdata.bge_status_map);
1687
1688         if (sc->bge_cdata.bge_status_tag)
1689                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1690
1691         /* Destroy statistics block */
1692
1693         if (sc->bge_cdata.bge_stats_map)
1694                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1695                     sc->bge_cdata.bge_stats_map);
1696
1697         if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1698                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1699                     sc->bge_ldata.bge_stats,
1700                     sc->bge_cdata.bge_stats_map);
1701
1702         if (sc->bge_cdata.bge_stats_tag)
1703                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1704
1705         /* Destroy the parent tag */
1706
1707         if (sc->bge_cdata.bge_parent_tag)
1708                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1709
1710         return;
1711 }
1712
1713 static int
1714 bge_dma_alloc(dev)
1715         device_t dev;
1716 {
1717         struct bge_softc *sc;
1718         int i, error;
1719         struct bge_dmamap_arg ctx;
1720
1721         sc = device_get_softc(dev);
1722
1723         /*
1724          * Allocate the parent bus DMA tag appropriate for PCI.
1725          */
1726         error = bus_dma_tag_create(NULL,        /* parent */
1727                         PAGE_SIZE, 0,           /* alignment, boundary */
1728                         BUS_SPACE_MAXADDR,      /* lowaddr */
1729                         BUS_SPACE_MAXADDR,      /* highaddr */
1730                         NULL, NULL,             /* filter, filterarg */
1731                         MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1732                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1733                         0,                      /* flags */
1734                         NULL, NULL,             /* lockfunc, lockarg */
1735                         &sc->bge_cdata.bge_parent_tag);
1736
1737         if (error != 0) {
1738                 device_printf(sc->bge_dev,
1739                     "could not allocate parent dma tag\n");
1740                 return (ENOMEM);
1741         }
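             /*
              * All of the ring and mbuf tags created below are derived from
              * this parent tag and inherit its address and boundary
              * restrictions.
              */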
1742
1743         /*
1744          * Create tag for RX mbufs.
1745          */
1746         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1747             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1748             NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1749             BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1750
1751         if (error) {
1752                 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1753                 return (ENOMEM);
1754         }
1755
1756         /* Create DMA maps for RX buffers */
1757
1758         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1759                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1760                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
1761                 if (error) {
1762                         device_printf(sc->bge_dev,
1763                             "can't create DMA map for RX\n");
1764                         return(ENOMEM);
1765                 }
1766         }
1767
1768         /* Create DMA maps for TX buffers */
1769
1770         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1771                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1772                             &sc->bge_cdata.bge_tx_dmamap[i]);
1773                 if (error) {
1774                         device_printf(sc->bge_dev,
1775                             "can't create DMA map for TX\n");
1776                         return(ENOMEM);
1777                 }
1778         }
1779
1780         /* Create tag for standard RX ring */
1781
1782         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1783             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1784             NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1785             NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1786
1787         if (error) {
1788                 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1789                 return (ENOMEM);
1790         }
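             /*
              * Each ring follows the same recipe: create a tag, allocate
              * DMA'able memory, load the map (bge_dma_map_addr() saves the
              * bus address in ctx.bge_busaddr), then record the physical
              * address in the softc.
              */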
1791
1792         /* Allocate DMA'able memory for standard RX ring */
1793
1794         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1795             (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1796             &sc->bge_cdata.bge_rx_std_ring_map);
1797         if (error)
1798                 return (ENOMEM);
1799
1800         bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1801
1802         /* Load the address of the standard RX ring */
1803
1804         ctx.bge_maxsegs = 1;
1805         ctx.sc = sc;
1806
1807         error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1808             sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1809             BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1810
1811         if (error)
1812                 return (ENOMEM);
1813
1814         sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1815
1816         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1817             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1818
1819                 /*
1820                  * Create tag for jumbo mbufs.
1821                  * This is really a bit of a kludge. We allocate a special
1822                  * jumbo buffer pool which (thanks to the way our DMA
1823                  * memory allocation works) will consist of contiguous
1824                  * pages. This means that even though a jumbo buffer might
1825                  * be larger than a page size, we don't really need to
1826                  * map it into more than one DMA segment. However, the
1827                  * default mbuf tag will result in multi-segment mappings,
1828                  * so we have to create a special jumbo mbuf tag that
1829                  * lets us get away with mapping the jumbo buffers as
1830                  * a single segment. I think eventually the driver should
1831                  * be changed so that it uses ordinary mbufs and cluster
1832                  * buffers, i.e. jumbo frames can span multiple DMA
1833                  * descriptors. But that's a project for another day.
1834                  */
1835
1836                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1837                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1838                     NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1839                     0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1840
1841                 if (error) {
1842                         device_printf(sc->bge_dev,
1843                             "could not allocate dma tag\n");
1844                         return (ENOMEM);
1845                 }
1846
1847                 /* Create tag for jumbo RX ring */
1848                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1849                     PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1850                     NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1851                     NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1852
1853                 if (error) {
1854                         device_printf(sc->bge_dev,
1855                             "could not allocate dma tag\n");
1856                         return (ENOMEM);
1857                 }
1858
1859                 /* Allocate DMA'able memory for jumbo RX ring */
1860                 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1861                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1862                     BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1863                     &sc->bge_cdata.bge_rx_jumbo_ring_map);
1864                 if (error)
1865                         return (ENOMEM);
1866
1867                 /* Load the address of the jumbo RX ring */
1868                 ctx.bge_maxsegs = 1;
1869                 ctx.sc = sc;
1870
1871                 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1872                     sc->bge_cdata.bge_rx_jumbo_ring_map,
1873                     sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1874                     bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1875
1876                 if (error)
1877                         return (ENOMEM);
1878
1879                 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1880
1881                 /* Create DMA maps for jumbo RX buffers */
1882
1883                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1884                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1885                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1886                         if (error) {
1887                                 device_printf(sc->bge_dev,
1888                                     "can't create DMA map for RX\n");
1889                                 return(ENOMEM);
1890                         }
1891                 }
1892
1893         }
1894
1895         /* Create tag for RX return ring */
1896
1897         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1898             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1899             NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1900             NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1901
1902         if (error) {
1903                 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1904                 return (ENOMEM);
1905         }
1906
1907         /* Allocate DMA'able memory for RX return ring */
1908
1909         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1910             (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1911             &sc->bge_cdata.bge_rx_return_ring_map);
1912         if (error)
1913                 return (ENOMEM);
1914
1915         bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1916             BGE_RX_RTN_RING_SZ(sc));
1917
1918         /* Load the address of the RX return ring */
1919
1920         ctx.bge_maxsegs = 1;
1921         ctx.sc = sc;
1922
1923         error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1924             sc->bge_cdata.bge_rx_return_ring_map,
1925             sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1926             bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1927
1928         if (error)
1929                 return (ENOMEM);
1930
1931         sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1932
1933         /* Create tag for TX ring */
1934
1935         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1936             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1937             NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1938             &sc->bge_cdata.bge_tx_ring_tag);
1939
1940         if (error) {
1941                 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1942                 return (ENOMEM);
1943         }
1944
1945         /* Allocate DMA'able memory for TX ring */
1946
1947         error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1948             (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1949             &sc->bge_cdata.bge_tx_ring_map);
1950         if (error)
1951                 return (ENOMEM);
1952
1953         bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1954
1955         /* Load the address of the TX ring */
1956
1957         ctx.bge_maxsegs = 1;
1958         ctx.sc = sc;
1959
1960         error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1961             sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1962             BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1963
1964         if (error)
1965                 return (ENOMEM);
1966
1967         sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1968
1969         /* Create tag for status block */
1970
1971         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1972             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1973             NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1974             NULL, NULL, &sc->bge_cdata.bge_status_tag);
1975
1976         if (error) {
1977                 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1978                 return (ENOMEM);
1979         }
1980
1981         /* Allocate DMA'able memory for status block */
1982
1983         error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1984             (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1985             &sc->bge_cdata.bge_status_map);
1986         if (error)
1987                 return (ENOMEM);
1988
1989         bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1990
1991         /* Load the address of the status block */
1992
1993         ctx.sc = sc;
1994         ctx.bge_maxsegs = 1;
1995
1996         error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
1997             sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
1998             BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1999
2000         if (error)
2001                 return (ENOMEM);
2002
2003         sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2004
2005         /* Create tag for statistics block */
2006
2007         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2008             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2009             NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2010             &sc->bge_cdata.bge_stats_tag);
2011
2012         if (error) {
2013                 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2014                 return (ENOMEM);
2015         }
2016
2017         /* Allocate DMA'able memory for statistics block */
2018
2019         error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2020             (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2021             &sc->bge_cdata.bge_stats_map);
2022         if (error)
2023                 return (ENOMEM);
2024
2025         bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2026
2027         /* Load the address of the statistics block */
2028
2029         ctx.sc = sc;
2030         ctx.bge_maxsegs = 1;
2031
2032         error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2033             sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2034             BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2035
2036         if (error)
2037                 return (ENOMEM);
2038
2039         sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2040
2041         return(0);
2042 }
2043
2044 static int
2045 bge_attach(dev)
2046         device_t dev;
2047 {
2048         struct ifnet *ifp;
2049         struct bge_softc *sc;
2050         u_int32_t hwcfg = 0;
2051         u_int32_t mac_tmp = 0;
2052         u_char eaddr[6];
2053         int error = 0, rid;
2054
2055         sc = device_get_softc(dev);
2056         sc->bge_dev = dev;
2057
2058         /*
2059          * Map control/status registers.
2060          */
2061         pci_enable_busmaster(dev);
2062
2063         rid = BGE_PCI_BAR0;
2064         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2065             RF_ACTIVE|PCI_RF_DENSE);
2066
2067         if (sc->bge_res == NULL) {
2068                 device_printf (sc->bge_dev, "couldn't map memory\n");
2069                 error = ENXIO;
2070                 goto fail;
2071         }
2072
2073         sc->bge_btag = rman_get_bustag(sc->bge_res);
2074         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2075
2076         /* Allocate interrupt */
2077         rid = 0;
2078
2079         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2080             RF_SHAREABLE | RF_ACTIVE);
2081
2082         if (sc->bge_irq == NULL) {
2083                 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2084                 error = ENXIO;
2085                 goto fail;
2086         }
2087
2088         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2089
2090         /* Save ASIC rev. */
2091
2092         sc->bge_chipid =
2093             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2094             BGE_PCIMISCCTL_ASICREV;
2095         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2096         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2097
2098         /*
2099          * Treat the 5714 and the 5752 like the 5750 until we have more info
2100          * on this chip.
2101          */
2102         if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 
2103             sc->bge_asicrev == BGE_ASICREV_BCM5752)
2104                 sc->bge_asicrev = BGE_ASICREV_BCM5750;
2105
2106         /*
2107          * XXX: Broadcom Linux driver.  Not in specs or errata.
2108          * PCI-Express?
2109          */
2110         if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2111                 u_int32_t v;
2112
2113                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2114                 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2115                         v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2116                         if ((v & 0xff) == BGE_PCIE_CAPID)
2117                                 sc->bge_pcie = 1;
2118                 }
2119         }
2120
2121         /* Try to reset the chip. */
2122         bge_reset(sc);
2123
2124         if (bge_chipinit(sc)) {
2125                 device_printf(sc->bge_dev, "chip initialization failed\n");
2126                 bge_release_resources(sc);
2127                 error = ENXIO;
2128                 goto fail;
2129         }
2130
2131         /*
2132          * Get station address from the EEPROM.
2133          */
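             /*
              * The bootcode normally leaves a copy of the station address in
              * NIC memory at offset 0x0c14; 0x484b in the upper half is the
              * ASCII signature "HK" marking that copy as valid.
              */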
2134         mac_tmp = bge_readmem_ind(sc, 0x0c14);
2135         if ((mac_tmp >> 16) == 0x484b) {
2136                 eaddr[0] = (u_char)(mac_tmp >> 8);
2137                 eaddr[1] = (u_char)mac_tmp;
2138                 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2139                 eaddr[2] = (u_char)(mac_tmp >> 24);
2140                 eaddr[3] = (u_char)(mac_tmp >> 16);
2141                 eaddr[4] = (u_char)(mac_tmp >> 8);
2142                 eaddr[5] = (u_char)mac_tmp;
2143         } else if (bge_read_eeprom(sc, eaddr,
2144             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2145                 device_printf(sc->bge_dev, "failed to read station address\n");
2146                 bge_release_resources(sc);
2147                 error = ENXIO;
2148                 goto fail;
2149         }
2150
2151         /* 5705 limits RX return ring to 512 entries. */
2152         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2153             sc->bge_asicrev == BGE_ASICREV_BCM5750)
2154                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2155         else
2156                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2157
2158         if (bge_dma_alloc(dev)) {
2159                 device_printf(sc->bge_dev,
2160                     "failed to allocate DMA resources\n");
2161                 bge_release_resources(sc);
2162                 error = ENXIO;
2163                 goto fail;
2164         }
2165
2166         /* Set default tuneable values. */
2167         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2168         sc->bge_rx_coal_ticks = 150;
2169         sc->bge_tx_coal_ticks = 150;
2170         sc->bge_rx_max_coal_bds = 64;
2171         sc->bge_tx_max_coal_bds = 128;
2172
2173         /* Set up ifnet structure */
2174         ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2175         if (ifp == NULL) {
2176                 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2177                 bge_release_resources(sc);
2178                 error = ENXIO;
2179                 goto fail;
2180         }
2181         ifp->if_softc = sc;
2182         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2183         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2184         ifp->if_ioctl = bge_ioctl;
2185         ifp->if_start = bge_start;
2186         ifp->if_watchdog = bge_watchdog;
2187         ifp->if_init = bge_init;
2188         ifp->if_mtu = ETHERMTU;
2189         ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2190         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2191         IFQ_SET_READY(&ifp->if_snd);
2192         ifp->if_hwassist = BGE_CSUM_FEATURES;
2193         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2194             IFCAP_VLAN_MTU;
2195         ifp->if_capenable = ifp->if_capabilities;
2196 #ifdef DEVICE_POLLING
2197         ifp->if_capabilities |= IFCAP_POLLING;
2198 #endif
2199
2200         /*
2201          * 5700 B0 chips do not support checksumming correctly due
2202          * to hardware bugs.
2203          */
2204         if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2205                 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2206                 ifp->if_capenable &= ~IFCAP_HWCSUM;
2207                 ifp->if_hwassist = 0;
2208         }
2209
2210         /*
2211          * Figure out what sort of media we have by checking the
2212          * hardware config word in the first 32k of NIC internal memory,
2213          * or fall back to examining the EEPROM if necessary.
2214          * Note: on some BCM5700 cards, this value appears to be unset.
2215          * If that's the case, we have to rely on identifying the NIC
2216          * by its PCI subsystem ID, as we do below for the SysKonnect
2217          * SK-9D41.
2218          */
2219         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2220                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2221         else {
2222                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2223                     sizeof(hwcfg))) {
2224                         device_printf(sc->bge_dev, "failed to read EEPROM\n");
2225                         bge_release_resources(sc);
2226                         error = ENXIO;
2227                         goto fail;
2228                 }
2229                 hwcfg = ntohl(hwcfg);
2230         }
2231
2232         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2233                 sc->bge_tbi = 1;
2234
2235         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2236         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2237                 sc->bge_tbi = 1;
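             /*
              * TBI ("ten bit interface") parts drive an on-board fiber
              * SERDES rather than a copper PHY, so media is handled through
              * ifmedia below instead of miibus.
              */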
2238
2239         if (sc->bge_tbi) {
2240                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2241                     bge_ifmedia_upd, bge_ifmedia_sts);
2242                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2243                 ifmedia_add(&sc->bge_ifmedia,
2244                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2245                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2246                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2247                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2248         } else {
2249                 /*
2250                  * Do transceiver setup.
2251                  */
2252                 if (mii_phy_probe(dev, &sc->bge_miibus,
2253                     bge_ifmedia_upd, bge_ifmedia_sts)) {
2254                         device_printf(sc->bge_dev, "MII without any PHY!\n");
2255                         bge_release_resources(sc);
2256                         error = ENXIO;
2257                         goto fail;
2258                 }
2259         }
2260
2261         /*
2262          * When using the BCM5701 in PCI-X mode, data corruption has
2263          * been observed in the first few bytes of some received packets.
2264          * Aligning the packet buffer in memory eliminates the corruption.
2265          * Unfortunately, this misaligns the packet payloads.  On platforms
2266          * which do not support unaligned accesses, we will realign the
2267          * payloads by copying the received packets.
2268          */
2269         switch (sc->bge_chipid) {
2270         case BGE_CHIPID_BCM5701_A0:
2271         case BGE_CHIPID_BCM5701_B0:
2272         case BGE_CHIPID_BCM5701_B2:
2273         case BGE_CHIPID_BCM5701_B5:
2274                 /* If in PCI-X mode, work around the alignment bug. */
2275                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2276                     (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2277                     BGE_PCISTATE_PCI_BUSSPEED)
2278                         sc->bge_rx_alignment_bug = 1;
2279                 break;
2280         }
2281
2282         /*
2283          * Call MI attach routine.
2284          */
2285         ether_ifattach(ifp, eaddr);
2286         callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2287
2288         /*
2289          * Hookup IRQ last.
2290          */
2291         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2292            bge_intr, sc, &sc->bge_intrhand);
2293
2294         if (error) {
2295                 bge_detach(dev);
2296                 device_printf(sc->bge_dev, "couldn't set up irq\n");
2297         }
2298
2299 fail:
2300         return(error);
2301 }
2302
2303 static int
2304 bge_detach(dev)
2305         device_t dev;
2306 {
2307         struct bge_softc *sc;
2308         struct ifnet *ifp;
2309
2310         sc = device_get_softc(dev);
2311         ifp = sc->bge_ifp;
2312
2313 #ifdef DEVICE_POLLING
2314         if (ifp->if_capenable & IFCAP_POLLING)
2315                 ether_poll_deregister(ifp);
2316 #endif
2317
2318         BGE_LOCK(sc);
2319         bge_stop(sc);
2320         bge_reset(sc);
2321         BGE_UNLOCK(sc);
2322
2323         ether_ifdetach(ifp);
2324
2325         if (sc->bge_tbi) {
2326                 ifmedia_removeall(&sc->bge_ifmedia);
2327         } else {
2328                 bus_generic_detach(dev);
2329                 device_delete_child(dev, sc->bge_miibus);
2330         }
2331
2332         bge_release_resources(sc);
2333
2334         return(0);
2335 }
2336
2337 static void
2338 bge_release_resources(sc)
2339         struct bge_softc *sc;
2340 {
2341         device_t dev;
2342
2343         dev = sc->bge_dev;
2344
2345         if (sc->bge_vpd_prodname != NULL)
2346                 free(sc->bge_vpd_prodname, M_DEVBUF);
2347
2348         if (sc->bge_vpd_readonly != NULL)
2349                 free(sc->bge_vpd_readonly, M_DEVBUF);
2350
2351         if (sc->bge_intrhand != NULL)
2352                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2353
2354         if (sc->bge_irq != NULL)
2355                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2356
2357         if (sc->bge_res != NULL)
2358                 bus_release_resource(dev, SYS_RES_MEMORY,
2359                     BGE_PCI_BAR0, sc->bge_res);
2360
2361         if (sc->bge_ifp != NULL)
2362                 if_free(sc->bge_ifp);
2363
2364         bge_dma_free(sc);
2365
2366         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
2367                 BGE_LOCK_DESTROY(sc);
2368
2369         return;
2370 }
2371
2372 static void
2373 bge_reset(sc)
2374         struct bge_softc *sc;
2375 {
2376         device_t dev;
2377         u_int32_t cachesize, command, pcistate, reset;
2378         int i, val = 0;
2379
2380         dev = sc->bge_dev;
2381
2382         /* Save some important PCI state. */
2383         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2384         command = pci_read_config(dev, BGE_PCI_CMD, 4);
2385         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2386
2387         pci_write_config(dev, BGE_PCI_MISC_CTL,
2388             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2389             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2390
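             /*
              * BGE_MISCCFG_RESET_CORE_CLOCKS triggers the global reset; the
              * 65<<1 component apparently programs the core clock timer
              * prescaler (for a 66MHz clock) and is rewritten on its own
              * below, once the reset has settled.
              */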
2391         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2392
2393         /* XXX: Broadcom Linux driver. */
2394         if (sc->bge_pcie) {
2395                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
2396                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
2397                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2398                         /* Prevent PCIE link training during global reset */
2399                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2400                         reset |= (1<<29);
2401                 }
2402         }
2403
2404         /* Issue global reset */
2405         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2406
2407         DELAY(1000);
2408
2409         /* XXX: Broadcom Linux driver. */
2410         if (sc->bge_pcie) {
2411                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2412                         uint32_t v;
2413
2414                         DELAY(500000); /* wait for link training to complete */
2415                         v = pci_read_config(dev, 0xc4, 4);
2416                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
2417                 }
2418                 /* Set PCIE max payload size and clear error status. */
2419                 pci_write_config(dev, 0xd8, 0xf5000, 4);
2420         }
2421
2422         /* Reset some of the PCI state that got zapped by reset */
2423         pci_write_config(dev, BGE_PCI_MISC_CTL,
2424             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2425             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2426         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2427         pci_write_config(dev, BGE_PCI_CMD, command, 4);
2428         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2429
2430         /* Enable memory arbiter. */
2431         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2432             sc->bge_asicrev != BGE_ASICREV_BCM5750)
2433                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2434
2435         /*
2436          * Prevent PXE restart: write a magic number to the
2437          * general communications memory at 0xB50.
2438          */
2439         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2440         /*
2441          * Poll the value location we just wrote until
2442          * we see the 1's complement of the magic number.
2443          * This indicates that the firmware initialization
2444          * is complete.
2445          */
2446         for (i = 0; i < BGE_TIMEOUT; i++) {
2447                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2448                 if (val == ~BGE_MAGIC_NUMBER)
2449                         break;
2450                 DELAY(10);
2451         }
2452
2453         if (i == BGE_TIMEOUT) {
2454                 device_printf(sc->bge_dev, "firmware handshake timed out\n");
2455                 return;
2456         }
2457
2458         /*
2459          * XXX Wait for the value of the PCISTATE register to
2460          * return to its original pre-reset state. This is a
2461          * fairly good indicator of reset completion. If we don't
2462          * wait for the reset to fully complete, trying to read
2463          * from the device's non-PCI registers may yield garbage
2464          * results.
2465          */
2466         for (i = 0; i < BGE_TIMEOUT; i++) {
2467                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2468                         break;
2469                 DELAY(10);
2470         }
2471
2472         /* Fix up byte swapping */
2473         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2474             BGE_MODECTL_BYTESWAP_DATA);
2475
2476         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2477
2478         /*
2479          * The 5704 in TBI mode apparently needs some special
2480          * adjustment to ensure the SERDES drive level is set
2481          * to 1.2V.
2482          */
2483         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2484                 uint32_t serdescfg;
2485                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2486                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2487                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2488         }
2489
2490         /* XXX: Broadcom Linux driver. */
2491         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2492                 uint32_t v;
2493
2494                 v = CSR_READ_4(sc, 0x7c00);
2495                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2496         }
2497         DELAY(10000);
2498
2499         return;
2500 }
2501
2502 /*
2503  * Frame reception handling. This is called if there's a frame
2504  * on the receive return list.
2505  *
2506  * Note: we have to be able to handle two possibilities here:
2507  * 1) the frame is from the jumbo receive ring
2508  * 2) the frame is from the standard receive ring
2509  */
2510
2511 static void
2512 bge_rxeof(sc)
2513         struct bge_softc *sc;
2514 {
2515         struct ifnet *ifp;
2516         int stdcnt = 0, jumbocnt = 0;
2517
2518         BGE_LOCK_ASSERT(sc);
2519
2520         ifp = sc->bge_ifp;
2521
2522         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2523             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2524         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2525             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2526         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2527             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2528                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2529                     sc->bge_cdata.bge_rx_jumbo_ring_map,
2530                     BUS_DMASYNC_POSTREAD);
2531         }
2532
2533         while (sc->bge_rx_saved_considx !=
2534             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2535                 struct bge_rx_bd        *cur_rx;
2536                 u_int32_t               rxidx;
2537                 struct ether_header     *eh;
2538                 struct mbuf             *m = NULL;
2539                 u_int16_t               vlan_tag = 0;
2540                 int                     have_tag = 0;
2541
2542 #ifdef DEVICE_POLLING
2543                 if (ifp->if_capenable & IFCAP_POLLING) {
2544                         if (sc->rxcycles <= 0)
2545                                 break;
2546                         sc->rxcycles--;
2547                 }
2548 #endif
2549
2550                 cur_rx =
2551                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2552
2553                 rxidx = cur_rx->bge_idx;
2554                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2555
2556                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2557                         have_tag = 1;
2558                         vlan_tag = cur_rx->bge_vlan_tag;
2559                 }
2560
2561                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2562                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2563                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2564                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2565                             BUS_DMASYNC_POSTREAD);
2566                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2567                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2568                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2569                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2570                         jumbocnt++;
2571                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2572                                 ifp->if_ierrors++;
2573                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2574                                 continue;
2575                         }
2576                         if (bge_newbuf_jumbo(sc,
2577                             sc->bge_jumbo, NULL) == ENOBUFS) {
2578                                 ifp->if_ierrors++;
2579                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2580                                 continue;
2581                         }
2582                 } else {
2583                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2584                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2585                             sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2586                             BUS_DMASYNC_POSTREAD);
2587                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2588                             sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2589                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2590                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2591                         stdcnt++;
2592                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2593                                 ifp->if_ierrors++;
2594                                 bge_newbuf_std(sc, sc->bge_std, m);
2595                                 continue;
2596                         }
2597                         if (bge_newbuf_std(sc, sc->bge_std,
2598                             NULL) == ENOBUFS) {
2599                                 ifp->if_ierrors++;
2600                                 bge_newbuf_std(sc, sc->bge_std, m);
2601                                 continue;
2602                         }
2603                 }
2604
2605                 ifp->if_ipackets++;
2606 #ifndef __NO_STRICT_ALIGNMENT
2607                 /*
2608                  * For architectures with strict alignment we must make sure
2609                  * the payload is aligned.
2610                  */
2611                 if (sc->bge_rx_alignment_bug) {
2612                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2613                             cur_rx->bge_len);
2614                         m->m_data += ETHER_ALIGN;
2615                 }
2616 #endif
2617                 eh = mtod(m, struct ether_header *);
2618                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2619                 m->m_pkthdr.rcvif = ifp;
2620
2621                 if (ifp->if_capenable & IFCAP_RXCSUM) {
2622                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
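                             /*
                              * The hardware hands back its computed IP
                              * header checksum; 0xffff (all ones) means the
                              * header verified, hence the XOR test below.
                              */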
2623                         if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2624                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2625
2626                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2627                             m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2628                                 m->m_pkthdr.csum_data =
2629                                     cur_rx->bge_tcp_udp_csum;
2630                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2631                         }
2632                 }
2633
2634                 /*
2635                  * If we received a packet with a vlan tag,
2636                  * attach that information to the packet.
2637                  */
2638                 if (have_tag) {
2639                         VLAN_INPUT_TAG(ifp, m, vlan_tag);
2640                         if (m == NULL)
2641                                 continue;
2642                 }
2643
2644                 BGE_UNLOCK(sc);
2645                 (*ifp->if_input)(ifp, m);
2646                 BGE_LOCK(sc);
2647         }
2648
2649         if (stdcnt > 0)
2650                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2651                     sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2652         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2653             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2654                 if (jumbocnt > 0)
2655                         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2656                             sc->bge_cdata.bge_rx_jumbo_ring_map,
2657                             BUS_DMASYNC_PREWRITE);
2658         }
2659
2660         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2661         if (stdcnt)
2662                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2663         if (jumbocnt)
2664                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2665
2666         return;
2667 }
2668
2669 static void
2670 bge_txeof(sc)
2671         struct bge_softc *sc;
2672 {
2673         struct bge_tx_bd *cur_tx = NULL;
2674         struct ifnet *ifp;
2675
2676         BGE_LOCK_ASSERT(sc);
2677
2678         ifp = sc->bge_ifp;
2679
2680         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2681             sc->bge_cdata.bge_tx_ring_map,
2682             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2683         /*
2684          * Go through our tx ring and free mbufs for those
2685          * frames that have been sent.
2686          */
2687         while (sc->bge_tx_saved_considx !=
2688             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2689                 u_int32_t               idx = 0;
2690
2691                 idx = sc->bge_tx_saved_considx;
2692                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2693                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2694                         ifp->if_opackets++;
2695                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2696                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2697                             sc->bge_cdata.bge_tx_dmamap[idx],
2698                             BUS_DMASYNC_POSTWRITE);
2699                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2700                             sc->bge_cdata.bge_tx_dmamap[idx]);
2701                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2702                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2703                 }
2704                 sc->bge_txcnt--;
2705                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2706                 ifp->if_timer = 0;
2707         }
2708
2709         if (cur_tx != NULL)
2710                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2711
2712         return;
2713 }
2714
2715 #ifdef DEVICE_POLLING
2716 static void
2717 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2718 {
2719         struct bge_softc *sc = ifp->if_softc;
2720
2721         BGE_LOCK(sc);
2722         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2723                 bge_poll_locked(ifp, cmd, count);
2724         BGE_UNLOCK(sc);
2725 }
2726
2727 static void
2728 bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2729 {
2730         struct bge_softc *sc = ifp->if_softc;
2731
2732         BGE_LOCK_ASSERT(sc);
2733
2734         sc->rxcycles = count;
2735         bge_rxeof(sc);
2736         bge_txeof(sc);
2737         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2738                 bge_start_locked(ifp);
2739
2740         if (cmd == POLL_AND_CHECK_STATUS) {
2741                 uint32_t statusword;
2742
2743                 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2744                     sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2745
2746                 statusword = atomic_readandclear_32(
                         &sc->bge_ldata.bge_status_block->bge_status);
2747
2748                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2749                     sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2750                     statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2751                         bge_link_upd(sc);
2752
2753                 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2754                     sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2755         }
2756 }
2757 #endif /* DEVICE_POLLING */
2758
2759 static void
2760 bge_intr(xsc)
2761         void *xsc;
2762 {
2763         struct bge_softc *sc;
2764         struct ifnet *ifp;
2765         uint32_t statusword;
2766
2767         sc = xsc;
2768
2769         BGE_LOCK(sc);
2770
2771         ifp = sc->bge_ifp;
2772
2773 #ifdef DEVICE_POLLING
2774         if (ifp->if_capenable & IFCAP_POLLING) {
2775                 BGE_UNLOCK(sc);
2776                 return;
2777         }
2778 #endif
2779
2780         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2781             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2782
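             /*
              * Read and clear the status word in one atomic step,
              * presumably so an update DMA'd in by the chip between a
              * separate read and clear cannot be lost.
              */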
2783         statusword =
2784             atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2785
2786 #ifdef notdef
2787         /* Avoid this for now -- checking this register is expensive. */
2788         /* Make sure this is really our interrupt. */
2789         if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2790                 return;
2791 #endif
2792         /* Ack interrupt and stop others from occurring. */
2793         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2794
2795         if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2796             sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2797             statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2798                 bge_link_upd(sc);
2799
2800         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2801                 /* Check RX return ring producer/consumer */
2802                 bge_rxeof(sc);
2803
2804                 /* Check TX ring producer/consumer */
2805                 bge_txeof(sc);
2806         }
2807
2808         /* Re-enable interrupts. */
2809         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2810
2811         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2812             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2813                 bge_start_locked(ifp);
2814
2815         BGE_UNLOCK(sc);
2816
2817         return;
2818 }
2819
2820 static void
2821 bge_tick_locked(sc)
2822         struct bge_softc *sc;
2823 {
2824         struct mii_data *mii = NULL;
2825         struct ifnet *ifp;
2826
2827         BGE_LOCK_ASSERT(sc);
2828
2829         ifp = sc->bge_ifp;
2830
2831         if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2832             sc->bge_asicrev == BGE_ASICREV_BCM5750)
2833                 bge_stats_update_regs(sc);
2834         else
2835                 bge_stats_update(sc);
2836
2837         if (!sc->bge_tbi) {
2838                 mii = device_get_softc(sc->bge_miibus);
2839                 mii_tick(mii);
2840         }
2841
2842         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2843 }
2844
2845 static void
2846 bge_tick(xsc)
2847         void *xsc;
2848 {
2849         struct bge_softc *sc;
2850
2851         sc = xsc;
2852
2853         BGE_LOCK(sc);
2854         bge_tick_locked(sc);
2855         BGE_UNLOCK(sc);
2856 }
2857
2858 static void
2859 bge_stats_update_regs(sc)
2860         struct bge_softc *sc;
2861 {
2862         struct ifnet *ifp;
2863         struct bge_mac_stats_regs stats;
2864         u_int32_t *s;
2865         int i;
2866
2867         ifp = sc->bge_ifp;
2868
2869         s = (u_int32_t *)&stats;
2870         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2871                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2872                 s++;
2873         }
2874
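        /*
         * The chip counters are cumulative, so the expression below
         * effectively replaces if_collisions with the current hardware
         * total rather than adding a per-call delta to it.
         */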
2875         ifp->if_collisions +=
2876            (stats.dot3StatsSingleCollisionFrames +
2877            stats.dot3StatsMultipleCollisionFrames +
2878            stats.dot3StatsExcessiveCollisions +
2879            stats.dot3StatsLateCollisions) -
2880            ifp->if_collisions;
2881
2882         return;
2883 }
2884
2885 static void
2886 bge_stats_update(sc)
2887         struct bge_softc *sc;
2888 {
2889         struct ifnet *ifp;
2890         bus_size_t stats;
2891
2892         ifp = sc->bge_ifp;
2893
2894         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2895
2896 #define READ_STAT(sc, stats, stat) \
2897         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2898
2899         ifp->if_collisions +=
2900            (READ_STAT(sc, stats,
2901                 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2902             READ_STAT(sc, stats,
2903                 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2904             READ_STAT(sc, stats,
2905                 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2906             READ_STAT(sc, stats,
2907                 txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2908            ifp->if_collisions;
2909
2910 #undef READ_STAT
2911
2912 #ifdef notdef
2913         ifp->if_collisions +=
2914            (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2915            sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2916            sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2917            sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2918            ifp->if_collisions;
2919 #endif
2920
2921         return;
2922 }
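
/*
 * For illustration, what a single READ_STAT() above expands to (a sketch
 * with a hypothetical wrapper name): a 4-byte CSR read from the shared
 * memory window, at the statistics block base plus the offset of the
 * counter within struct bge_stats.
 */
#ifdef notdef
static uint32_t
bge_example_read_one_stat(struct bge_softc *sc)
{
        return (CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK +
            offsetof(struct bge_stats,
            txstats.dot3StatsSingleCollisionFrames.bge_addr_lo)));
}
#endif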
2923
2924 /*
2925  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2926  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2927  * but when such padded frames employ the bge IP/TCP checksum offload,
2928  * the hardware checksum assist gives incorrect results (possibly
2929  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2930  * If we pad such runts with zeros, the onboard checksum comes out correct.
2931  */
2932 static __inline int
2933 bge_cksum_pad(struct mbuf *m)
2934 {
2935         int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2936         struct mbuf *last;
2937
2938         /* If there's only the packet-header and we can pad there, use it. */
2939         if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2940             M_TRAILINGSPACE(m) >= padlen) {
2941                 last = m;
2942         } else {
2943                 /*
2944                  * Walk packet chain to find last mbuf. We will either
2945                  * pad there, or append a new mbuf and pad it.
2946                  */
2947                 for (last = m; last->m_next != NULL; last = last->m_next);
2948                 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2949                         /* Allocate new empty mbuf, pad it. Compact later. */
2950                         struct mbuf *n;
2951
2952                         MGET(n, M_DONTWAIT, MT_DATA);
2953                         if (n == NULL)
2954                                 return (ENOBUFS);
2955                         n->m_len = 0;
2956                         last->m_next = n;
2957                         last = n;
2958                 }
2959         }
2960         
2961         /* Now zero the pad area, to avoid the bge cksum-assist bug. */
2962         memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2963         last->m_len += padlen;
2964         m->m_pkthdr.len += padlen;
2965
2966         return (0);
2967 }
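
/*
 * Worked example (illustrative; the helper name is hypothetical): a
 * minimal UDP/IPv4 frame is 14 (Ethernet) + 20 (IP) + 8 (UDP) = 42
 * bytes, which is below ETHER_MIN_NOPAD (60), so 18 zero bytes must be
 * appended before the checksum offload request is handed to the chip.
 */
#ifdef notdef
static int
bge_example_pad_runt(struct mbuf *m)
{
        if (m->m_pkthdr.len < ETHER_MIN_NOPAD)
                return (bge_cksum_pad(m));      /* appends the zero pad */
        return (0);
}
#endif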
2968
2969 /*
2970  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2971  * pointers to descriptors.
2972  */
2973 static int
2974 bge_encap(sc, m_head, txidx)
2975         struct bge_softc *sc;
2976         struct mbuf *m_head;
2977         uint32_t *txidx;
2978 {
2979         bus_dma_segment_t       segs[BGE_NSEG_NEW];
2980         bus_dmamap_t            map;
2981         struct bge_tx_bd        *d = NULL;
2982         struct m_tag            *mtag;
2983         uint32_t                idx = *txidx;
2984         uint16_t                csum_flags = 0;
2985         int                     nsegs, i, error;
2986
2987         if (m_head->m_pkthdr.csum_flags) {
2988                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2989                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2990                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
2991                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2992                         if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
2993                             bge_cksum_pad(m_head) != 0)
2994                                 return (ENOBUFS);
2995                 }
2996                 if (m_head->m_flags & M_LASTFRAG)
2997                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2998                 else if (m_head->m_flags & M_FRAG)
2999                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3000         }
3001
3002         mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
3003
3004         map = sc->bge_cdata.bge_tx_dmamap[idx];
3005         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
3006             m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3007         if (error) {
3008                 if (error == EFBIG) {
3009                         struct mbuf *m0;
3010
3011                         m0 = m_defrag(m_head, M_DONTWAIT);
3012                         if (m0 == NULL)
3013                                 return (ENOBUFS);
3014                         m_head = m0;
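                        /*
                         * XXX: if m_defrag() built a new chain, the
                         * caller's copy of the mbuf pointer is now stale;
                         * should the reload below fail, the caller will
                         * re-queue a pointer to freed memory.
                         */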
3015                         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
3016                             map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3017                 }
3018                 if (error)
3019                         return (error); 
3020         }
3021
3022         /*
3023          * Sanity check: avoid coming within 16 descriptors
3024          * of the end of the ring.
3025          */
3026         if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3027                 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3028                 return (ENOBUFS);
3029         }
3030
3031         bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3032
3033         for (i = 0; ; i++) {
3034                 d = &sc->bge_ldata.bge_tx_ring[idx];
3035                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3036                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3037                 d->bge_len = segs[i].ds_len;
3038                 d->bge_flags = csum_flags;
3039                 if (i == nsegs - 1)
3040                         break;
3041                 BGE_INC(idx, BGE_TX_RING_CNT);
3042         }
3043
3044         /* Mark the last segment as end of packet... */
3045         d->bge_flags |= BGE_TXBDFLAG_END;
3046         /* ... and put VLAN tag into first segment.  */
3047         d = &sc->bge_ldata.bge_tx_ring[*txidx];
3048         if (mtag != NULL) {
3049                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3050                 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3051         } else
3052                 d->bge_vlan_tag = 0;
3053
3054         /*
3055          * Ensure that the map for this transmission
3056          * is placed at the array index of the last descriptor
3057          * in this chain.
3058          */
3059         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3060         sc->bge_cdata.bge_tx_dmamap[idx] = map;
3061         sc->bge_cdata.bge_tx_chain[idx] = m_head;
3062         sc->bge_txcnt += nsegs;
3063
3064         BGE_INC(idx, BGE_TX_RING_CNT);
3065         *txidx = idx;
3066
3067         return (0);
3068 }
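
/*
 * For reference: BGE_INC() above advances a ring index with wrap-around.
 * A sketch of the idea (an assumption -- the real macro lives in
 * if_bgereg.h and may differ in detail):
 *
 *      #define BGE_INC(x, y)   ((x) = ((x) + 1) % (y))
 */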
3069
3070 /*
3071  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3072  * to the mbuf data regions directly in the transmit descriptors.
3073  */
3074 static void
3075 bge_start_locked(ifp)
3076         struct ifnet *ifp;
3077 {
3078         struct bge_softc *sc;
3079         struct mbuf *m_head = NULL;
3080         uint32_t prodidx;
3081         int count = 0;
3082
3083         sc = ifp->if_softc;
3084
3085         if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3086                 return;
3087
3088         prodidx = sc->bge_tx_prodidx;
3089
3090         while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3091                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3092                 if (m_head == NULL)
3093                         break;
3094
3095                 /*
3096                  * XXX
3097                  * The code inside the if() block is never reached since we
3098                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3099                  * requests to checksum TCP/UDP in a fragmented packet.
3100                  *
3101                  * XXX
3102                  * safety overkill.  If this is a fragmented packet chain
3103                  * with delayed TCP/UDP checksums, then only encapsulate
3104                  * it if we have enough descriptors to handle the entire
3105                  * chain at once.
3106                  * (paranoia -- may not actually be needed)
3107                  */
3108                 if (m_head->m_flags & M_FIRSTFRAG &&
3109                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3110                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3111                             m_head->m_pkthdr.csum_data + 16) {
3112                                 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3113                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3114                                 break;
3115                         }
3116                 }
3117
3118                 /*
3119                  * Pack the data into the transmit ring. If we
3120                  * don't have room, set the OACTIVE flag and wait
3121                  * for the NIC to drain the ring.
3122                  */
3123                 if (bge_encap(sc, m_head, &prodidx)) {
3124                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3125                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3126                         break;
3127                 }
3128                 ++count;
3129
3130                 /*
3131                  * If there's a BPF listener, bounce a copy of this frame
3132                  * to him.
3133                  */
3134                 BPF_MTAP(ifp, m_head);
3135         }
3136
3137         if (count == 0) {
3138                 /* no packets were dequeued */
3139                 return;
3140         }
3141
3142         /* Transmit */
3143         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3144         /* 5700 b2 errata */
3145         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3146                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3147
3148         sc->bge_tx_prodidx = prodidx;
3149
3150         /*
3151          * Set a timeout in case the chip goes out to lunch.
3152          */
3153         ifp->if_timer = 5;
3154
3155         return;
3156 }
3157
3158 /*
3159  * Locking wrapper for the transmit routine: acquire the driver lock,
3160  * then hand the interface off to bge_start_locked().
3161  */
3162 static void
3163 bge_start(ifp)
3164         struct ifnet *ifp;
3165 {
3166         struct bge_softc *sc;
3167
3168         sc = ifp->if_softc;
3169         BGE_LOCK(sc);
3170         bge_start_locked(ifp);
3171         BGE_UNLOCK(sc);
3172 }
3173
3174 static void
3175 bge_init_locked(sc)
3176         struct bge_softc *sc;
3177 {
3178         struct ifnet *ifp;
3179         u_int16_t *m;
3180
3181         BGE_LOCK_ASSERT(sc);
3182
3183         ifp = sc->bge_ifp;
3184
3185         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3186                 return;
3187
3188         /* Cancel pending I/O and flush buffers. */
3189         bge_stop(sc);
3190         bge_reset(sc);
3191         bge_chipinit(sc);
3192
3193         /*
3194          * Init the various state machines, ring
3195          * control blocks and firmware.
3196          */
3197         if (bge_blockinit(sc)) {
3198                 device_printf(sc->bge_dev, "initialization failure\n");
3199                 return;
3200         }
3201
3202         ifp = sc->bge_ifp;
3203
3204         /* Specify MTU. */
3205         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3206             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3207
3208         /* Load our MAC address. */
3209         m = (u_int16_t *)IF_LLADDR(sc->bge_ifp);
3210         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3211         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
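        /*
         * Worked example: for MAC address 00:11:22:33:44:55 the two writes
         * above yield BGE_MAC_ADDR1_LO = 0x0011 and BGE_MAC_ADDR1_HI =
         * 0x22334455; the htons() calls make this hold on both big- and
         * little-endian hosts.
         */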
3212
3213         /* Enable or disable promiscuous mode as needed. */
3214         if (ifp->if_flags & IFF_PROMISC) {
3215                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3216         } else {
3217                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3218         }
3219
3220         /* Program multicast filter. */
3221         bge_setmulti(sc);
3222
3223         /* Init RX ring. */
3224         bge_init_rx_ring_std(sc);
3225
3226         /*
3227          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3228          * memory to ensure that the chip has in fact read the first
3229          * entry of the ring.
3230          */
3231         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3232                 u_int32_t               v, i;
3233                 for (i = 0; i < 10; i++) {
3234                         DELAY(20);
3235                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3236                         if (v == (MCLBYTES - ETHER_ALIGN))
3237                                 break;
3238                 }
3239                 if (i == 10)
3240                         device_printf (sc->bge_dev,
3241                             "5705 A0 chip failed to load RX ring\n");
3242         }
3243
3244         /* Init jumbo RX ring. */
3245         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3246                 bge_init_rx_ring_jumbo(sc);
3247
3248         /* Init our RX return ring index */
3249         sc->bge_rx_saved_considx = 0;
3250
3251         /* Init TX ring. */
3252         bge_init_tx_ring(sc);
3253
3254         /* Turn on transmitter */
3255         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3256
3257         /* Turn on receiver */
3258         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3259
3260         /* Tell firmware we're alive. */
3261         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3262
3263 #ifdef DEVICE_POLLING
3264         /* Disable interrupts if we are polling. */
3265         if (ifp->if_capenable & IFCAP_POLLING) {
3266                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3267                     BGE_PCIMISCCTL_MASK_PCI_INTR);
3268                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3269                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3270                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3271         } else
3272 #endif
3273         {
3274                 /* Enable host interrupts. */
3275                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3276                     BGE_PCIMISCCTL_CLEAR_INTA);
3277                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3278                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3279         }
3280
3281         bge_ifmedia_upd(ifp);
3282
3283         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3284         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3285
3286         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3287 }
3288
3289 static void
3290 bge_init(xsc)
3291         void *xsc;
3292 {
3293         struct bge_softc *sc = xsc;
3294
3295         BGE_LOCK(sc);
3296         bge_init_locked(sc);
3297         BGE_UNLOCK(sc);
3298
3299         return;
3300 }
3301
3302 /*
3303  * Set media options.
3304  */
3305 static int
3306 bge_ifmedia_upd(ifp)
3307         struct ifnet *ifp;
3308 {
3309         struct bge_softc *sc;
3310         struct mii_data *mii;
3311         struct ifmedia *ifm;
3312
3313         sc = ifp->if_softc;
3314         ifm = &sc->bge_ifmedia;
3315
3316         /* If this is a 1000baseX NIC, enable the TBI port. */
3317         if (sc->bge_tbi) {
3318                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3319                         return(EINVAL);
3320                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3321                 case IFM_AUTO:
3322 #ifndef BGE_FAKE_AUTONEG
3323                         /*
3324                          * The BCM5704 ASIC appears to have a special
3325                          * mechanism for programming the autoneg
3326                          * advertisement registers in TBI mode.
3327                          */
3328                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3329                                 uint32_t sgdig;
3330                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3331                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3332                                 sgdig |= BGE_SGDIGCFG_AUTO|
3333                                     BGE_SGDIGCFG_PAUSE_CAP|
3334                                     BGE_SGDIGCFG_ASYM_PAUSE;
3335                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3336                                     sgdig|BGE_SGDIGCFG_SEND);
3337                                 DELAY(5);
3338                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3339                         }
3340 #endif
3341                         break;
3342                 case IFM_1000_SX:
3343                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3344                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
3345                                     BGE_MACMODE_HALF_DUPLEX);
3346                         } else {
3347                                 BGE_SETBIT(sc, BGE_MAC_MODE,
3348                                     BGE_MACMODE_HALF_DUPLEX);
3349                         }
3350                         break;
3351                 default:
3352                         return(EINVAL);
3353                 }
3354                 return(0);
3355         }
3356
3357         mii = device_get_softc(sc->bge_miibus);
3358         if (mii->mii_instance) {
3359                 struct mii_softc *miisc;
3360                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3361                     miisc = LIST_NEXT(miisc, mii_list))
3362                         mii_phy_reset(miisc);
3363         }
3364         mii_mediachg(mii);
3365
3366         return(0);
3367 }
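
/*
 * For reference, the TBI cases above are reached through ifconfig(8):
 * "ifconfig bge0 media autoselect" lands in IFM_AUTO, while
 * "ifconfig bge0 media 1000baseSX mediaopt full-duplex" lands in
 * IFM_1000_SX with the full-duplex branch taken.
 */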
3368
3369 /*
3370  * Report current media status.
3371  */
3372 static void
3373 bge_ifmedia_sts(ifp, ifmr)
3374         struct ifnet *ifp;
3375         struct ifmediareq *ifmr;
3376 {
3377         struct bge_softc *sc;
3378         struct mii_data *mii;
3379
3380         sc = ifp->if_softc;
3381
3382         if (sc->bge_tbi) {
3383                 ifmr->ifm_status = IFM_AVALID;
3384                 ifmr->ifm_active = IFM_ETHER;
3385                 if (CSR_READ_4(sc, BGE_MAC_STS) &
3386                     BGE_MACSTAT_TBI_PCS_SYNCHED)
3387                         ifmr->ifm_status |= IFM_ACTIVE;
3388                 ifmr->ifm_active |= IFM_1000_SX;
3389                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3390                         ifmr->ifm_active |= IFM_HDX;
3391                 else
3392                         ifmr->ifm_active |= IFM_FDX;
3393                 return;
3394         }
3395
3396         mii = device_get_softc(sc->bge_miibus);
3397         mii_pollstat(mii);
3398         ifmr->ifm_active = mii->mii_media_active;
3399         ifmr->ifm_status = mii->mii_media_status;
3400
3401         return;
3402 }
3403
3404 static int
3405 bge_ioctl(ifp, command, data)
3406         struct ifnet *ifp;
3407         u_long command;
3408         caddr_t data;
3409 {
3410         struct bge_softc *sc = ifp->if_softc;
3411         struct ifreq *ifr = (struct ifreq *) data;
3412         int mask, error = 0;
3413         struct mii_data *mii;
3414
3415         switch(command) {
3416         case SIOCSIFMTU:
3417                 /* Disallow jumbo frames on 5705 and 5750. */
3418                 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3419                       sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3420                     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3421                         error = EINVAL;
3422                 else {
3423                         ifp->if_mtu = ifr->ifr_mtu;
3424                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3425                         bge_init(sc);
3426                 }
3427                 break;
3428         case SIOCSIFFLAGS:
3429                 BGE_LOCK(sc);
3430                 if (ifp->if_flags & IFF_UP) {
3431                         /*
3432                          * If only the state of the PROMISC flag changed,
3433                          * then just use the 'set promisc mode' command
3434                          * instead of reinitializing the entire NIC. Doing
3435                          * a full re-init means reloading the firmware and
3436                          * waiting for it to start up, which may take a
3437                          * second or two.
3438                          */
3439                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3440                             ifp->if_flags & IFF_PROMISC &&
3441                             !(sc->bge_if_flags & IFF_PROMISC)) {
3442                                 BGE_SETBIT(sc, BGE_RX_MODE,
3443                                     BGE_RXMODE_RX_PROMISC);
3444                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3445                             !(ifp->if_flags & IFF_PROMISC) &&
3446                             sc->bge_if_flags & IFF_PROMISC) {
3447                                 BGE_CLRBIT(sc, BGE_RX_MODE,
3448                                     BGE_RXMODE_RX_PROMISC);
3449                         } else
3450                                 bge_init_locked(sc);
3451                 } else {
3452                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3453                                 bge_stop(sc);
3454                         }
3455                 }
3456                 sc->bge_if_flags = ifp->if_flags;
3457                 BGE_UNLOCK(sc);
3458                 error = 0;
3459                 break;
3460         case SIOCADDMULTI:
3461         case SIOCDELMULTI:
3462                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3463                         BGE_LOCK(sc);
3464                         bge_setmulti(sc);
3465                         BGE_UNLOCK(sc);
3466                         error = 0;
3467                 }
3468                 break;
3469         case SIOCSIFMEDIA:
3470         case SIOCGIFMEDIA:
3471                 if (sc->bge_tbi) {
3472                         error = ifmedia_ioctl(ifp, ifr,
3473                             &sc->bge_ifmedia, command);
3474                 } else {
3475                         mii = device_get_softc(sc->bge_miibus);
3476                         error = ifmedia_ioctl(ifp, ifr,
3477                             &mii->mii_media, command);
3478                 }
3479                 break;
3480         case SIOCSIFCAP:
3481                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3482 #ifdef DEVICE_POLLING
3483                 if (mask & IFCAP_POLLING) {
3484                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
3485                                 error = ether_poll_register(bge_poll, ifp);
3486                                 if (error)
3487                                         return(error);
3488                                 BGE_LOCK(sc);
3489                                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3490                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
3491                                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3492                                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3493                                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3494                                 ifp->if_capenable |= IFCAP_POLLING;   
3495                                 BGE_UNLOCK(sc);
3496                         } else {
3497                                 error = ether_poll_deregister(ifp);
3498                                 /* Enable interrupt even in error case */
3499                                 BGE_LOCK(sc);
3500                                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3501                                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3502                                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3503                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
3504                                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3505                                 ifp->if_capenable &= ~IFCAP_POLLING;
3506                                 BGE_UNLOCK(sc);
3507                         }
3508                 }
3509 #endif
3510                 if (mask & IFCAP_HWCSUM) {
3511                         ifp->if_capenable ^= IFCAP_HWCSUM;
3512                         if (IFCAP_HWCSUM & ifp->if_capenable &&
3513                             IFCAP_HWCSUM & ifp->if_capabilities)
3514                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
3515                         else
3516                                 ifp->if_hwassist = 0;
3517                 }
3518                 break;
3519         default:
3520                 error = ether_ioctl(ifp, command, data);
3521                 break;
3522         }
3523
3524         return(error);
3525 }
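
/*
 * Worked example for the SIOCSIFCAP path above: with if_capenable set to
 * (IFCAP_HWCSUM | IFCAP_POLLING) and a request of just IFCAP_HWCSUM,
 * mask = reqcap ^ capenable = IFCAP_POLLING, so only the polling
 * capability is toggled and the checksum setting is left untouched.
 */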
3526
3527 static void
3528 bge_watchdog(ifp)
3529         struct ifnet *ifp;
3530 {
3531         struct bge_softc *sc;
3532
3533         sc = ifp->if_softc;
3534
3535         if_printf(ifp, "watchdog timeout -- resetting\n");
3536
3537         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3538         bge_init(sc);
3539
3540         ifp->if_oerrors++;
3541
3542         return;
3543 }
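
/*
 * For context, the legacy watchdog contract assumed above (hooked up at
 * attach time, which is outside this excerpt): bge_start_locked() arms
 * ifp->if_timer, the network stack decrements it once per second, and
 * this routine runs if it reaches zero before the pending transmissions
 * complete.
 */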
3544
3545 /*
3546  * Stop the adapter and free any mbufs allocated to the
3547  * RX and TX lists.
3548  */
3549 static void
3550 bge_stop(sc)
3551         struct bge_softc *sc;
3552 {
3553         struct ifnet *ifp;
3554         struct ifmedia_entry *ifm;
3555         struct mii_data *mii = NULL;
3556         int mtmp, itmp;
3557
3558         BGE_LOCK_ASSERT(sc);
3559
3560         ifp = sc->bge_ifp;
3561
3562         if (!sc->bge_tbi)
3563                 mii = device_get_softc(sc->bge_miibus);
3564
3565         callout_stop(&sc->bge_stat_ch);
3566
3567         /*
3568          * Disable all of the receiver blocks
3569          */
3570         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3571         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3572         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3573         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3574             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3575                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3576         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3577         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3578         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3579
3580         /*
3581          * Disable all of the transmit blocks
3582          */
3583         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3584         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3585         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3586         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3587         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3588         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3589             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3590                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3591         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3592
3593         /*
3594          * Shut down all of the memory managers and related
3595          * state machines.
3596          */
3597         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3598         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3599         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3600             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3601                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3602         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3603         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3604         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3605             sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3606                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3607                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3608         }
3609
3610         /* Disable host interrupts. */
3611         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3612         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3613
3614         /*
3615          * Tell firmware we're shutting down.
3616          */
3617         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3618
3619         /* Free the RX lists. */
3620         bge_free_rx_ring_std(sc);
3621
3622         /* Free jumbo RX list. */
3623         if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3624             sc->bge_asicrev != BGE_ASICREV_BCM5750)
3625                 bge_free_rx_ring_jumbo(sc);
3626
3627         /* Free TX buffers. */
3628         bge_free_tx_ring(sc);
3629
3630         /*
3631          * Isolate/power down the PHY, but leave the media selection
3632          * unchanged so that things will be put back to normal when
3633          * we bring the interface back up.
3634          */
3635         if (!sc->bge_tbi) {
3636                 itmp = ifp->if_flags;
3637                 ifp->if_flags |= IFF_UP;
3638                 /*
3639                  * If we are called from bge_detach(), mii is already NULL.
3640                  */
3641                 if (mii != NULL) {
3642                         ifm = mii->mii_media.ifm_cur;
3643                         mtmp = ifm->ifm_media;
3644                         ifm->ifm_media = IFM_ETHER|IFM_NONE;
3645                         mii_mediachg(mii);
3646                         ifm->ifm_media = mtmp;
3647                 }
3648                 ifp->if_flags = itmp;
3649         }
3650
3651         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3652
3653         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3654
3655         return;
3656 }
3657
3658 /*
3659  * Stop all chip I/O so that the kernel's probe routines don't
3660  * get confused by errant DMAs when rebooting.
3661  */
3662 static void
3663 bge_shutdown(dev)
3664         device_t dev;
3665 {
3666         struct bge_softc *sc;
3667
3668         sc = device_get_softc(dev);
3669
3670         BGE_LOCK(sc);
3671         bge_stop(sc);
3672         bge_reset(sc);
3673         BGE_UNLOCK(sc);
3674
3675         return;
3676 }
3677
3678 static int
3679 bge_suspend(device_t dev)
3680 {
3681         struct bge_softc *sc;
3682
3683         sc = device_get_softc(dev);
3684         BGE_LOCK(sc);
3685         bge_stop(sc);
3686         BGE_UNLOCK(sc);
3687
3688         return (0);
3689 }
3690
3691 static int
3692 bge_resume(device_t dev)
3693 {
3694         struct bge_softc *sc;
3695         struct ifnet *ifp;
3696
3697         sc = device_get_softc(dev);
3698         BGE_LOCK(sc);
3699         ifp = sc->bge_ifp;
3700         if (ifp->if_flags & IFF_UP) {
3701                 bge_init_locked(sc);
3702                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3703                         bge_start_locked(ifp);
3704         }
3705         BGE_UNLOCK(sc);
3706
3707         return (0);
3708 }
3709
3710 static void
3711 bge_link_upd(sc)
3712         struct bge_softc *sc;
3713 {
3714         struct mii_data *mii;
3715         uint32_t link, status;
3716
3717         BGE_LOCK_ASSERT(sc);
3718
3719         /*
3720          * Process link state changes.
3721          * Grrr. The link status word in the status block does
3722          * not work correctly on the BCM5700 rev AX and BX chips,
3723          * according to all available information. Hence, we have
3724          * to enable MII interrupts in order to properly obtain
3725          * async link changes. Unfortunately, this also means that
3726          * we have to read the MAC status register to detect link
3727          * changes, thereby adding an additional register access to
3728          * the interrupt handler.
3729          *
3730          * XXX: perhaps the link state detection procedure used for
3731          * BGE_CHIPID_BCM5700_B1 could be used for other BCM5700 revisions.
3732          */
3733
3734         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3735             sc->bge_chipid != BGE_CHIPID_BCM5700_B1) {
3736                 status = CSR_READ_4(sc, BGE_MAC_STS);
3737                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3738                         callout_stop(&sc->bge_stat_ch);
3739                         bge_tick_locked(sc);
3740
3741                         mii = device_get_softc(sc->bge_miibus);
3742                         if (!sc->bge_link &&
3743                             mii->mii_media_status & IFM_ACTIVE &&
3744                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3745                                 sc->bge_link++;
3746                                 if (bootverbose)
3747                                         if_printf(sc->bge_ifp, "link UP\n");
3748                         } else if (sc->bge_link &&
3749                             (!(mii->mii_media_status & IFM_ACTIVE) ||
3750                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3751                                 sc->bge_link = 0;
3752                                 if (bootverbose)
3753                                         if_printf(sc->bge_ifp, "link DOWN\n");
3754                         }
3755
3756                         /* Clear the interrupt */
3757                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3758                             BGE_EVTENB_MI_INTERRUPT);
3759                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3760                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3761                             BRGPHY_INTRS);
3762                 }
3763                 return;
3764         } 
3765
3766         if (sc->bge_tbi) {
3767                 /*
3768                  * Sometimes PCS encoding errors are detected in
3769                  * TBI mode (on fiber NICs), and for some reason
3770                  * the chip will signal them as link changes.
3771                  * If we get a link change event, but the 'PCS
3772                  * encoding error' bit in the MAC status register
3773                  * is set, don't bother doing a link check.
3774                  * This avoids spurious "link UP" messages
3775                  * that sometimes appear on fiber NICs during
3776                  * periods of heavy traffic. (There should be no
3777                  * effect on copper NICs.)
3778                  */
3779                 status = CSR_READ_4(sc, BGE_MAC_STS);
3780                 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
3781                     BGE_MACSTAT_MI_COMPLETE))) {
3782                         if (!sc->bge_link &&
3783                             (status & BGE_MACSTAT_TBI_PCS_SYNCHED)) {
3784                                 sc->bge_link++;
3785                                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3786                                         BGE_CLRBIT(sc, BGE_MAC_MODE,
3787                                             BGE_MACMODE_TBI_SEND_CFGS);
3788                                 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3789                                 if (bootverbose)
3790                                         if_printf(sc->bge_ifp, "link UP\n");
3791                         } else if (sc->bge_link) {
3792                                 sc->bge_link = 0;
3793                                 if (bootverbose)
3794                                         if_printf(sc->bge_ifp, "link DOWN\n");
3795                         }
3796                 }
3797         } else {
3798         /*
3799          * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3800          * bit in the status word always set. Work around this bug by
3801          * reading the PHY link status directly.
3802          */
3803                 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3804
3805                 if (link != sc->bge_link ||
3806                     sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3807                         callout_stop(&sc->bge_stat_ch);
3808                         bge_tick_locked(sc);
3809
3810                         mii = device_get_softc(sc->bge_miibus);
3811                         if (!sc->bge_link &&
3812                             mii->mii_media_status & IFM_ACTIVE &&
3813                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3814                                 sc->bge_link++;
3815                                 if (bootverbose)
3816                                         if_printf(sc->bge_ifp, "link UP\n");
3817                         } else if (sc->bge_link &&
3818                             (!(mii->mii_media_status & IFM_ACTIVE) ||
3819                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3820                                 sc->bge_link = 0;
3821                                 if (bootverbose)
3822                                         if_printf(sc->bge_ifp, "link DOWN\n");
3823                         }
3824                 }
3825         }
3826
3827         /* Clear the interrupt */
3828         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3829             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3830             BGE_MACSTAT_LINK_CHANGED);
3831 }
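
/*
 * Summary of the strategies above: BCM5700 parts other than revision B1
 * use MII interrupts because their status word is unreliable, TBI
 * (fiber) parts consult the MAC status register, and the remaining
 * copper parts read the PHY link bit in BGE_MI_STS directly, since some
 * chips latch BGE_STATFLAG_LINKSTATE_CHANGED permanently.
 */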