/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
#define ETHER_MIN_NOPAD         (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
static struct bge_type {
        uint16_t        bge_vid;
        uint16_t        bge_did;
} bge_devs[] = {
        { ALTEON_VENDORID,      ALTEON_DEVICEID_BCM5700 },
        { ALTEON_VENDORID,      ALTEON_DEVICEID_BCM5701 },

        { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC1000 },
        { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC1002 },
        { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC9100 },

        { APPLE_VENDORID,       APPLE_DEVICE_BCM5701 },

        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5700 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5701 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702_ALT },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702X },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703_ALT },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703X },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704C },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704S },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704S_ALT },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705F },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705K },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705M_ALT },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5714C },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5714S },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5715 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5715S },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5720 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5721 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5750 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5750M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751F },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5752 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5752M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753F },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5754 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5754M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5755 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5755M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5780 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5780S },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5781 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5782 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5786 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787M },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5788 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5789 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5901 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5901A2 },
        { BCOM_VENDORID,        BCOM_DEVICEID_BCM5903M },

        { SK_VENDORID,          SK_DEVICEID_ALTIMA },

        { TC_VENDORID,          TC_DEVICEID_3C985 },
        { TC_VENDORID,          TC_DEVICEID_3C996 },

        { 0, 0 }
};

static const struct bge_vendor {
        uint16_t        v_id;
        const char      *v_name;
} bge_vendors[] = {
        { ALTEON_VENDORID,      "Alteon" },
        { ALTIMA_VENDORID,      "Altima" },
        { APPLE_VENDORID,       "Apple" },
        { BCOM_VENDORID,        "Broadcom" },
        { SK_VENDORID,          "SysKonnect" },
        { TC_VENDORID,          "3Com" },

        { 0, NULL }
};

static const struct bge_revision {
        uint32_t        br_chipid;
        const char      *br_name;
} bge_revisions[] = {
        { BGE_CHIPID_BCM5700_A0,        "BCM5700 A0" },
        { BGE_CHIPID_BCM5700_A1,        "BCM5700 A1" },
        { BGE_CHIPID_BCM5700_B0,        "BCM5700 B0" },
        { BGE_CHIPID_BCM5700_B1,        "BCM5700 B1" },
        { BGE_CHIPID_BCM5700_B2,        "BCM5700 B2" },
        { BGE_CHIPID_BCM5700_B3,        "BCM5700 B3" },
        { BGE_CHIPID_BCM5700_ALTIMA,    "BCM5700 Altima" },
        { BGE_CHIPID_BCM5700_C0,        "BCM5700 C0" },
        { BGE_CHIPID_BCM5701_A0,        "BCM5701 A0" },
        { BGE_CHIPID_BCM5701_B0,        "BCM5701 B0" },
        { BGE_CHIPID_BCM5701_B2,        "BCM5701 B2" },
        { BGE_CHIPID_BCM5701_B5,        "BCM5701 B5" },
        { BGE_CHIPID_BCM5703_A0,        "BCM5703 A0" },
        { BGE_CHIPID_BCM5703_A1,        "BCM5703 A1" },
        { BGE_CHIPID_BCM5703_A2,        "BCM5703 A2" },
        { BGE_CHIPID_BCM5703_A3,        "BCM5703 A3" },
        { BGE_CHIPID_BCM5703_B0,        "BCM5703 B0" },
        { BGE_CHIPID_BCM5704_A0,        "BCM5704 A0" },
        { BGE_CHIPID_BCM5704_A1,        "BCM5704 A1" },
        { BGE_CHIPID_BCM5704_A2,        "BCM5704 A2" },
        { BGE_CHIPID_BCM5704_A3,        "BCM5704 A3" },
        { BGE_CHIPID_BCM5704_B0,        "BCM5704 B0" },
        { BGE_CHIPID_BCM5705_A0,        "BCM5705 A0" },
        { BGE_CHIPID_BCM5705_A1,        "BCM5705 A1" },
        { BGE_CHIPID_BCM5705_A2,        "BCM5705 A2" },
        { BGE_CHIPID_BCM5705_A3,        "BCM5705 A3" },
        { BGE_CHIPID_BCM5750_A0,        "BCM5750 A0" },
        { BGE_CHIPID_BCM5750_A1,        "BCM5750 A1" },
        { BGE_CHIPID_BCM5750_A3,        "BCM5750 A3" },
        { BGE_CHIPID_BCM5750_B0,        "BCM5750 B0" },
        { BGE_CHIPID_BCM5750_B1,        "BCM5750 B1" },
        { BGE_CHIPID_BCM5750_C0,        "BCM5750 C0" },
        { BGE_CHIPID_BCM5750_C1,        "BCM5750 C1" },
        { BGE_CHIPID_BCM5750_C2,        "BCM5750 C2" },
        { BGE_CHIPID_BCM5714_A0,        "BCM5714 A0" },
        { BGE_CHIPID_BCM5752_A0,        "BCM5752 A0" },
        { BGE_CHIPID_BCM5752_A1,        "BCM5752 A1" },
        { BGE_CHIPID_BCM5752_A2,        "BCM5752 A2" },
        { BGE_CHIPID_BCM5714_B0,        "BCM5714 B0" },
        { BGE_CHIPID_BCM5714_B3,        "BCM5714 B3" },
        { BGE_CHIPID_BCM5715_A0,        "BCM5715 A0" },
        { BGE_CHIPID_BCM5715_A1,        "BCM5715 A1" },

        { 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
        { BGE_ASICREV_BCM5700,          "unknown BCM5700" },
        { BGE_ASICREV_BCM5701,          "unknown BCM5701" },
        { BGE_ASICREV_BCM5703,          "unknown BCM5703" },
        { BGE_ASICREV_BCM5704,          "unknown BCM5704" },
        { BGE_ASICREV_BCM5705,          "unknown BCM5705" },
        { BGE_ASICREV_BCM5750,          "unknown BCM5750" },
        { BGE_ASICREV_BCM5714_A0,       "unknown BCM5714" },
        { BGE_ASICREV_BCM5752,          "unknown BCM5752" },
        { BGE_ASICREV_BCM5780,          "unknown BCM5780" },
        { BGE_ASICREV_BCM5714,          "unknown BCM5714" },
        { BGE_ASICREV_BCM5755,          "unknown BCM5755" },
        { BGE_ASICREV_BCM5787,          "unknown BCM5787" },

        { 0, NULL }
};

#define BGE_IS_5705_OR_BEYOND(sc)                          \
        ((sc)->bge_asicrev == BGE_ASICREV_BCM5705       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5750       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0    || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5780       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5714       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5752       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5755       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define BGE_IS_575X_PLUS(sc)                               \
        ((sc)->bge_asicrev == BGE_ASICREV_BCM5750       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0    || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5780       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5714       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5752       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5755       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define BGE_IS_5714_FAMILY(sc)                             \
        ((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0    || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5780       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5714)

#define BGE_IS_JUMBO_CAPABLE(sc) \
        ((sc)->bge_asicrev == BGE_ASICREV_BCM5700       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5701       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5703       || \
         (sc)->bge_asicrev == BGE_ASICREV_BCM5704)
const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_tick_locked(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static void bge_intr(void *);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct ifnet *);
static void bge_shutdown(device_t);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setmulti(struct bge_softc *);

static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

static void bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

static device_method_t bge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bge_probe),
        DEVMETHOD(device_attach,        bge_attach),
        DEVMETHOD(device_detach,        bge_detach),
        DEVMETHOD(device_shutdown,      bge_shutdown),
        DEVMETHOD(device_suspend,       bge_suspend),
        DEVMETHOD(device_resume,        bge_resume),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       bge_miibus_statchg),

        { 0, 0 }
};

static driver_t bge_driver = {
        "bge",
        bge_methods,
        sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static int bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
        device_t dev;

        pci_write_config(dev = sc->bge_dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}
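
/*
 * Usage sketch (illustrative only, not a call the driver makes verbatim):
 * the indirect window reaches NIC-internal memory through PCI config
 * space, e.g. reading back one word of the statistics block that
 * bge_chipinit() clears below:
 *
 *      val = bge_readmem_ind(sc, BGE_STATS_BLOCK);
 */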

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
        device_t dev;

        dev = sc->bge_dev;

        pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
        pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

/*
 * Map a single buffer address.
 */
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct bge_dmamap_arg *ctx;

        if (error)
                return;

        ctx = arg;

        if (nseg > ctx->bge_maxsegs) {
                ctx->bge_maxsegs = 0;
                return;
        }

        ctx->bge_busaddr = segs->ds_addr;
}

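/*
 * Note: bus_dmamap_load() resolves the buffer into bus_dma_segment_t's
 * and hands them to this callback. The callers below only ever accept a
 * single segment (ctx.bge_maxsegs == 1), so segs->ds_addr covers the
 * whole mapping.
 */
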
/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
        int i;
        uint32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion. */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        if (i == BGE_TIMEOUT * 10) {
                device_printf(sc->bge_dev, "EEPROM read timed out\n");
                return (1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return (0);
}

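/*
 * The EEPROM data register returns a whole 32-bit word, so the shift in
 * bge_eeprom_getbyte() selects the addressed byte lane: for example,
 * addr = 6 gives addr % 4 = 2, i.e. the byte in bits 23:16 of the word.
 */
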
/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
        int i, error = 0;
        uint8_t byte = 0;

        for (i = 0; i < cnt; i++) {
                error = bge_eeprom_getbyte(sc, off + i, &byte);
                if (error)
                        break;
                *(dest + i) = byte;
        }

        return (error ? 1 : 0);
}

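/*
 * Usage sketch (a hypothetical call mirroring how the attach path reads
 * the station address; BGE_EE_MAC_OFFSET is assumed from if_bgereg.h):
 *
 *      uint8_t eaddr[ETHER_ADDR_LEN];
 *
 *      if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *          BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
 *              device_printf(sc->bge_dev, "cannot read station address\n");
 */
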
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bge_softc *sc;
        uint32_t val, autopoll;
        int i;

        sc = device_get_softc(dev);

        /*
         * Broadcom's own driver always assumes the internal
         * PHY is at GMII address 1. On some chips, the PHY responds
         * to accesses at all addresses, which could cause us to
         * bogusly attach the PHY 32 times at probe time. Always
         * restricting the lookup to address 1 is simpler than
         * trying to figure out which chip revisions should be
         * special-cased.
         */
        if (phy != 1)
                return (0);

        /* Reading with autopolling on may trigger PCI errors. */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
        }

        if (i == BGE_TIMEOUT) {
                if_printf(sc->bge_ifp, "PHY read timed out\n");
                val = 0;
                goto done;
        }

        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return (0);

        return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bge_softc *sc;
        uint32_t autopoll;
        int i;

        sc = device_get_softc(dev);

        /* Accessing the PHY with autopolling on may trigger PCI errors. */
        autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
        }

        if (autopoll & BGE_MIMODE_AUTOPOLL) {
                BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        if (i == BGE_TIMEOUT) {
                if_printf(sc->bge_ifp, "PHY write timed out\n");
                return (0);
        }

        return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
        struct bge_softc *sc;
        struct mii_data *mii;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->bge_miibus);

        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
        else
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

        if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
                BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
        else
                BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct bge_rx_bd *r;
        struct bge_dmamap_arg ctx;
        int error;

        if (m == NULL) {
                m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m_new == NULL)
                        return (ENOBUFS);
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);
        sc->bge_cdata.bge_rx_std_chain[i] = m_new;
        r = &sc->bge_ldata.bge_rx_std_ring[i];
        ctx.bge_maxsegs = 1;
        ctx.sc = sc;
        error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
            sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
            m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
        if (error || ctx.bge_maxsegs == 0) {
                if (m == NULL) {
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                        m_freem(m_new);
                }
                return (ENOMEM);
        }
        r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
        r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
        r->bge_flags = BGE_RXBDFLAG_END;
        r->bge_len = m_new->m_len;
        r->bge_idx = i;

        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
            sc->bge_cdata.bge_rx_std_dmamap[i],
            BUS_DMASYNC_PREREAD);

        return (0);
}

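/*
 * Note on the ETHER_ALIGN adjustment above: offsetting the payload by
 * two bytes lands the IP header on a 32-bit boundary. Chips with the RX
 * alignment bug cannot DMA to such offset buffers, so the adjustment is
 * skipped for them and the RX completion path compensates after the fact.
 */
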
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
        bus_dma_segment_t segs[BGE_NSEG_JUMBO];
        struct bge_extrx_bd *r;
        struct mbuf *m_new = NULL;
        int nsegs;
        int error;

        if (m == NULL) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return (ENOBUFS);

                m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
                if (!(m_new->m_flags & M_EXT)) {
                        m_freem(m_new);
                        return (ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

        if (!sc->bge_rx_alignment_bug)
                m_adj(m_new, ETHER_ALIGN);

        error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
            sc->bge_cdata.bge_rx_jumbo_dmamap[i],
            m_new, segs, &nsegs, BUS_DMA_NOWAIT);
        if (error) {
                if (m == NULL)
                        m_freem(m_new);
                return (error);
        }
        sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

        /*
         * Fill in the extended RX buffer descriptor.
         */
        r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
        r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
        r->bge_idx = i;
        r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
        switch (nsegs) {
        case 4:
                r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
                r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
                r->bge_len3 = segs[3].ds_len;
                /* FALLTHROUGH */
        case 3:
                r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
                r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
                r->bge_len2 = segs[2].ds_len;
                /* FALLTHROUGH */
        case 2:
                r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
                r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
                r->bge_len1 = segs[1].ds_len;
                /* FALLTHROUGH */
        case 1:
                r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
                r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
                r->bge_len0 = segs[0].ds_len;
                break;
        default:
                panic("%s: %d segments\n", __func__, nsegs);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
            sc->bge_cdata.bge_rx_jumbo_dmamap[i],
            BUS_DMASYNC_PREREAD);

        return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_SSLOTS; i++) {
                if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
                        return (ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        sc->bge_std = i - 1;
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

        return (0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_rx_std_dmamap[i],
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_rx_std_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
                        sc->bge_cdata.bge_rx_std_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
                    sizeof(struct bge_rx_bd));
        }
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
                        return (ENOBUFS);
        }

        bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
            sc->bge_cdata.bge_rx_jumbo_ring_map,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        sc->bge_jumbo = i - 1;

        rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
        rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
                                    BGE_RCB_FLAG_USE_EXT_RX_BD);
        CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

        return (0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
        int i;

        for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
                            sc->bge_cdata.bge_rx_jumbo_dmamap[i],
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
                            sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
                        sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
                    sizeof(struct bge_extrx_bd));
        }
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
        int i;

        if (sc->bge_ldata.bge_tx_ring == NULL)
                return;

        for (i = 0; i < BGE_TX_RING_CNT; i++) {
                if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
                        bus_dmamap_sync(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_tx_dmamap[i],
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->bge_cdata.bge_mtag,
                            sc->bge_cdata.bge_tx_dmamap[i]);
                        m_freem(sc->bge_cdata.bge_tx_chain[i]);
                        sc->bge_cdata.bge_tx_chain[i] = NULL;
                }
                bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
                    sizeof(struct bge_tx_bd));
        }
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
        sc->bge_txcnt = 0;
        sc->bge_tx_saved_considx = 0;

        /* Initialize transmit producer index for host-memory send ring. */
        sc->bge_tx_prodidx = 0;
        CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

        /* NIC-memory send ring not used; initialize to zero. */
        CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 b2 errata */
        if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
                CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

        return (0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t hashes[4] = { 0, 0, 0, 0 };
        int h, i;

        BGE_LOCK_ASSERT(sc);

        ifp = sc->bge_ifp;

        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /* First, zot all the existing filters. */
        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

        /* Now program new ones. */
        IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
                    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
                hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
        }
        IF_ADDR_UNLOCK(ifp);

        for (i = 0; i < 4; i++)
                CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

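/*
 * Worked example of the hash above: the two high bits of the 7-bit CRC
 * pick one of the four MAR registers and the low five bits pick the bit
 * within it. A CRC of 0x53 lands in hashes[(0x53 & 0x60) >> 5], i.e.
 * hashes[2] (BGE_MAR2), with bit (0x53 & 0x1F) = 19 set.
 */
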
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
        uint32_t dma_rw_ctl;
        int i;

        /* Set endian type before we access any non-PCI registers. */
        pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

        /*
         * Check the 'ROM failed' bit on the RX CPU to see if
         * self-tests passed.
         */
        if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
                device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
                return (ENODEV);
        }

        /* Clear the MAC control register. */
        CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

        /*
         * Clear the MAC statistics block in the NIC's
         * internal memory.
         */
        for (i = BGE_STATS_BLOCK;
            i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        for (i = BGE_STATUS_BLOCK;
            i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
                BGE_MEMWIN_WRITE(sc, i, 0);

        /* Set up the PCI DMA control register. */
        if (sc->bge_pcie) {
                /* PCI Express bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
        } else if (sc->bge_pcix) {
                /* PCI-X bus */
                if (BGE_IS_5714_FAMILY(sc)) {
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
                        dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
                        /* XXX magic values, Broadcom-supplied Linux driver */
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
                                dma_rw_ctl |= (1 << 20) | (1 << 18) |
                                    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                        else
                                dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
                } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                        /*
                         * The 5704 uses a different encoding of read/write
                         * watermarks.
                         */
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
                else
                        dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                            (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                            (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                            (0x0F);

                /*
                 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
                 * for hardware bugs.
                 */
                if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
                    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
                        uint32_t tmp;

                        tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
                        if (tmp == 0x6 || tmp == 0x7)
                                dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
                }
        } else
                /* Conventional PCI bus */
                dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
                    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
                    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
                    (0x0F);

        if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
            sc->bge_asicrev == BGE_ASICREV_BCM5705)
                dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
        pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

        /*
         * Set up general mode register.
         */
        CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
            BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
            BGE_MODECTL_TX_NO_PHDR_CSUM);

        /*
         * Disable memory write invalidate.  Apparently it is not supported
         * properly by these devices.
         */
        PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
        /*
         * Must ensure that we do not cross an 8K (bytes) boundary
         * for DMA reads.  Our highest limit is 1K bytes.  This is a
         * restriction on some ALPHA platforms with early revision
         * 21174 PCI chipsets, such as the AlphaPC 164lx.
         */
        PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
            BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

        /* Set the timer prescaler (always 66MHz). */
        CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
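        /*
         * (The "65" appears to derive a 1MHz timer tick from the 66MHz
         * core clock, i.e. 66MHz / (65 + 1) = 1MHz; this reading of the
         * magic value is an inference, not taken from documentation.)
         */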

        return (0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
        struct bge_rcb *rcb;
        bus_size_t vrcb;
        bge_hostaddr taddr;
        int i;

        /*
         * Initialize the memory window pointer register so that
         * we can access the first 32K of internal NIC RAM. This will
         * allow us to set up the TX send ring RCBs and the RX return
         * ring RCBs, plus other things which live in NIC memory.
         */
        CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

        /* Note: the BCM5704 has a smaller mbuf space than other chips. */

        if (!(BGE_IS_5705_OR_BEYOND(sc))) {
                /* Configure mbuf memory pool */
                if (sc->bge_extram) {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_EXT_SSRAM);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                } else {
                        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
                            BGE_BUFFPOOL_1);
                        if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
                        else
                                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
                }

                /* Configure DMA resource pool */
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
                    BGE_DMA_DESCRIPTORS);
                CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
        }

        /* Configure mbuf pool watermarks */
        if (!(BGE_IS_5705_OR_BEYOND(sc))) {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
        } else {
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
                CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
        }
        CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

        /* Configure DMA resource watermarks */
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
        CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

        /* Enable buffer manager */
        if (!(BGE_IS_5705_OR_BEYOND(sc))) {
                CSR_WRITE_4(sc, BGE_BMAN_MODE,
                    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

                /* Poll for buffer manager start indication */
                for (i = 0; i < BGE_TIMEOUT; i++) {
                        if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
                                break;
                        DELAY(10);
                }

                if (i == BGE_TIMEOUT) {
                        device_printf(sc->bge_dev,
                            "buffer manager failed to start\n");
                        return (ENXIO);
                }
        }

        /* Enable flow-through queues */
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
        CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

        /* Wait until queue initialization is complete */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                device_printf(sc->bge_dev, "flow-through queue init failed\n");
                return (ENXIO);
        }

        /* Initialize the standard RX ring control block */
        rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
        rcb->bge_hostaddr.bge_addr_lo =
            BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
        rcb->bge_hostaddr.bge_addr_hi =
            BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
        bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
            sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
        if (BGE_IS_5705_OR_BEYOND(sc))
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
        else
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
        if (sc->bge_extram)
                rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
        else
                rcb->bge_nicaddr = BGE_STD_RX_RINGS;
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

        CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
        CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

        /*
         * Initialize the jumbo RX ring control block.
         * We set the 'ring disabled' bit in the flags
         * field until we're actually ready to start
         * using this ring (i.e. once we set the MTU
         * high enough to require it).
         */
        if (BGE_IS_JUMBO_CAPABLE(sc)) {
                rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

                rcb->bge_hostaddr.bge_addr_lo =
                    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                rcb->bge_hostaddr.bge_addr_hi =
                    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
                bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
                    sc->bge_cdata.bge_rx_jumbo_ring_map,
                    BUS_DMASYNC_PREREAD);
                rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
                    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
                if (sc->bge_extram)
                        rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
                else
                        rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
                    rcb->bge_hostaddr.bge_addr_hi);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
                    rcb->bge_hostaddr.bge_addr_lo);

                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
                CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

                /* Set up dummy disabled mini ring RCB */
                rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
                rcb->bge_maxlen_flags =
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
                CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
                    rcb->bge_maxlen_flags);
        }

        /*
         * Set the BD ring replenish thresholds. The recommended
         * values are 1/8th the number of descriptors allocated to
         * each ring.
         */
        CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
        CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
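        /*
         * (For example, with the 512-entry standard ring described above
         * bge_init_rx_ring_std(), the 1/8th rule works out to 512 / 8 = 64
         * descriptors per replenish.)
         */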

        /*
         * Disable all unused send rings by setting the 'ring disabled'
         * bit in the flags field of all the TX send ring control blocks.
         * These are located in NIC memory.
         */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Configure TX RCB 0 (we use only the first ring) */
        vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr,
            BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
        if (!(BGE_IS_5705_OR_BEYOND(sc)))
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

        /* Disable all unused RX return rings */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
                RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
                RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
                    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
                    BGE_RCB_FLAG_RING_DISABLED));
                RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
                CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
                    (i * (sizeof(uint64_t))), 0);
                vrcb += sizeof(struct bge_rcb);
        }

        /* Initialize RX ring indexes */
        CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
        CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

        /*
         * Set up RX return ring 0.
         * Note that the NIC address for RX return rings is 0x00000000.
         * The return rings live entirely within the host, so the
         * nicaddr field in the RCB isn't used.
         */
        vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
        BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
        RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
        RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
        RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
            BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

        /* Set random backoff seed for TX */
        CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
            IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
            IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
            IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
            BGE_TX_BACKOFF_SEED_MASK);

        /* Set inter-packet gap */
        CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

        /*
         * Specify which ring to use for packets that don't match
         * any RX rules.
         */
        CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

        /*
         * Configure number of RX lists. One interrupt distribution
         * list, sixteen active lists, one bad frames class.
         */
        CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

        /* Initialize RX list placement stats mask. */
        CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
        CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

        /* Disable host coalescing until we get it set up */
        CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

        /* Poll to make sure it's shut down. */
        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
                        break;
                DELAY(10);
        }

        if (i == BGE_TIMEOUT) {
                device_printf(sc->bge_dev,
                    "host coalescing engine failed to idle\n");
                return (ENXIO);
        }

        /* Set up host coalescing defaults */
        CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
        if (!(BGE_IS_5705_OR_BEYOND(sc))) {
                CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
                CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
        }
        CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
        CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

        /* Set up address of statistics block */
        if (!(BGE_IS_5705_OR_BEYOND(sc))) {
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
                    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
                CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
                    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
                CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
                CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
        }

        /* Set up address of status block */
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
            BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
        CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
            BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
        sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
        sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

        /* Turn on host coalescing state machine */
        CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

        /* Turn on RX BD completion state machine and enable attentions */
        CSR_WRITE_4(sc, BGE_RBDC_MODE,
            BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

        /* Turn on RX list placement state machine */
        CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

        /* Turn on RX list selector state machine. */
        if (!(BGE_IS_5705_OR_BEYOND(sc)))
                CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

        /* Turn on DMA, clear stats */
        CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
            BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
            BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
            BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
            (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

        /* Set misc. local control, enable interrupts on attentions */
        CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
        /* Assert GPIO pins for PHY reset */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
            BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
            BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

        /* Turn on DMA completion state machine */
        if (!(BGE_IS_5705_OR_BEYOND(sc)))
                CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

        /* Turn on write DMA state machine */
        CSR_WRITE_4(sc, BGE_WDMA_MODE,
            BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

        /* Turn on read DMA state machine */
        CSR_WRITE_4(sc, BGE_RDMA_MODE,
            BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

        /* Turn on RX data completion state machine */
        CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

        /* Turn on RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

        /* Turn on RX data and RX BD initiator state machine */
        CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

        /* Turn on Mbuf cluster free state machine */
        if (!(BGE_IS_5705_OR_BEYOND(sc)))
                CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

        /* Turn on send BD completion state machine */
        CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1445
1446         /* Turn on send data completion state machine */
1447         CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1448
1449         /* Turn on send data initiator state machine */
1450         CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1451
1452         /* Turn on send BD initiator state machine */
1453         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1454
1455         /* Turn on send BD selector state machine */
1456         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1457
1458         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1459         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1460             BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1461
1462         /* ack/clear link change events */
1463         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1464             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1465             BGE_MACSTAT_LINK_CHANGED);
1466         CSR_WRITE_4(sc, BGE_MI_STS, 0);
1467
1468         /* Enable PHY auto polling (for MII/GMII only) */
1469         if (sc->bge_tbi) {
1470                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1471         } else {
1472                 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1473                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1474                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1475                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1476                             BGE_EVTENB_MI_INTERRUPT);
1477         }
1478
1479         /*
1480          * Clear any pending link state attention.
1481          * Otherwise some link state change events may be lost until attention
1482          * is cleared by bge_intr() -> bge_link_upd() sequence.
1483          * It's not necessary on newer BCM chips - perhaps enabling link
1484          * state change attentions implies clearing pending attention.
1485          */
1486         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1487             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1488             BGE_MACSTAT_LINK_CHANGED);
1489
1490         /* Enable link state change attentions. */
1491         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1492
1493         return (0);
1494 }
1495
1496 const struct bge_revision *
1497 bge_lookup_rev(uint32_t chipid)
1498 {
1499         const struct bge_revision *br;
1500
1501         for (br = bge_revisions; br->br_name != NULL; br++) {
1502                 if (br->br_chipid == chipid)
1503                         return (br);
1504         }
1505
1506         for (br = bge_majorrevs; br->br_name != NULL; br++) {
1507                 if (br->br_chipid == BGE_ASICREV(chipid))
1508                         return (br);
1509         }
1510
1511         return (NULL);
1512 }
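
/*
 * Usage sketch for the lookup above: an exact chip ID match in
 * bge_revisions is preferred, with the ASIC major revision from
 * bge_majorrevs as a fallback, so e.g.
 *
 *      br = bge_lookup_rev(BGE_CHIPID_BCM5700_B2);
 *
 * returns the B2-specific entry if one exists and the generic BCM5700
 * major-revision entry otherwise; NULL means the chip is entirely
 * unknown to the driver.
 */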
1513
1514 const struct bge_vendor *
1515 bge_lookup_vendor(uint16_t vid)
1516 {
1517         const struct bge_vendor *v;
1518
1519         for (v = bge_vendors; v->v_name != NULL; v++)
1520                 if (v->v_id == vid)
1521                         return (v);
1522
1523         panic("%s: unknown vendor %d", __func__, vid);
1524         return (NULL);
1525 }
1526
1527 /*
1528  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1529  * against our list and return its name if we find a match.
1530  *
1531  * Note that since the Broadcom controller contains VPD support, we
1532  * can get the device name string from the controller itself instead
1533  * of the compiled-in string. This is a little slow, but it guarantees
1534  * we'll always announce the right product name. Unfortunately, this
1535  * is possible only later in bge_attach(), when we have established
1536  * access to EEPROM.
1537  */
1538 static int
1539 bge_probe(device_t dev)
1540 {
1541         struct bge_type *t = bge_devs;
1542         struct bge_softc *sc = device_get_softc(dev);
1543
1544         bzero(sc, sizeof(struct bge_softc));
1545         sc->bge_dev = dev;
1546
1547         while (t->bge_vid != 0) {
1548                 if ((pci_get_vendor(dev) == t->bge_vid) &&
1549                     (pci_get_device(dev) == t->bge_did)) {
1550                         char buf[64];
1551                         const struct bge_revision *br;
1552                         const struct bge_vendor *v;
1553                         uint32_t id;
1554
1555                         id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1556                             BGE_PCIMISCCTL_ASICREV;
1557                         br = bge_lookup_rev(id);
1558                         id >>= 16;
1559                         v = bge_lookup_vendor(t->bge_vid);
1560                         if (br == NULL)
1561                                 snprintf(buf, 64, "%s unknown ASIC (%#04x)",
1562                                     v->v_name, id);
1563                         else
1564                                 snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
1565                                     v->v_name, br->br_name, id);
1566                         device_set_desc_copy(dev, buf);
1567                         if (pci_get_subvendor(dev) == DELL_VENDORID)
1568                                 sc->bge_no_3_led = 1;
1569                         return (0);
1570                 }
1571                 t++;
1572         }
1573
1574         return (ENXIO);
1575 }
1576
1577 static void
1578 bge_dma_free(struct bge_softc *sc)
1579 {
1580         int i;
1581
1582         /* Destroy DMA maps for RX buffers. */
1583         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1584                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1585                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1586                             sc->bge_cdata.bge_rx_std_dmamap[i]);
1587         }
1588
1589         /* Destroy DMA maps for jumbo RX buffers. */
1590         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1591                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1592                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1593                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1594         }
1595
1596         /* Destroy DMA maps for TX buffers. */
1597         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1598                 if (sc->bge_cdata.bge_tx_dmamap[i])
1599                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1600                             sc->bge_cdata.bge_tx_dmamap[i]);
1601         }
1602
1603         if (sc->bge_cdata.bge_mtag)
1604                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1605
1606
1607         /* Destroy standard RX ring. */
1608         if (sc->bge_cdata.bge_rx_std_ring_map)
1609                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1610                     sc->bge_cdata.bge_rx_std_ring_map);
1611         if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1612                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1613                     sc->bge_ldata.bge_rx_std_ring,
1614                     sc->bge_cdata.bge_rx_std_ring_map);
1615
1616         if (sc->bge_cdata.bge_rx_std_ring_tag)
1617                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1618
1619         /* Destroy jumbo RX ring. */
1620         if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1621                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1622                     sc->bge_cdata.bge_rx_jumbo_ring_map);
1623
1624         if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1625             sc->bge_ldata.bge_rx_jumbo_ring)
1626                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1627                     sc->bge_ldata.bge_rx_jumbo_ring,
1628                     sc->bge_cdata.bge_rx_jumbo_ring_map);
1629
1630         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1631                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1632
1633         /* Destroy RX return ring. */
1634         if (sc->bge_cdata.bge_rx_return_ring_map)
1635                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1636                     sc->bge_cdata.bge_rx_return_ring_map);
1637
1638         if (sc->bge_cdata.bge_rx_return_ring_map &&
1639             sc->bge_ldata.bge_rx_return_ring)
1640                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1641                     sc->bge_ldata.bge_rx_return_ring,
1642                     sc->bge_cdata.bge_rx_return_ring_map);
1643
1644         if (sc->bge_cdata.bge_rx_return_ring_tag)
1645                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1646
1647         /* Destroy TX ring. */
1648         if (sc->bge_cdata.bge_tx_ring_map)
1649                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1650                     sc->bge_cdata.bge_tx_ring_map);
1651
1652         if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1653                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1654                     sc->bge_ldata.bge_tx_ring,
1655                     sc->bge_cdata.bge_tx_ring_map);
1656
1657         if (sc->bge_cdata.bge_tx_ring_tag)
1658                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1659
1660         /* Destroy status block. */
1661         if (sc->bge_cdata.bge_status_map)
1662                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1663                     sc->bge_cdata.bge_status_map);
1664
1665         if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1666                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1667                     sc->bge_ldata.bge_status_block,
1668                     sc->bge_cdata.bge_status_map);
1669
1670         if (sc->bge_cdata.bge_status_tag)
1671                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1672
1673         /* Destroy statistics block. */
1674         if (sc->bge_cdata.bge_stats_map)
1675                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1676                     sc->bge_cdata.bge_stats_map);
1677
1678         if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1679                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1680                     sc->bge_ldata.bge_stats,
1681                     sc->bge_cdata.bge_stats_map);
1682
1683         if (sc->bge_cdata.bge_stats_tag)
1684                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1685
1686         /* Destroy the parent tag. */
1687         if (sc->bge_cdata.bge_parent_tag)
1688                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1689 }
1690
1691 static int
1692 bge_dma_alloc(device_t dev)
1693 {
1694         struct bge_dmamap_arg ctx;
1695         struct bge_softc *sc;
1696         int i, error;
1697
1698         sc = device_get_softc(dev);
1699
1700         /*
1701          * Allocate the parent bus DMA tag appropriate for PCI.
1702          */
1703         error = bus_dma_tag_create(NULL,        /* parent */
1704                         PAGE_SIZE, 0,           /* alignment, boundary */
1705                         BUS_SPACE_MAXADDR,      /* lowaddr */
1706                         BUS_SPACE_MAXADDR,      /* highaddr */
1707                         NULL, NULL,             /* filter, filterarg */
1708                         MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1709                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1710                         0,                      /* flags */
1711                         NULL, NULL,             /* lockfunc, lockarg */
1712                         &sc->bge_cdata.bge_parent_tag);
1713
1714         if (error != 0) {
1715                 device_printf(sc->bge_dev,
1716                     "could not allocate parent dma tag\n");
1717                 return (ENOMEM);
1718         }
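
        /*
         * Note on the tag hierarchy: every ring, mbuf and block tag
         * created below names bge_parent_tag as its parent, so any
         * restriction placed on the parent (address range, boundary)
         * is inherited by all of them. Child tags only tighten the
         * constraints, e.g. the ring tags use PAGE_SIZE alignment and
         * a single segment while the parent allows BGE_NSEG_NEW.
         */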
1719
1720         /*
1721          * Create tag for RX mbufs.
1722          */
1723         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1724             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1725             NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1726             BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1727
1728         if (error) {
1729                 device_printf(sc->bge_dev, "could not allocate RX mbuf dma tag\n");
1730                 return (ENOMEM);
1731         }
1732
1733         /* Create DMA maps for RX buffers. */
1734         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1735                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1736                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
1737                 if (error) {
1738                         device_printf(sc->bge_dev,
1739                             "can't create DMA map for RX\n");
1740                         return (ENOMEM);
1741                 }
1742         }
1743
1744         /* Create DMA maps for TX buffers. */
1745         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1746                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1747                             &sc->bge_cdata.bge_tx_dmamap[i]);
1748                 if (error) {
1749                         device_printf(sc->bge_dev,
1750                             "can't create DMA map for TX\n");
1751                         return (ENOMEM);
1752                 }
1753         }
1754
1755         /* Create tag for standard RX ring. */
1756         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1757             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1758             NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1759             NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1760
1761         if (error) {
1762                 device_printf(sc->bge_dev, "could not allocate standard RX ring dma tag\n");
1763                 return (ENOMEM);
1764         }
1765
1766         /* Allocate DMA'able memory for standard RX ring. */
1767         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1768             (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1769             &sc->bge_cdata.bge_rx_std_ring_map);
1770         if (error)
1771                 return (ENOMEM);
1772
1773         bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1774
1775         /* Load the address of the standard RX ring. */
1776         ctx.bge_maxsegs = 1;
1777         ctx.sc = sc;
1778
1779         error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1780             sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1781             BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1782
1783         if (error)
1784                 return (ENOMEM);
1785
1786         sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1787
1788         /* Create tags for jumbo mbufs. */
1789         if (BGE_IS_JUMBO_CAPABLE(sc)) {
1790                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1791                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1792                     NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1793                     0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1794                 if (error) {
1795                         device_printf(sc->bge_dev,
1796                             "could not allocate jumbo dma tag\n");
1797                         return (ENOMEM);
1798                 }
1799
1800                 /* Create tag for jumbo RX ring. */
1801                 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1802                     PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1803                     NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1804                     NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1805
1806                 if (error) {
1807                         device_printf(sc->bge_dev,
1808                             "could not allocate jumbo ring dma tag\n");
1809                         return (ENOMEM);
1810                 }
1811
1812                 /* Allocate DMA'able memory for jumbo RX ring. */
1813                 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1814                     (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1815                     BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1816                     &sc->bge_cdata.bge_rx_jumbo_ring_map);
1817                 if (error)
1818                         return (ENOMEM);
1819
1820                 /* Load the address of the jumbo RX ring. */
1821                 ctx.bge_maxsegs = 1;
1822                 ctx.sc = sc;
1823
1824                 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1825                     sc->bge_cdata.bge_rx_jumbo_ring_map,
1826                     sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1827                     bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1828
1829                 if (error)
1830                         return (ENOMEM);
1831
1832                 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1833
1834                 /* Create DMA maps for jumbo RX buffers. */
1835                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1836                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1837                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1838                         if (error) {
1839                                 device_printf(sc->bge_dev,
1840                                     "can't create DMA map for jumbo RX\n");
1841                                 return (ENOMEM);
1842                         }
1843                 }
1844
1845         }
1846
1847         /* Create tag for RX return ring. */
1848         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1849             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1850             NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1851             NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1852
1853         if (error) {
1854                 device_printf(sc->bge_dev, "could not allocate RX return ring dma tag\n");
1855                 return (ENOMEM);
1856         }
1857
1858         /* Allocate DMA'able memory for RX return ring. */
1859         error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1860             (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1861             &sc->bge_cdata.bge_rx_return_ring_map);
1862         if (error)
1863                 return (ENOMEM);
1864
1865         bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1866             BGE_RX_RTN_RING_SZ(sc));
1867
1868         /* Load the address of the RX return ring. */
1869         ctx.bge_maxsegs = 1;
1870         ctx.sc = sc;
1871
1872         error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1873             sc->bge_cdata.bge_rx_return_ring_map,
1874             sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1875             bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1876
1877         if (error)
1878                 return (ENOMEM);
1879
1880         sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1881
1882         /* Create tag for TX ring. */
1883         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1884             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1885             NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1886             &sc->bge_cdata.bge_tx_ring_tag);
1887
1888         if (error) {
1889                 device_printf(sc->bge_dev, "could not allocate TX ring dma tag\n");
1890                 return (ENOMEM);
1891         }
1892
1893         /* Allocate DMA'able memory for TX ring. */
1894         error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1895             (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1896             &sc->bge_cdata.bge_tx_ring_map);
1897         if (error)
1898                 return (ENOMEM);
1899
1900         bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1901
1902         /* Load the address of the TX ring. */
1903         ctx.bge_maxsegs = 1;
1904         ctx.sc = sc;
1905
1906         error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1907             sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1908             BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1909
1910         if (error)
1911                 return (ENOMEM);
1912
1913         sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1914
1915         /* Create tag for status block. */
1916         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1917             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1918             NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1919             NULL, NULL, &sc->bge_cdata.bge_status_tag);
1920
1921         if (error) {
1922                 device_printf(sc->bge_dev, "could not allocate status block dma tag\n");
1923                 return (ENOMEM);
1924         }
1925
1926         /* Allocate DMA'able memory for status block. */
1927         error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1928             (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1929             &sc->bge_cdata.bge_status_map);
1930         if (error)
1931                 return (ENOMEM);
1932
1933         bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1934
1935         /* Load the address of the status block. */
1936         ctx.sc = sc;
1937         ctx.bge_maxsegs = 1;
1938
1939         error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
1940             sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
1941             BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1942
1943         if (error)
1944                 return (ENOMEM);
1945
1946         sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
1947
1948         /* Create tag for statistics block. */
1949         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1950             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1951             NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
1952             &sc->bge_cdata.bge_stats_tag);
1953
1954         if (error) {
1955                 device_printf(sc->bge_dev, "could not allocate statistics block dma tag\n");
1956                 return (ENOMEM);
1957         }
1958
1959         /* Allocate DMA'able memory for statistics block. */
1960         error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
1961             (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
1962             &sc->bge_cdata.bge_stats_map);
1963         if (error)
1964                 return (ENOMEM);
1965
1966         bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
1967
1968         /* Load the address of the statistics block. */
1969         ctx.sc = sc;
1970         ctx.bge_maxsegs = 1;
1971
1972         error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
1973             sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
1974             BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1975
1976         if (error)
1977                 return (ENOMEM);
1978
1979         sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
1980
1981         return (0);
1982 }
1983
1984 static int
1985 bge_attach(device_t dev)
1986 {
1987         struct ifnet *ifp;
1988         struct bge_softc *sc;
1989         uint32_t hwcfg = 0;
1990         uint32_t mac_tmp = 0;
1991         u_char eaddr[6];
1992         int error = 0, rid;
1993
1994         sc = device_get_softc(dev);
1995         sc->bge_dev = dev;
1996
1997         /*
1998          * Map control/status registers.
1999          */
2000         pci_enable_busmaster(dev);
2001
2002         rid = BGE_PCI_BAR0;
2003         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2004             RF_ACTIVE|PCI_RF_DENSE);
2005
2006         if (sc->bge_res == NULL) {
2007                 device_printf(sc->bge_dev, "couldn't map memory\n");
2008                 error = ENXIO;
2009                 goto fail;
2010         }
2011
2012         sc->bge_btag = rman_get_bustag(sc->bge_res);
2013         sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2014
2015         /* Allocate interrupt. */
2016         rid = 0;
2017
2018         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2019             RF_SHAREABLE | RF_ACTIVE);
2020
2021         if (sc->bge_irq == NULL) {
2022                 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2023                 error = ENXIO;
2024                 goto fail;
2025         }
2026
2027         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2028
2029         /* Save ASIC rev. */
2030
2031         sc->bge_chipid =
2032             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2033             BGE_PCIMISCCTL_ASICREV;
2034         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2035         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2036
2037         /*
2038          * XXX: Broadcom Linux driver.  Not in specs or errata.
2039          * PCI-Express?
2040          */
2041         if (BGE_IS_5705_OR_BEYOND(sc)) {
2042                 uint32_t v;
2043
2044                 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2045                 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2046                         v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2047                         if ((v & 0xff) == BGE_PCIE_CAPID)
2048                                 sc->bge_pcie = 1;
2049                 }
2050         }
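
        /*
         * What the check above relies on: byte 1 of a PCI capability
         * structure is the next-capability pointer, so ((v >> 8) & 0xff)
         * follows the link out of the MSI capability; if it points at
         * BGE_PCIE_CAPID_REG and the ID byte stored there matches
         * BGE_PCIE_CAPID (the PCI Express capability ID), the device is
         * sitting on a PCI Express bus.
         */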
2051
2052         /*
2053          * PCI-X?
2054          */
2055         if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2056             BGE_PCISTATE_PCI_BUSMODE) == 0)
2057                 sc->bge_pcix = 1;
2058
2059         /* Try to reset the chip. */
2060         bge_reset(sc);
2061
2062         if (bge_chipinit(sc)) {
2063                 device_printf(sc->bge_dev, "chip initialization failed\n");
2064                 bge_release_resources(sc);
2065                 error = ENXIO;
2066                 goto fail;
2067         }
2068
2069         /*
2070          * Get station address from the EEPROM.
2071          */
2072         mac_tmp = bge_readmem_ind(sc, 0x0c14);
2073         if ((mac_tmp >> 16) == 0x484b) {
2074                 eaddr[0] = (u_char)(mac_tmp >> 8);
2075                 eaddr[1] = (u_char)mac_tmp;
2076                 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2077                 eaddr[2] = (u_char)(mac_tmp >> 24);
2078                 eaddr[3] = (u_char)(mac_tmp >> 16);
2079                 eaddr[4] = (u_char)(mac_tmp >> 8);
2080                 eaddr[5] = (u_char)mac_tmp;
2081         } else if (bge_read_eeprom(sc, eaddr,
2082             BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2083                 device_printf(sc->bge_dev, "failed to read station address\n");
2084                 bge_release_resources(sc);
2085                 error = ENXIO;
2086                 goto fail;
2087         }
2088
2089         /* 5705 limits RX return ring to 512 entries. */
2090         if (BGE_IS_5705_OR_BEYOND(sc))
2091                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2092         else
2093                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2094
2095         if (bge_dma_alloc(dev)) {
2096                 device_printf(sc->bge_dev,
2097                     "failed to allocate DMA resources\n");
2098                 bge_release_resources(sc);
2099                 error = ENXIO;
2100                 goto fail;
2101         }
2102
2103         /* Set default tuneable values. */
2104         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2105         sc->bge_rx_coal_ticks = 150;
2106         sc->bge_tx_coal_ticks = 150;
2107         sc->bge_rx_max_coal_bds = 64;
2108         sc->bge_tx_max_coal_bds = 128;
2109
2110         /* Set up ifnet structure */
2111         ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2112         if (ifp == NULL) {
2113                 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2114                 bge_release_resources(sc);
2115                 error = ENXIO;
2116                 goto fail;
2117         }
2118         ifp->if_softc = sc;
2119         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2120         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2121         ifp->if_ioctl = bge_ioctl;
2122         ifp->if_start = bge_start;
2123         ifp->if_watchdog = bge_watchdog;
2124         ifp->if_init = bge_init;
2125         ifp->if_mtu = ETHERMTU;
2126         ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2127         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2128         IFQ_SET_READY(&ifp->if_snd);
2129         ifp->if_hwassist = BGE_CSUM_FEATURES;
2130         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2131             IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2132         ifp->if_capenable = ifp->if_capabilities;
2133 #ifdef DEVICE_POLLING
2134         ifp->if_capabilities |= IFCAP_POLLING;
2135 #endif
2136
2137         /*
2138          * 5700 B0 chips do not support checksumming correctly due
2139          * to hardware bugs.
2140          */
2141         if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2142                 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2143                 ifp->if_capenable &= ~IFCAP_HWCSUM;
2144                 ifp->if_hwassist = 0;
2145         }
2146
2147         /*
2148          * Figure out what sort of media we have by checking the
2149          * hardware config word in the first 32k of NIC internal memory,
2150          * or fall back to examining the EEPROM if necessary.
2151          * Note: on some BCM5700 cards, this value appears to be unset.
2152          * If that's the case, we have to rely on identifying the NIC
2153          * by its PCI subsystem ID, as we do below for the SysKonnect
2154          * SK-9D41.
2155          */
2156         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2157                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2158         else {
2159                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2160                     sizeof(hwcfg))) {
2161                         device_printf(sc->bge_dev, "failed to read EEPROM\n");
2162                         bge_release_resources(sc);
2163                         error = ENXIO;
2164                         goto fail;
2165                 }
2166                 hwcfg = ntohl(hwcfg);
2167         }
2168
2169         if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2170                 sc->bge_tbi = 1;
2171
2172         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2173         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2174                 sc->bge_tbi = 1;
2175
2176         if (sc->bge_tbi) {
2177                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2178                     bge_ifmedia_upd, bge_ifmedia_sts);
2179                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2180                 ifmedia_add(&sc->bge_ifmedia,
2181                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2182                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2183                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2184                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2185         } else {
2186                 /*
2187                  * Do transceiver setup.
2188                  */
2189                 if (mii_phy_probe(dev, &sc->bge_miibus,
2190                     bge_ifmedia_upd, bge_ifmedia_sts)) {
2191                         device_printf(sc->bge_dev, "MII without any PHY!\n");
2192                         bge_release_resources(sc);
2193                         error = ENXIO;
2194                         goto fail;
2195                 }
2196         }
2197
2198         /*
2199          * When using the BCM5701 in PCI-X mode, data corruption has
2200          * been observed in the first few bytes of some received packets.
2201          * Aligning the packet buffer in memory eliminates the corruption.
2202          * Unfortunately, this misaligns the packet payloads.  On platforms
2203          * which do not support unaligned accesses, we will realign the
2204          * payloads by copying the received packets.
2205          */
2206         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_pcix)
2207                 sc->bge_rx_alignment_bug = 1;
2208
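        /*
         * The actual fix-up lives in bge_rxeof(): when this flag is set,
         * each received frame is shifted forward by ETHER_ALIGN bytes,
         *
         *      bcopy(m->m_data, m->m_data + ETHER_ALIGN, cur_rx->bge_len);
         *      m->m_data += ETHER_ALIGN;
         *
         * which puts the IP header back on a 32-bit boundary at the cost
         * of one copy per packet.
         */
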
2209         /*
2210          * Call MI attach routine.
2211          */
2212         ether_ifattach(ifp, eaddr);
2213         callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2214
2215         /*
2216          * Hookup IRQ last.
2217          */
2218         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2219            bge_intr, sc, &sc->bge_intrhand);
2220
2221         if (error) {
2222                 bge_detach(dev);
2223                 device_printf(sc->bge_dev, "couldn't set up irq\n");
2224         }
2225
2226 fail:
2227         return (error);
2228 }
2229
2230 static int
2231 bge_detach(device_t dev)
2232 {
2233         struct bge_softc *sc;
2234         struct ifnet *ifp;
2235
2236         sc = device_get_softc(dev);
2237         ifp = sc->bge_ifp;
2238
2239 #ifdef DEVICE_POLLING
2240         if (ifp->if_capenable & IFCAP_POLLING)
2241                 ether_poll_deregister(ifp);
2242 #endif
2243
2244         BGE_LOCK(sc);
2245         bge_stop(sc);
2246         bge_reset(sc);
2247         BGE_UNLOCK(sc);
2248
2249         ether_ifdetach(ifp);
2250
2251         if (sc->bge_tbi) {
2252                 ifmedia_removeall(&sc->bge_ifmedia);
2253         } else {
2254                 bus_generic_detach(dev);
2255                 device_delete_child(dev, sc->bge_miibus);
2256         }
2257
2258         bge_release_resources(sc);
2259
2260         return (0);
2261 }
2262
2263 static void
2264 bge_release_resources(struct bge_softc *sc)
2265 {
2266         device_t dev;
2267
2268         dev = sc->bge_dev;
2269
2270         if (sc->bge_vpd_prodname != NULL)
2271                 free(sc->bge_vpd_prodname, M_DEVBUF);
2272
2273         if (sc->bge_vpd_readonly != NULL)
2274                 free(sc->bge_vpd_readonly, M_DEVBUF);
2275
2276         if (sc->bge_intrhand != NULL)
2277                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2278
2279         if (sc->bge_irq != NULL)
2280                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2281
2282         if (sc->bge_res != NULL)
2283                 bus_release_resource(dev, SYS_RES_MEMORY,
2284                     BGE_PCI_BAR0, sc->bge_res);
2285
2286         if (sc->bge_ifp != NULL)
2287                 if_free(sc->bge_ifp);
2288
2289         bge_dma_free(sc);
2290
2291         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
2292                 BGE_LOCK_DESTROY(sc);
2293 }
2294
2295 static void
2296 bge_reset(struct bge_softc *sc)
2297 {
2298         device_t dev;
2299         uint32_t cachesize, command, pcistate, reset;
2300         int i, val = 0;
2301
2302         dev = sc->bge_dev;
2303
2304         /* Save some important PCI state. */
2305         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2306         command = pci_read_config(dev, BGE_PCI_CMD, 4);
2307         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2308
2309         pci_write_config(dev, BGE_PCI_MISC_CTL,
2310             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2311             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2312
2313         reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2314
2315         /* XXX: Broadcom Linux driver. */
2316         if (sc->bge_pcie) {
2317                 if (CSR_READ_4(sc, 0x7e2c) == 0x60)     /* PCIE 1.0 */
2318                         CSR_WRITE_4(sc, 0x7e2c, 0x20);
2319                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2320                         /* Prevent PCIE link training during global reset */
2321                         CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2322                         reset |= (1<<29);
2323                 }
2324         }
2325
2326         /* Issue global reset */
2327         bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2328
2329         DELAY(1000);
2330
2331         /* XXX: Broadcom Linux driver. */
2332         if (sc->bge_pcie) {
2333                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2334                         uint32_t v;
2335
2336                         DELAY(500000); /* wait for link training to complete */
2337                         v = pci_read_config(dev, 0xc4, 4);
2338                         pci_write_config(dev, 0xc4, v | (1<<15), 4);
2339                 }
2340                 /* Set PCIE max payload size and clear error status. */
2341                 pci_write_config(dev, 0xd8, 0xf5000, 4);
2342         }
2343
2344         /* Reset some of the PCI state that got zapped by reset. */
2345         pci_write_config(dev, BGE_PCI_MISC_CTL,
2346             BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2347             BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2348         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2349         pci_write_config(dev, BGE_PCI_CMD, command, 4);
2350         bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2351
2352         /* Enable memory arbiter. */
2353         if (BGE_IS_5714_FAMILY(sc)) {
2354                 uint32_t val;
2355
2356                 val = CSR_READ_4(sc, BGE_MARB_MODE);
2357                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2358         } else
2359                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2360
2361         /*
2362          * Prevent PXE restart: write a magic number to the
2363          * general communications memory at 0xB50.
2364          */
2365         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2366         /*
2367          * Poll the value location we just wrote until
2368          * we see the 1's complement of the magic number.
2369          * This indicates that the firmware initialization
2370          * is complete.
2371          */
2372         for (i = 0; i < BGE_TIMEOUT; i++) {
2373                 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2374                 if (val == ~BGE_MAGIC_NUMBER)
2375                         break;
2376                 DELAY(10);
2377         }
2378
2379         if (i == BGE_TIMEOUT) {
2380                 device_printf(sc->bge_dev, "firmware handshake timed out\n");
2381                 return;
2382         }
2383
2384         /*
2385          * XXX Wait for the value of the PCISTATE register to
2386          * return to its original pre-reset state. This is a
2387          * fairly good indicator of reset completion. If we don't
2388          * wait for the reset to fully complete, trying to read
2389          * from the device's non-PCI registers may yield garbage
2390          * results.
2391          */
2392         for (i = 0; i < BGE_TIMEOUT; i++) {
2393                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2394                         break;
2395                 DELAY(10);
2396         }
2397
2398         /* Fix up byte swapping. */
2399         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2400             BGE_MODECTL_BYTESWAP_DATA);
2401
2402         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2403
2404         /*
2405          * The 5704 in TBI mode apparently needs some special
2406          * adjustment to ensure the SERDES drive level is set
2407          * to 1.2V.
2408          */
2409         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2410                 uint32_t serdescfg;
2411                 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2412                 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2413                 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2414         }
2415
2416         /* XXX: Broadcom Linux driver. */
2417         if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2418                 uint32_t v;
2419
2420                 v = CSR_READ_4(sc, 0x7c00);
2421                 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2422         }
2423         DELAY(10000);
2424 }
2425
2426 /*
2427  * Frame reception handling. This is called if there's a frame
2428  * on the receive return list.
2429  *
2430  * Note: we have to be able to handle two possibilities here:
2431  * 1) the frame is from the jumbo receive ring
2432  * 2) the frame is from the standard receive ring
2433  */
2434
2435 static void
2436 bge_rxeof(struct bge_softc *sc)
2437 {
2438         struct ifnet *ifp;
2439         int stdcnt = 0, jumbocnt = 0;
2440
2441         BGE_LOCK_ASSERT(sc);
2442
2443         /* Nothing to do. */
2444         if (sc->bge_rx_saved_considx ==
2445             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2446                 return;
2447
2448         ifp = sc->bge_ifp;
2449
2450         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2451             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2452         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2453             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2454         if (BGE_IS_JUMBO_CAPABLE(sc))
2455                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2456                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2457
2458         while (sc->bge_rx_saved_considx !=
2459             sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2460                 struct bge_rx_bd        *cur_rx;
2461                 uint32_t                rxidx;
2462                 struct mbuf             *m = NULL;
2463                 uint16_t                vlan_tag = 0;
2464                 int                     have_tag = 0;
2465
2466 #ifdef DEVICE_POLLING
2467                 if (ifp->if_capenable & IFCAP_POLLING) {
2468                         if (sc->rxcycles <= 0)
2469                                 break;
2470                         sc->rxcycles--;
2471                 }
2472 #endif
2473
2474                 cur_rx =
2475                     &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2476
2477                 rxidx = cur_rx->bge_idx;
2478                 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2479
2480                 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2481                         have_tag = 1;
2482                         vlan_tag = cur_rx->bge_vlan_tag;
2483                 }
2484
2485                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2486                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2487                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2488                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2489                             BUS_DMASYNC_POSTREAD);
2490                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2491                             sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2492                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2493                         sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2494                         jumbocnt++;
2495                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2496                                 ifp->if_ierrors++;
2497                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2498                                 continue;
2499                         }
2500                         if (bge_newbuf_jumbo(sc,
2501                             sc->bge_jumbo, NULL) == ENOBUFS) {
2502                                 ifp->if_ierrors++;
2503                                 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2504                                 continue;
2505                         }
2506                 } else {
2507                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2508                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2509                             sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2510                             BUS_DMASYNC_POSTREAD);
2511                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2512                             sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2513                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2514                         sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2515                         stdcnt++;
2516                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2517                                 ifp->if_ierrors++;
2518                                 bge_newbuf_std(sc, sc->bge_std, m);
2519                                 continue;
2520                         }
2521                         if (bge_newbuf_std(sc, sc->bge_std,
2522                             NULL) == ENOBUFS) {
2523                                 ifp->if_ierrors++;
2524                                 bge_newbuf_std(sc, sc->bge_std, m);
2525                                 continue;
2526                         }
2527                 }
2528
2529                 ifp->if_ipackets++;
2530 #ifndef __NO_STRICT_ALIGNMENT
2531                 /*
2532                  * For architectures with strict alignment we must make sure
2533                  * the payload is aligned.
2534                  */
2535                 if (sc->bge_rx_alignment_bug) {
2536                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2537                             cur_rx->bge_len);
2538                         m->m_data += ETHER_ALIGN;
2539                 }
2540 #endif
2541                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2542                 m->m_pkthdr.rcvif = ifp;
2543
2544                 if (ifp->if_capenable & IFCAP_RXCSUM) {
2545                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2546                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2547                                 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2548                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2549                         }
2550                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2551                             m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2552                                 m->m_pkthdr.csum_data =
2553                                     cur_rx->bge_tcp_udp_csum;
2554                                 m->m_pkthdr.csum_flags |=
2555                                     CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2556                         }
2557                 }
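
                /*
                 * Note on the checksum handling above: a good IP header
                 * checksum reads back from the chip as 0xffff, hence the
                 * (^ 0xffff) == 0 test. For TCP/UDP the hardware result
                 * is handed up in csum_data, and CSUM_DATA_VALID with
                 * CSUM_PSEUDO_HDR tells the stack the pseudo-header is
                 * already included, so it only has to compare the final
                 * value instead of checksumming the payload again.
                 */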
2558
2559                 /*
2560                  * If we received a packet with a vlan tag,
2561                  * attach that information to the packet.
2562                  */
2563                 if (have_tag) {
2564                         VLAN_INPUT_TAG(ifp, m, vlan_tag);
2565                         if (m == NULL)
2566                                 continue;
2567                 }
2568
2569                 BGE_UNLOCK(sc);
2570                 (*ifp->if_input)(ifp, m);
2571                 BGE_LOCK(sc);
2572         }
2573
2574         if (stdcnt > 0)
2575                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2576                     sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2577
2578         if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2579                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2580                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2581
2582         CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2583         if (stdcnt)
2584                 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2585         if (jumbocnt)
2586                 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2587 }
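
/*
 * Illustrative note on the mailbox writes at the end of bge_rxeof():
 * bge_rx_saved_considx tells the chip how far the host has drained the
 * return ring, while bge_std and bge_jumbo advance the producer side as
 * replacement buffers are posted. Only indexes that actually moved are
 * written back, e.g. the jumbo producer is left alone when jumbocnt is
 * zero.
 */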
2588
2589 static void
2590 bge_txeof(struct bge_softc *sc)
2591 {
2592         struct bge_tx_bd *cur_tx = NULL;
2593         struct ifnet *ifp;
2594
2595         BGE_LOCK_ASSERT(sc);
2596
2597         /* Nothing to do. */
2598         if (sc->bge_tx_saved_considx ==
2599             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2600                 return;
2601
2602         ifp = sc->bge_ifp;
2603
2604         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2605             sc->bge_cdata.bge_tx_ring_map,
2606             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2607         /*
2608          * Go through our tx ring and free mbufs for those
2609          * frames that have been sent.
2610          */
2611         while (sc->bge_tx_saved_considx !=
2612             sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2613                 uint32_t                idx = 0;
2614
2615                 idx = sc->bge_tx_saved_considx;
2616                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2617                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2618                         ifp->if_opackets++;
2619                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2620                         bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2621                             sc->bge_cdata.bge_tx_dmamap[idx],
2622                             BUS_DMASYNC_POSTWRITE);
2623                         bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2624                             sc->bge_cdata.bge_tx_dmamap[idx]);
2625                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2626                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
2627                 }
2628                 sc->bge_txcnt--;
2629                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2630                 ifp->if_timer = 0;
2631         }
2632
2633         if (cur_tx != NULL)
2634                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2635 }
2636
2637 #ifdef DEVICE_POLLING
2638 static void
2639 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2640 {
2641         struct bge_softc *sc = ifp->if_softc;
2642         uint32_t statusword;
2643
2644         BGE_LOCK(sc);
2645         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2646                 BGE_UNLOCK(sc);
2647                 return;
2648         }
2649
2650         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2651             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2652
2653         statusword = atomic_readandclear_32(
2654             &sc->bge_ldata.bge_status_block->bge_status);
2655
2656         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2657             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2658
2659         /* Note link event; it will be processed by the POLL_AND_CHECK_STATUS cmd. */
2660         if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2661                 sc->bge_link_evt++;
2662
2663         if (cmd == POLL_AND_CHECK_STATUS)
2664                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2665                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2666                     sc->bge_link_evt || sc->bge_tbi)
2667                         bge_link_upd(sc);
2668
2669         sc->rxcycles = count;
2670         bge_rxeof(sc);
2671         bge_txeof(sc);
2672         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2673                 bge_start_locked(ifp);
2674
2675         BGE_UNLOCK(sc);
2676 }
2677 #endif /* DEVICE_POLLING */
2678
2679 static void
2680 bge_intr(void *xsc)
2681 {
2682         struct bge_softc *sc;
2683         struct ifnet *ifp;
2684         uint32_t statusword;
2685
2686         sc = xsc;
2687
2688         BGE_LOCK(sc);
2689
2690         ifp = sc->bge_ifp;
2691
2692 #ifdef DEVICE_POLLING
2693         if (ifp->if_capenable & IFCAP_POLLING) {
2694                 BGE_UNLOCK(sc);
2695                 return;
2696         }
2697 #endif
2698
2699         /*
2700          * Do the mandatory PCI flush as well as get the link status.
2701          */
2702         statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2703
2704         /* Ack interrupt and stop others from occurring. */
2705         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2706
2707         /* Make sure the descriptor ring indexes are coherent. */
2708         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2709             sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2710         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2711             sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2712
2713         if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2714             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2715             statusword || sc->bge_link_evt)
2716                 bge_link_upd(sc);
2717
2718         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2719                 /* Check RX return ring producer/consumer. */
2720                 bge_rxeof(sc);
2721
2722                 /* Check TX ring producer/consumer. */
2723                 bge_txeof(sc);
2724         }
2725
2726         /* Re-enable interrupts. */
2727         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2728
2729         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2730             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2731                 bge_start_locked(ifp);
2732
2733         BGE_UNLOCK(sc);
2734 }
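
/*
 * Illustrative sketch (under #if 0, never compiled): the IRQ0 mailbox
 * handshake performed by bge_intr() above. Writing 1 acks the interrupt
 * and masks further ones while the rings are drained; writing 0 unmasks.
 */
#if 0
        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);    /* ack + mask */
        /* ... drain the RX return ring and the TX consumer here ... */
        CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);    /* unmask */
#endif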
2735
2736 static void
2737 bge_tick_locked(struct bge_softc *sc)
2738 {
2739         struct mii_data *mii = NULL;
2740
2741         BGE_LOCK_ASSERT(sc);
2742
2743         if (BGE_IS_5705_OR_BEYOND(sc))
2744                 bge_stats_update_regs(sc);
2745         else
2746                 bge_stats_update(sc);
2747
2748         if (!sc->bge_tbi) {
2749                 mii = device_get_softc(sc->bge_miibus);
2750                 mii_tick(mii);
2751         } else {
2752                 /*
2753                  * Since auto-polling can't be used in TBI mode, we have to
2754                  * poll the link status manually. Here we register a pending
2755                  * link event and trigger an interrupt.
2756                  */
2757 #ifdef DEVICE_POLLING
2758                 /* In polling mode we poll link state in bge_poll(). */
2759                 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2760 #endif
2761                 {
2762                 sc->bge_link_evt++;
2763                 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2764                 }
2765         }
2766
2767         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2768 }
2769
2770 static void
2771 bge_tick(void *xsc)
2772 {
2773         struct bge_softc *sc;
2774
2775         sc = xsc;
2776
2777         BGE_LOCK(sc);
2778         bge_tick_locked(sc);
2779         BGE_UNLOCK(sc);
2780 }
2781
2782 static void
2783 bge_stats_update_regs(struct bge_softc *sc)
2784 {
2785         struct bge_mac_stats_regs stats;
2786         struct ifnet *ifp;
2787         uint32_t *s;
2788         u_long cnt;     /* current register value */
2789         int i;
2790
2791         ifp = sc->bge_ifp;
2792
2793         s = (uint32_t *)&stats;
2794         for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2795                 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2796                 s++;
2797         }
2798
2799         cnt = stats.dot3StatsSingleCollisionFrames +
2800             stats.dot3StatsMultipleCollisionFrames +
2801             stats.dot3StatsExcessiveCollisions +
2802             stats.dot3StatsLateCollisions;
2803         ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2804             cnt - sc->bge_tx_collisions : cnt;
2805         sc->bge_tx_collisions = cnt;
2806 }
2807
2808 static void
2809 bge_stats_update(struct bge_softc *sc)
2810 {
2811         struct ifnet *ifp;
2812         bus_size_t stats;
2813         u_long cnt;     /* current register value */
2814
2815         ifp = sc->bge_ifp;
2816
2817         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2818
2819 #define READ_STAT(sc, stats, stat) \
2820         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2821
2822         cnt = READ_STAT(sc, stats,
2823             txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2824         cnt += READ_STAT(sc, stats,
2825             txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2826         cnt += READ_STAT(sc, stats,
2827             txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2828         cnt += READ_STAT(sc, stats,
2829             txstats.dot3StatsLateCollisions.bge_addr_lo);
2830         ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2831             cnt - sc->bge_tx_collisions : cnt;
2832         sc->bge_tx_collisions = cnt;
2833
2834         cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2835         ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2836             cnt - sc->bge_rx_discards : cnt;
2837         sc->bge_rx_discards = cnt;
2838
2839         cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2840         ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2841             cnt - sc->bge_tx_discards : cnt;
2842         sc->bge_tx_discards = cnt;
2843
2844 #undef READ_STAT
2845 }
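
/*
 * Illustrative sketch (under #if 0, never compiled): the wrap-safe delta
 * pattern used by both stats routines above. Falling back to the raw
 * value when a counter goes backwards (wrap or chip reset) keeps the
 * if_* accumulators monotonic. The helper name is hypothetical.
 */
#if 0
static u_long
bge_stat_delta(u_long cur, u_long *saved)
{
        u_long delta;

        delta = cur >= *saved ? cur - *saved : cur;
        *saved = cur;                   /* remember for the next tick */
        return (delta);
}
#endif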
2846
2847 /*
2848  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2849  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2850  * but when such padded frames employ the bge IP/TCP checksum offload,
2851  * the hardware checksum assist gives incorrect results (possibly
2852  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2853  * If we pad such runts with zeros, the onboard checksum comes out correct.
2854  */
2855 static __inline int
2856 bge_cksum_pad(struct mbuf *m)
2857 {
2858         int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2859         struct mbuf *last;
2860
2861         /* If there's only the packet-header mbuf and we can pad there, use it. */
2862         if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2863             M_TRAILINGSPACE(m) >= padlen) {
2864                 last = m;
2865         } else {
2866                 /*
2867                  * Walk packet chain to find last mbuf. We will either
2868                  * pad there, or append a new mbuf and pad it.
2869                  */
2870                 for (last = m; last->m_next != NULL; last = last->m_next);
2871                 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2872                         /* Allocate new empty mbuf, pad it. Compact later. */
2873                         struct mbuf *n;
2874
2875                         MGET(n, M_DONTWAIT, MT_DATA);
2876                         if (n == NULL)
2877                                 return (ENOBUFS);
2878                         n->m_len = 0;
2879                         last->m_next = n;
2880                         last = n;
2881                 }
2882         }
2883
2884         /* Now zero the pad area, to avoid the bge cksum-assist bug. */
2885         memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2886         last->m_len += padlen;
2887         m->m_pkthdr.len += padlen;
2888
2889         return (0);
2890 }
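
/*
 * Illustrative sketch (under #if 0, never compiled): the caller-side
 * contract, mirroring bge_encap() below. bge_cksum_pad() is invoked only
 * for short frames that request TCP/UDP checksum offload; on success the
 * frame is exactly ETHER_MIN_NOPAD bytes long and the pad bytes are zero.
 */
#if 0
        if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
            (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0 &&
            bge_cksum_pad(m) != 0)
                return (ENOBUFS);       /* could not allocate a pad mbuf */
#endif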
2891
2892 /*
2893  * Encapsulate an mbuf chain in the TX ring by coupling the mbuf data
2894  * pointers to descriptors.
2895  */
2896 static int
2897 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2898 {
2899         bus_dma_segment_t       segs[BGE_NSEG_NEW];
2900         bus_dmamap_t            map;
2901         struct bge_tx_bd        *d = NULL;
2902         struct m_tag            *mtag;
2903         uint32_t                idx = *txidx;
2904         uint16_t                csum_flags = 0;
2905         int                     nsegs, i, error;
2906
2907         if (m_head->m_pkthdr.csum_flags) {
2908                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2909                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2910                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
2911                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2912                         if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
2913                             bge_cksum_pad(m_head) != 0)
2914                                 return (ENOBUFS);
2915                 }
2916                 if (m_head->m_flags & M_LASTFRAG)
2917                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2918                 else if (m_head->m_flags & M_FRAG)
2919                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2920         }
2921
2922         mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
2923
2924         map = sc->bge_cdata.bge_tx_dmamap[idx];
2925         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
2926             m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2927         if (error) {
2928                 if (error == EFBIG) {
2929                         struct mbuf *m0;
2930
2931                         m0 = m_defrag(m_head, M_DONTWAIT);
2932                         if (m0 == NULL)
2933                                 return (ENOBUFS);
2934                         m_head = m0;
2935                         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
2936                             map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2937                 }
2938                 if (error)
2939                         return (error);
2940         }
2941
2942         /*
2943          * Sanity check: avoid coming within 16 descriptors
2944          * of the end of the ring.
2945          */
2946         if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
2947                 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
2948                 return (ENOBUFS);
2949         }
2950
2951         bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2952
2953         for (i = 0; ; i++) {
2954                 d = &sc->bge_ldata.bge_tx_ring[idx];
2955                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2956                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2957                 d->bge_len = segs[i].ds_len;
2958                 d->bge_flags = csum_flags;
2959                 if (i == nsegs - 1)
2960                         break;
2961                 BGE_INC(idx, BGE_TX_RING_CNT);
2962         }
2963
2964         /* Mark the last segment as end of packet... */
2965         d->bge_flags |= BGE_TXBDFLAG_END;
2966         /* ... and put VLAN tag into first segment.  */
2967         d = &sc->bge_ldata.bge_tx_ring[*txidx];
2968         if (mtag != NULL) {
2969                 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2970                 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
2971         } else
2972                 d->bge_vlan_tag = 0;
2973
2974         /*
2975          * Ensure that the map for this transmission
2976          * is placed at the array index of the last descriptor
2977          * in this chain.
2978          */
2979         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2980         sc->bge_cdata.bge_tx_dmamap[idx] = map;
2981         sc->bge_cdata.bge_tx_chain[idx] = m_head;
2982         sc->bge_txcnt += nsegs;
2983
2984         BGE_INC(idx, BGE_TX_RING_CNT);
2985         *txidx = idx;
2986
2987         return (0);
2988 }
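
/*
 * Illustrative sketch (under #if 0, never compiled): the ring-index
 * arithmetic bge_encap() relies on. BGE_INC() (defined in if_bgereg.h)
 * advances an index modulo the ring size; the 16-descriptor margin
 * checked above keeps the producer from overrunning the consumer.
 */
#if 0
        uint32_t idx = BGE_TX_RING_CNT - 1;

        BGE_INC(idx, BGE_TX_RING_CNT);  /* wraps around: idx is now 0 */
#endif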
2989
2990 /*
2991  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2992  * to the mbuf data regions directly in the transmit descriptors.
2993  */
2994 static void
2995 bge_start_locked(struct ifnet *ifp)
2996 {
2997         struct bge_softc *sc;
2998         struct mbuf *m_head = NULL;
2999         uint32_t prodidx;
3000         int count = 0;
3001
3002         sc = ifp->if_softc;
3003
3004         if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3005                 return;
3006
3007         prodidx = sc->bge_tx_prodidx;
3008
3009         while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3010                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3011                 if (m_head == NULL)
3012                         break;
3013
3014                 /*
3015                  * XXX
3016                  * The code inside the if() block is never reached since we
3017                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3018                  * requests to checksum TCP/UDP in a fragmented packet.
3019                  *
3020                  * XXX
3021                  * safety overkill.  If this is a fragmented packet chain
3022                  * with delayed TCP/UDP checksums, then only encapsulate
3023                  * it if we have enough descriptors to handle the entire
3024                  * chain at once.
3025                  * (paranoia -- may not actually be needed)
3026                  */
3027                 if (m_head->m_flags & M_FIRSTFRAG &&
3028                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3029                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3030                             m_head->m_pkthdr.csum_data + 16) {
3031                                 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3032                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3033                                 break;
3034                         }
3035                 }
3036
3037                 /*
3038                  * Pack the data into the transmit ring. If we
3039                  * don't have room, set the OACTIVE flag and wait
3040                  * for the NIC to drain the ring.
3041                  */
3042                 if (bge_encap(sc, m_head, &prodidx)) {
3043                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3044                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3045                         break;
3046                 }
3047                 ++count;
3048
3049                 /*
3050                  * If there's a BPF listener, bounce a copy of this frame
3051                  * to him.
3052                  */
3053                 BPF_MTAP(ifp, m_head);
3054         }
3055
3056         if (count == 0)
3057                 /* No packets were dequeued. */
3058                 return;
3059
3060         /* Transmit. */
3061         CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3062         /* 5700 BX errata: write the TX producer index mailbox twice. */
3063         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3064                 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3065
3066         sc->bge_tx_prodidx = prodidx;
3067
3068         /*
3069          * Set a timeout in case the chip goes out to lunch.
3070          */
3071         ifp->if_timer = 5;
3072 }
3073
3074 /*
3075  * Locked entry point for transmission: take the driver lock and hand the
3076  * send queue off to bge_start_locked().
3077  */
3078 static void
3079 bge_start(struct ifnet *ifp)
3080 {
3081         struct bge_softc *sc;
3082
3083         sc = ifp->if_softc;
3084         BGE_LOCK(sc);
3085         bge_start_locked(ifp);
3086         BGE_UNLOCK(sc);
3087 }
3088
3089 static void
3090 bge_init_locked(struct bge_softc *sc)
3091 {
3092         struct ifnet *ifp;
3093         uint16_t *m;
3094
3095         BGE_LOCK_ASSERT(sc);
3096
3097         ifp = sc->bge_ifp;
3098
3099         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3100                 return;
3101
3102         /* Cancel pending I/O and flush buffers. */
3103         bge_stop(sc);
3104         bge_reset(sc);
3105         bge_chipinit(sc);
3106
3107         /*
3108          * Init the various state machines, ring
3109          * control blocks and firmware.
3110          */
3111         if (bge_blockinit(sc)) {
3112                 device_printf(sc->bge_dev, "initialization failure\n");
3113                 return;
3114         }
3115
3116         ifp = sc->bge_ifp;
3117
3118         /* Specify MTU. */
3119         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3120             ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3121
3122         /* Load our MAC address. */
3123         m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3124         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3125         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3126
3127         /* Enable or disable promiscuous mode as needed. */
3128         if (ifp->if_flags & IFF_PROMISC) {
3129                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3130         } else {
3131                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3132         }
3133
3134         /* Program multicast filter. */
3135         bge_setmulti(sc);
3136
3137         /* Init RX ring. */
3138         bge_init_rx_ring_std(sc);
3139
3140         /*
3141          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3142          * memory to ensure that the chip has in fact read the first
3143          * entry of the ring.
3144          */
3145         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3146                 uint32_t                v, i;
3147                 for (i = 0; i < 10; i++) {
3148                         DELAY(20);
3149                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3150                         if (v == (MCLBYTES - ETHER_ALIGN))
3151                                 break;
3152                 }
3153                 if (i == 10)
3154                         device_printf (sc->bge_dev,
3155                             "5705 A0 chip failed to load RX ring\n");
3156         }
3157
3158         /* Init jumbo RX ring. */
3159         if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3160                 bge_init_rx_ring_jumbo(sc);
3161
3162         /* Init our RX return ring index. */
3163         sc->bge_rx_saved_considx = 0;
3164
3165         /* Init TX ring. */
3166         bge_init_tx_ring(sc);
3167
3168         /* Turn on transmitter. */
3169         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3170
3171         /* Turn on receiver. */
3172         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3173
3174         /* Tell firmware we're alive. */
3175         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3176
3177 #ifdef DEVICE_POLLING
3178         /* Disable interrupts if we are polling. */
3179         if (ifp->if_capenable & IFCAP_POLLING) {
3180                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3181                     BGE_PCIMISCCTL_MASK_PCI_INTR);
3182                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3183                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3184                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3185         } else
3186 #endif
3187
3188         /* Enable host interrupts. */
3189         {
3190         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3191         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3192         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3193         }
3194
3195         bge_ifmedia_upd(ifp);
3196
3197         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3198         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3199
3200         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3201 }
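
/*
 * Worked example: for station address 00:11:22:33:44:55, the two CSR
 * writes in bge_init_locked() above evaluate to
 *
 *      BGE_MAC_ADDR1_LO <- htons(m[0])                        = 0x00000011
 *      BGE_MAC_ADDR1_HI <- (htons(m[1]) << 16) | htons(m[2])  = 0x22334455
 *
 * htons() makes the packing independent of host byte order.
 */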
3202
3203 static void
3204 bge_init(void *xsc)
3205 {
3206         struct bge_softc *sc = xsc;
3207
3208         BGE_LOCK(sc);
3209         bge_init_locked(sc);
3210         BGE_UNLOCK(sc);
3211 }
3212
3213 /*
3214  * Set media options.
3215  */
3216 static int
3217 bge_ifmedia_upd(struct ifnet *ifp)
3218 {
3219         struct bge_softc *sc;
3220         struct mii_data *mii;
3221         struct ifmedia *ifm;
3222
3223         sc = ifp->if_softc;
3224         ifm = &sc->bge_ifmedia;
3225
3226         /* If this is a 1000baseX NIC, enable the TBI port. */
3227         if (sc->bge_tbi) {
3228                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3229                         return (EINVAL);
3230                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3231                 case IFM_AUTO:
3232                         /*
3233                          * The BCM5704 ASIC appears to have a special
3234                          * mechanism for programming the autoneg
3235                          * advertisement registers in TBI mode.
3236                          */
3237                         if (bge_fake_autoneg == 0 &&
3238                             sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3239                                 uint32_t sgdig;
3240                                 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3241                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3242                                 sgdig |= BGE_SGDIGCFG_AUTO|
3243                                     BGE_SGDIGCFG_PAUSE_CAP|
3244                                     BGE_SGDIGCFG_ASYM_PAUSE;
3245                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3246                                     sgdig|BGE_SGDIGCFG_SEND);
3247                                 DELAY(5);
3248                                 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3249                         }
3250                         break;
3251                 case IFM_1000_SX:
3252                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3253                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
3254                                     BGE_MACMODE_HALF_DUPLEX);
3255                         } else {
3256                                 BGE_SETBIT(sc, BGE_MAC_MODE,
3257                                     BGE_MACMODE_HALF_DUPLEX);
3258                         }
3259                         break;
3260                 default:
3261                         return (EINVAL);
3262                 }
3263                 return (0);
3264         }
3265
3266         sc->bge_link_evt++;
3267         mii = device_get_softc(sc->bge_miibus);
3268         if (mii->mii_instance) {
3269                 struct mii_softc *miisc;
3270                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3271                     miisc = LIST_NEXT(miisc, mii_list))
3272                         mii_phy_reset(miisc);
3273         }
3274         mii_mediachg(mii);
3275
3276         return (0);
3277 }
3278
3279 /*
3280  * Report current media status.
3281  */
3282 static void
3283 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3284 {
3285         struct bge_softc *sc;
3286         struct mii_data *mii;
3287
3288         sc = ifp->if_softc;
3289
3290         if (sc->bge_tbi) {
3291                 ifmr->ifm_status = IFM_AVALID;
3292                 ifmr->ifm_active = IFM_ETHER;
3293                 if (CSR_READ_4(sc, BGE_MAC_STS) &
3294                     BGE_MACSTAT_TBI_PCS_SYNCHED)
3295                         ifmr->ifm_status |= IFM_ACTIVE;
3296                 else {
3297                         ifmr->ifm_active |= IFM_NONE;
3298                         return;
3299                 }
3300                 ifmr->ifm_active |= IFM_1000_SX;
3301                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3302                         ifmr->ifm_active |= IFM_HDX;
3303                 else
3304                         ifmr->ifm_active |= IFM_FDX;
3305                 return;
3306         }
3307
3308         mii = device_get_softc(sc->bge_miibus);
3309         mii_pollstat(mii);
3310         ifmr->ifm_active = mii->mii_media_active;
3311         ifmr->ifm_status = mii->mii_media_status;
3312 }
3313
3314 static int
3315 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3316 {
3317         struct bge_softc *sc = ifp->if_softc;
3318         struct ifreq *ifr = (struct ifreq *) data;
3319         struct mii_data *mii;
3320         int mask, error = 0;
3321
3322         switch (command) {
3323         case SIOCSIFMTU:
3324                 if (ifr->ifr_mtu < ETHERMIN ||
3325                     ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3326                     ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3327                     ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3328                     ifr->ifr_mtu > ETHERMTU))
3329                         error = EINVAL;
3330                 else if (ifp->if_mtu != ifr->ifr_mtu) {
3331                         ifp->if_mtu = ifr->ifr_mtu;
3332                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3333                         bge_init(sc);
3334                 }
3335                 break;
3336         case SIOCSIFFLAGS:
3337                 BGE_LOCK(sc);
3338                 if (ifp->if_flags & IFF_UP) {
3339                         /*
3340                          * If only the state of the PROMISC flag changed,
3341                          * then just use the 'set promisc mode' command
3342                          * instead of reinitializing the entire NIC. Doing
3343                          * a full re-init means reloading the firmware and
3344                          * waiting for it to start up, which may take a
3345                          * second or two.  Similarly for ALLMULTI.
3346                          */
3347                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3348                             ifp->if_flags & IFF_PROMISC &&
3349                             !(sc->bge_if_flags & IFF_PROMISC)) {
3350                                 BGE_SETBIT(sc, BGE_RX_MODE,
3351                                     BGE_RXMODE_RX_PROMISC);
3352                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3353                             !(ifp->if_flags & IFF_PROMISC) &&
3354                             sc->bge_if_flags & IFF_PROMISC) {
3355                                 BGE_CLRBIT(sc, BGE_RX_MODE,
3356                                     BGE_RXMODE_RX_PROMISC);
3357                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3358                             (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3359                                 bge_setmulti(sc);
3360                         } else
3361                                 bge_init_locked(sc);
3362                 } else {
3363                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3364                                 bge_stop(sc);
3365                         }
3366                 }
3367                 sc->bge_if_flags = ifp->if_flags;
3368                 BGE_UNLOCK(sc);
3369                 error = 0;
3370                 break;
3371         case SIOCADDMULTI:
3372         case SIOCDELMULTI:
3373                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3374                         BGE_LOCK(sc);
3375                         bge_setmulti(sc);
3376                         BGE_UNLOCK(sc);
3377                         error = 0;
3378                 }
3379                 break;
3380         case SIOCSIFMEDIA:
3381         case SIOCGIFMEDIA:
3382                 if (sc->bge_tbi) {
3383                         error = ifmedia_ioctl(ifp, ifr,
3384                             &sc->bge_ifmedia, command);
3385                 } else {
3386                         mii = device_get_softc(sc->bge_miibus);
3387                         error = ifmedia_ioctl(ifp, ifr,
3388                             &mii->mii_media, command);
3389                 }
3390                 break;
3391         case SIOCSIFCAP:
3392                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3393 #ifdef DEVICE_POLLING
3394                 if (mask & IFCAP_POLLING) {
3395                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
3396                                 error = ether_poll_register(bge_poll, ifp);
3397                                 if (error)
3398                                         return (error);
3399                                 BGE_LOCK(sc);
3400                                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3401                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
3402                                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3403                                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3404                                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3405                                 ifp->if_capenable |= IFCAP_POLLING;
3406                                 BGE_UNLOCK(sc);
3407                         } else {
3408                                 error = ether_poll_deregister(ifp);
3409                                 /* Re-enable interrupts even in the error case. */
3410                                 BGE_LOCK(sc);
3411                                 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3412                                 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3413                                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3414                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
3415                                 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3416                                 ifp->if_capenable &= ~IFCAP_POLLING;
3417                                 BGE_UNLOCK(sc);
3418                         }
3419                 }
3420 #endif
3421                 if (mask & IFCAP_HWCSUM) {
3422                         ifp->if_capenable ^= IFCAP_HWCSUM;
3423                         if (IFCAP_HWCSUM & ifp->if_capenable &&
3424                             IFCAP_HWCSUM & ifp->if_capabilities)
3425                                 ifp->if_hwassist = BGE_CSUM_FEATURES;
3426                         else
3427                                 ifp->if_hwassist = 0;
3428                         VLAN_CAPABILITIES(ifp);
3429                 }
3430                 break;
3431         default:
3432                 error = ether_ioctl(ifp, command, data);
3433                 break;
3434         }
3435
3436         return (error);
3437 }
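
/*
 * Illustrative sketch (under #if 0, never compiled): the XOR idiom in
 * the SIOCSIFCAP case above. XORing the requested and current capability
 * sets yields exactly the bits that changed, so each capability is
 * toggled at most once per ioctl.
 */
#if 0
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

        if (mask & IFCAP_HWCSUM)        /* HWCSUM state changed */
                ifp->if_capenable ^= IFCAP_HWCSUM;
#endif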
3438
3439 static void
3440 bge_watchdog(struct ifnet *ifp)
3441 {
3442         struct bge_softc *sc;
3443
3444         sc = ifp->if_softc;
3445
3446         if_printf(ifp, "watchdog timeout -- resetting\n");
3447
3448         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3449         bge_init(sc);
3450
3451         ifp->if_oerrors++;
3452 }
3453
3454 /*
3455  * Stop the adapter and free any mbufs allocated to the
3456  * RX and TX lists.
3457  */
3458 static void
3459 bge_stop(struct bge_softc *sc)
3460 {
3461         struct ifnet *ifp;
3462         struct ifmedia_entry *ifm;
3463         struct mii_data *mii = NULL;
3464         int mtmp, itmp;
3465
3466         BGE_LOCK_ASSERT(sc);
3467
3468         ifp = sc->bge_ifp;
3469
3470         if (!sc->bge_tbi)
3471                 mii = device_get_softc(sc->bge_miibus);
3472
3473         callout_stop(&sc->bge_stat_ch);
3474
3475         /*
3476          * Disable all of the receiver blocks.
3477          */
3478         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3479         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3480         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3481         if (!(BGE_IS_5705_OR_BEYOND(sc)))
3482                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3483         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3484         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3485         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3486
3487         /*
3488          * Disable all of the transmit blocks.
3489          */
3490         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3491         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3492         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3493         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3494         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3495         if (!(BGE_IS_5705_OR_BEYOND(sc)))
3496                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3497         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3498
3499         /*
3500          * Shut down all of the memory managers and related
3501          * state machines.
3502          */
3503         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3504         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3505         if (!(BGE_IS_5705_OR_BEYOND(sc)))
3506                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3507         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3508         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3509         if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3510                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3511                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3512         }
3513
3514         /* Disable host interrupts. */
3515         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3516         CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3517
3518         /*
3519          * Tell firmware we're shutting down.
3520          */
3521         BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3522
3523         /* Free the RX lists. */
3524         bge_free_rx_ring_std(sc);
3525
3526         /* Free jumbo RX list. */
3527         if (BGE_IS_JUMBO_CAPABLE(sc))
3528                 bge_free_rx_ring_jumbo(sc);
3529
3530         /* Free TX buffers. */
3531         bge_free_tx_ring(sc);
3532
3533         /*
3534          * Isolate/power down the PHY, but leave the media selection
3535          * unchanged so that things will be put back to normal when
3536          * we bring the interface back up.
3537          */
3538         if (!sc->bge_tbi) {
3539                 itmp = ifp->if_flags;
3540                 ifp->if_flags |= IFF_UP;
3541                 /*
3542                  * If we are called from bge_detach(), mii is already NULL.
3543                  */
3544                 if (mii != NULL) {
3545                         ifm = mii->mii_media.ifm_cur;
3546                         mtmp = ifm->ifm_media;
3547                         ifm->ifm_media = IFM_ETHER|IFM_NONE;
3548                         mii_mediachg(mii);
3549                         ifm->ifm_media = mtmp;
3550                 }
3551                 ifp->if_flags = itmp;
3552         }
3553
3554         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3555
3556         /*
3557          * We can't just call bge_link_upd() here because the chip is almost
3558          * stopped, and the bge_link_upd -> bge_tick_locked -> bge_stats_update
3559          * sequence may then deadlock the hardware. Instead, just clear the
3560          * MAC's link state (the PHY may still report link UP).
3561          */
3562         if (bootverbose && sc->bge_link)
3563                 if_printf(sc->bge_ifp, "link DOWN\n");
3564         sc->bge_link = 0;
3565
3566         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3567 }
3568
3569 /*
3570  * Stop all chip I/O so that the kernel's probe routines don't
3571  * get confused by errant DMAs when rebooting.
3572  */
3573 static void
3574 bge_shutdown(device_t dev)
3575 {
3576         struct bge_softc *sc;
3577
3578         sc = device_get_softc(dev);
3579
3580         BGE_LOCK(sc);
3581         bge_stop(sc);
3582         bge_reset(sc);
3583         BGE_UNLOCK(sc);
3584 }
3585
3586 static int
3587 bge_suspend(device_t dev)
3588 {
3589         struct bge_softc *sc;
3590
3591         sc = device_get_softc(dev);
3592         BGE_LOCK(sc);
3593         bge_stop(sc);
3594         BGE_UNLOCK(sc);
3595
3596         return (0);
3597 }
3598
3599 static int
3600 bge_resume(device_t dev)
3601 {
3602         struct bge_softc *sc;
3603         struct ifnet *ifp;
3604
3605         sc = device_get_softc(dev);
3606         BGE_LOCK(sc);
3607         ifp = sc->bge_ifp;
3608         if (ifp->if_flags & IFF_UP) {
3609                 bge_init_locked(sc);
3610                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3611                         bge_start_locked(ifp);
3612         }
3613         BGE_UNLOCK(sc);
3614
3615         return (0);
3616 }
3617
3618 static void
3619 bge_link_upd(struct bge_softc *sc)
3620 {
3621         struct mii_data *mii;
3622         uint32_t link, status;
3623
3624         BGE_LOCK_ASSERT(sc);
3625
3626         /* Clear 'pending link event' flag. */
3627         sc->bge_link_evt = 0;
3628
3629         /*
3630          * Process link state changes.
3631          * Grrr. The link status word in the status block does
3632          * not work correctly on the BCM5700 rev AX and BX chips,
3633          * according to all available information. Hence, we have
3634          * to enable MII interrupts in order to properly obtain
3635          * async link changes. Unfortunately, this also means that
3636          * we have to read the MAC status register to detect link
3637          * changes, thereby adding an additional register access to
3638          * the interrupt handler.
3639          *
3640          * XXX: perhaps the link state detection procedure used for
3641          * BGE_CHIPID_BCM5700_B2 could be used for other BCM5700 revisions.
3642          */
3643
3644         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3645             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
3646                 status = CSR_READ_4(sc, BGE_MAC_STS);
3647                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3648                         callout_stop(&sc->bge_stat_ch);
3649                         bge_tick_locked(sc);
3650
3651                         mii = device_get_softc(sc->bge_miibus);
3652                         if (!sc->bge_link &&
3653                             mii->mii_media_status & IFM_ACTIVE &&
3654                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3655                                 sc->bge_link++;
3656                                 if (bootverbose)
3657                                         if_printf(sc->bge_ifp, "link UP\n");
3658                         } else if (sc->bge_link &&
3659                             (!(mii->mii_media_status & IFM_ACTIVE) ||
3660                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3661                                 sc->bge_link = 0;
3662                                 if (bootverbose)
3663                                         if_printf(sc->bge_ifp, "link DOWN\n");
3664                         }
3665
3666                         /* Clear the interrupt. */
3667                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3668                             BGE_EVTENB_MI_INTERRUPT);
3669                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3670                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3671                             BRGPHY_INTRS);
3672                 }
3673                 return;
3674         }
3675
3676         if (sc->bge_tbi) {
3677                 status = CSR_READ_4(sc, BGE_MAC_STS);
3678                 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3679                         if (!sc->bge_link) {
3680                                 sc->bge_link++;
3681                                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3682                                         BGE_CLRBIT(sc, BGE_MAC_MODE,
3683                                             BGE_MACMODE_TBI_SEND_CFGS);
3684                                 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3685                                 if (bootverbose)
3686                                         if_printf(sc->bge_ifp, "link UP\n");
3687                                 if_link_state_change(sc->bge_ifp,
3688                                     LINK_STATE_UP);
3689                         }
3690                 } else if (sc->bge_link) {
3691                         sc->bge_link = 0;
3692                         if (bootverbose)
3693                                 if_printf(sc->bge_ifp, "link DOWN\n");
3694                         if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3695                 }
3696         /* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
3697         } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3698                 /*
3699                  * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3700                  * bit in the status word always set. Work around this bug by
3701                  * reading the PHY link status directly.
3702                  */
3703                 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3704
3705                 if (link != sc->bge_link ||
3706                     sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3707                         callout_stop(&sc->bge_stat_ch);
3708                         bge_tick_locked(sc);
3709
3710                         mii = device_get_softc(sc->bge_miibus);
3711                         if (!sc->bge_link &&
3712                             mii->mii_media_status & IFM_ACTIVE &&
3713                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3714                                 sc->bge_link++;
3715                                 if (bootverbose)
3716                                         if_printf(sc->bge_ifp, "link UP\n");
3717                         } else if (sc->bge_link &&
3718                             (!(mii->mii_media_status & IFM_ACTIVE) ||
3719                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3720                                 sc->bge_link = 0;
3721                                 if (bootverbose)
3722                                         if_printf(sc->bge_ifp, "link DOWN\n");
3723                         }
3724                 }
3725         }
3726
3727         /* Clear the attention. */
3728         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3729             BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3730             BGE_MACSTAT_LINK_CHANGED);
3731 }