1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 /*
38  * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QoS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function on a 32-bit/64-bit 33/66 MHz bus, or a 64-bit/133 MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
84
85 #include <net/if.h>
86 #include <net/if_var.h>
87 #include <net/if_arp.h>
88 #include <net/ethernet.h>
89 #include <net/if_dl.h>
90 #include <net/if_media.h>
91
92 #include <net/bpf.h>
93
94 #include <net/if_types.h>
95 #include <net/if_vlan_var.h>
96
97 #include <netinet/in_systm.h>
98 #include <netinet/in.h>
99 #include <netinet/ip.h>
100 #include <netinet/tcp.h>
101
102 #include <machine/bus.h>
103 #include <machine/resource.h>
104 #include <sys/bus.h>
105 #include <sys/rman.h>
106
107 #include <dev/mii/mii.h>
108 #include <dev/mii/miivar.h>
109 #include "miidevs.h"
110 #include <dev/mii/brgphyreg.h>
111
112 #ifdef __sparc64__
113 #include <dev/ofw/ofw_bus.h>
114 #include <dev/ofw/openfirm.h>
115 #include <machine/ofw_machdep.h>
116 #include <machine/ver.h>
117 #endif
118
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121
122 #include <dev/bge/if_bgereg.h>
123
124 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP)
125 #define ETHER_MIN_NOPAD         (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
126
127 MODULE_DEPEND(bge, pci, 1, 1, 1);
128 MODULE_DEPEND(bge, ether, 1, 1, 1);
129 MODULE_DEPEND(bge, miibus, 1, 1, 1);
130
131 /* "device miibus" required.  See GENERIC if you get errors here. */
132 #include "miibus_if.h"
133
134 /*
135  * Various supported device vendors/types and their names. Note: the
136  * spec seems to indicate that the hardware still has Alteon's vendor
137  * ID burned into it, though it will always be overridden by the vendor
138  * ID in the EEPROM. Just to be safe, we cover all possibilities.
139  */
140 static const struct bge_type {
141         uint16_t        bge_vid;
142         uint16_t        bge_did;
143 } bge_devs[] = {
144         { ALTEON_VENDORID,      ALTEON_DEVICEID_BCM5700 },
145         { ALTEON_VENDORID,      ALTEON_DEVICEID_BCM5701 },
146
147         { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC1000 },
148         { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC1002 },
149         { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC9100 },
150
151         { APPLE_VENDORID,       APPLE_DEVICE_BCM5701 },
152
153         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5700 },
154         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5701 },
155         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702 },
156         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702_ALT },
157         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702X },
158         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703 },
159         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703_ALT },
160         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703X },
161         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704C },
162         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704S },
163         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704S_ALT },
164         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705 },
165         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705F },
166         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705K },
167         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705M },
168         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705M_ALT },
169         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5714C },
170         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5714S },
171         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5715 },
172         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5715S },
173         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5717 },
174         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5718 },
175         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5719 },
176         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5720 },
177         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5721 },
178         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5722 },
179         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5723 },
180         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5725 },
181         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5727 },
182         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5750 },
183         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5750M },
184         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751 },
185         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751F },
186         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751M },
187         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5752 },
188         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5752M },
189         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753 },
190         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753F },
191         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753M },
192         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5754 },
193         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5754M },
194         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5755 },
195         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5755M },
196         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5756 },
197         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761 },
198         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761E },
199         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761S },
200         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761SE },
201         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5762 },
202         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5764 },
203         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5780 },
204         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5780S },
205         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5781 },
206         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5782 },
207         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5784 },
208         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5785F },
209         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5785G },
210         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5786 },
211         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787 },
212         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787F },
213         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787M },
214         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5788 },
215         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5789 },
216         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5901 },
217         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5901A2 },
218         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5903M },
219         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5906 },
220         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5906M },
221         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57760 },
222         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57761 },
223         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57762 },
224         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57765 },
225         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57766 },
226         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57780 },
227         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57781 },
228         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57785 },
229         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57788 },
230         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57790 },
231         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57791 },
232         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57795 },
233
234         { SK_VENDORID,          SK_DEVICEID_ALTIMA },
235
236         { TC_VENDORID,          TC_DEVICEID_3C996 },
237
238         { FJTSU_VENDORID,       FJTSU_DEVICEID_PW008GE4 },
239         { FJTSU_VENDORID,       FJTSU_DEVICEID_PW008GE5 },
240         { FJTSU_VENDORID,       FJTSU_DEVICEID_PP250450 },
241
242         { 0, 0 }
243 };
244
245 static const struct bge_vendor {
246         uint16_t        v_id;
247         const char      *v_name;
248 } bge_vendors[] = {
249         { ALTEON_VENDORID,      "Alteon" },
250         { ALTIMA_VENDORID,      "Altima" },
251         { APPLE_VENDORID,       "Apple" },
252         { BCOM_VENDORID,        "Broadcom" },
253         { SK_VENDORID,          "SysKonnect" },
254         { TC_VENDORID,          "3Com" },
255         { FJTSU_VENDORID,       "Fujitsu" },
256
257         { 0, NULL }
258 };
259
260 static const struct bge_revision {
261         uint32_t        br_chipid;
262         const char      *br_name;
263 } bge_revisions[] = {
264         { BGE_CHIPID_BCM5700_A0,        "BCM5700 A0" },
265         { BGE_CHIPID_BCM5700_A1,        "BCM5700 A1" },
266         { BGE_CHIPID_BCM5700_B0,        "BCM5700 B0" },
267         { BGE_CHIPID_BCM5700_B1,        "BCM5700 B1" },
268         { BGE_CHIPID_BCM5700_B2,        "BCM5700 B2" },
269         { BGE_CHIPID_BCM5700_B3,        "BCM5700 B3" },
270         { BGE_CHIPID_BCM5700_ALTIMA,    "BCM5700 Altima" },
271         { BGE_CHIPID_BCM5700_C0,        "BCM5700 C0" },
272         { BGE_CHIPID_BCM5701_A0,        "BCM5701 A0" },
273         { BGE_CHIPID_BCM5701_B0,        "BCM5701 B0" },
274         { BGE_CHIPID_BCM5701_B2,        "BCM5701 B2" },
275         { BGE_CHIPID_BCM5701_B5,        "BCM5701 B5" },
276         { BGE_CHIPID_BCM5703_A0,        "BCM5703 A0" },
277         { BGE_CHIPID_BCM5703_A1,        "BCM5703 A1" },
278         { BGE_CHIPID_BCM5703_A2,        "BCM5703 A2" },
279         { BGE_CHIPID_BCM5703_A3,        "BCM5703 A3" },
280         { BGE_CHIPID_BCM5703_B0,        "BCM5703 B0" },
281         { BGE_CHIPID_BCM5704_A0,        "BCM5704 A0" },
282         { BGE_CHIPID_BCM5704_A1,        "BCM5704 A1" },
283         { BGE_CHIPID_BCM5704_A2,        "BCM5704 A2" },
284         { BGE_CHIPID_BCM5704_A3,        "BCM5704 A3" },
285         { BGE_CHIPID_BCM5704_B0,        "BCM5704 B0" },
286         { BGE_CHIPID_BCM5705_A0,        "BCM5705 A0" },
287         { BGE_CHIPID_BCM5705_A1,        "BCM5705 A1" },
288         { BGE_CHIPID_BCM5705_A2,        "BCM5705 A2" },
289         { BGE_CHIPID_BCM5705_A3,        "BCM5705 A3" },
290         { BGE_CHIPID_BCM5750_A0,        "BCM5750 A0" },
291         { BGE_CHIPID_BCM5750_A1,        "BCM5750 A1" },
292         { BGE_CHIPID_BCM5750_A3,        "BCM5750 A3" },
293         { BGE_CHIPID_BCM5750_B0,        "BCM5750 B0" },
294         { BGE_CHIPID_BCM5750_B1,        "BCM5750 B1" },
295         { BGE_CHIPID_BCM5750_C0,        "BCM5750 C0" },
296         { BGE_CHIPID_BCM5750_C1,        "BCM5750 C1" },
297         { BGE_CHIPID_BCM5750_C2,        "BCM5750 C2" },
298         { BGE_CHIPID_BCM5714_A0,        "BCM5714 A0" },
299         { BGE_CHIPID_BCM5752_A0,        "BCM5752 A0" },
300         { BGE_CHIPID_BCM5752_A1,        "BCM5752 A1" },
301         { BGE_CHIPID_BCM5752_A2,        "BCM5752 A2" },
302         { BGE_CHIPID_BCM5714_B0,        "BCM5714 B0" },
303         { BGE_CHIPID_BCM5714_B3,        "BCM5714 B3" },
304         { BGE_CHIPID_BCM5715_A0,        "BCM5715 A0" },
305         { BGE_CHIPID_BCM5715_A1,        "BCM5715 A1" },
306         { BGE_CHIPID_BCM5715_A3,        "BCM5715 A3" },
307         { BGE_CHIPID_BCM5717_A0,        "BCM5717 A0" },
308         { BGE_CHIPID_BCM5717_B0,        "BCM5717 B0" },
309         { BGE_CHIPID_BCM5719_A0,        "BCM5719 A0" },
310         { BGE_CHIPID_BCM5720_A0,        "BCM5720 A0" },
311         { BGE_CHIPID_BCM5755_A0,        "BCM5755 A0" },
312         { BGE_CHIPID_BCM5755_A1,        "BCM5755 A1" },
313         { BGE_CHIPID_BCM5755_A2,        "BCM5755 A2" },
314         { BGE_CHIPID_BCM5722_A0,        "BCM5722 A0" },
315         { BGE_CHIPID_BCM5761_A0,        "BCM5761 A0" },
316         { BGE_CHIPID_BCM5761_A1,        "BCM5761 A1" },
317         { BGE_CHIPID_BCM5762_A0,        "BCM5762 A0" },
318         { BGE_CHIPID_BCM5784_A0,        "BCM5784 A0" },
319         { BGE_CHIPID_BCM5784_A1,        "BCM5784 A1" },
320         /* 5754 and 5787 share the same ASIC ID */
321         { BGE_CHIPID_BCM5787_A0,        "BCM5754/5787 A0" },
322         { BGE_CHIPID_BCM5787_A1,        "BCM5754/5787 A1" },
323         { BGE_CHIPID_BCM5787_A2,        "BCM5754/5787 A2" },
324         { BGE_CHIPID_BCM5906_A1,        "BCM5906 A1" },
325         { BGE_CHIPID_BCM5906_A2,        "BCM5906 A2" },
326         { BGE_CHIPID_BCM57765_A0,       "BCM57765 A0" },
327         { BGE_CHIPID_BCM57765_B0,       "BCM57765 B0" },
328         { BGE_CHIPID_BCM57780_A0,       "BCM57780 A0" },
329         { BGE_CHIPID_BCM57780_A1,       "BCM57780 A1" },
330
331         { 0, NULL }
332 };
333
334 /*
335  * Some defaults for major revisions, so that newer steppings
336  * that we don't know about have a shot at working.
337  */
338 static const struct bge_revision bge_majorrevs[] = {
339         { BGE_ASICREV_BCM5700,          "unknown BCM5700" },
340         { BGE_ASICREV_BCM5701,          "unknown BCM5701" },
341         { BGE_ASICREV_BCM5703,          "unknown BCM5703" },
342         { BGE_ASICREV_BCM5704,          "unknown BCM5704" },
343         { BGE_ASICREV_BCM5705,          "unknown BCM5705" },
344         { BGE_ASICREV_BCM5750,          "unknown BCM5750" },
345         { BGE_ASICREV_BCM5714_A0,       "unknown BCM5714" },
346         { BGE_ASICREV_BCM5752,          "unknown BCM5752" },
347         { BGE_ASICREV_BCM5780,          "unknown BCM5780" },
348         { BGE_ASICREV_BCM5714,          "unknown BCM5714" },
349         { BGE_ASICREV_BCM5755,          "unknown BCM5755" },
350         { BGE_ASICREV_BCM5761,          "unknown BCM5761" },
351         { BGE_ASICREV_BCM5784,          "unknown BCM5784" },
352         { BGE_ASICREV_BCM5785,          "unknown BCM5785" },
353         /* 5754 and 5787 share the same ASIC ID */
354         { BGE_ASICREV_BCM5787,          "unknown BCM5754/5787" },
355         { BGE_ASICREV_BCM5906,          "unknown BCM5906" },
356         { BGE_ASICREV_BCM57765,         "unknown BCM57765" },
357         { BGE_ASICREV_BCM57766,         "unknown BCM57766" },
358         { BGE_ASICREV_BCM57780,         "unknown BCM57780" },
359         { BGE_ASICREV_BCM5717,          "unknown BCM5717" },
360         { BGE_ASICREV_BCM5719,          "unknown BCM5719" },
361         { BGE_ASICREV_BCM5720,          "unknown BCM5720" },
362         { BGE_ASICREV_BCM5762,          "unknown BCM5762" },
363
364         { 0, NULL }
365 };
366
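/*
 * Convenience macros for the chip family/capability flags collected in
 * sc->bge_flags during attach.
 */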
367 #define BGE_IS_JUMBO_CAPABLE(sc)        ((sc)->bge_flags & BGE_FLAG_JUMBO)
368 #define BGE_IS_5700_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
369 #define BGE_IS_5705_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
370 #define BGE_IS_5714_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
371 #define BGE_IS_575X_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
372 #define BGE_IS_5755_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
373 #define BGE_IS_5717_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
374 #define BGE_IS_57765_PLUS(sc)           ((sc)->bge_flags & BGE_FLAG_57765_PLUS)
375
376 static uint32_t bge_chipid(device_t);
377 static const struct bge_vendor * bge_lookup_vendor(uint16_t);
378 static const struct bge_revision * bge_lookup_rev(uint32_t);
379
380 typedef int     (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
381
382 static int bge_probe(device_t);
383 static int bge_attach(device_t);
384 static int bge_detach(device_t);
385 static int bge_suspend(device_t);
386 static int bge_resume(device_t);
387 static void bge_release_resources(struct bge_softc *);
388 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
389 static int bge_dma_alloc(struct bge_softc *);
390 static void bge_dma_free(struct bge_softc *);
391 static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
392     bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
393
394 static void bge_devinfo(struct bge_softc *);
395 static int bge_mbox_reorder(struct bge_softc *);
396
397 static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
398 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
399 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
400 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
401 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
402
403 static void bge_txeof(struct bge_softc *, uint16_t);
404 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
405 static int bge_rxeof(struct bge_softc *, uint16_t, int);
406
407 static void bge_asf_driver_up (struct bge_softc *);
408 static void bge_tick(void *);
409 static void bge_stats_clear_regs(struct bge_softc *);
410 static void bge_stats_update(struct bge_softc *);
411 static void bge_stats_update_regs(struct bge_softc *);
412 static struct mbuf *bge_check_short_dma(struct mbuf *);
413 static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
414     uint16_t *, uint16_t *);
415 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
416
417 static void bge_intr(void *);
418 static int bge_msi_intr(void *);
419 static void bge_intr_task(void *, int);
420 static void bge_start_locked(struct ifnet *);
421 static void bge_start(struct ifnet *);
422 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
423 static void bge_init_locked(struct bge_softc *);
424 static void bge_init(void *);
425 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
426 static void bge_stop(struct bge_softc *);
427 static void bge_watchdog(struct bge_softc *);
428 static int bge_shutdown(device_t);
429 static int bge_ifmedia_upd_locked(struct ifnet *);
430 static int bge_ifmedia_upd(struct ifnet *);
431 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
432
433 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
434 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
435
436 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
437 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
438
439 static void bge_setpromisc(struct bge_softc *);
440 static void bge_setmulti(struct bge_softc *);
441 static void bge_setvlan(struct bge_softc *);
442
443 static __inline void bge_rxreuse_std(struct bge_softc *, int);
444 static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
445 static int bge_newbuf_std(struct bge_softc *, int);
446 static int bge_newbuf_jumbo(struct bge_softc *, int);
447 static int bge_init_rx_ring_std(struct bge_softc *);
448 static void bge_free_rx_ring_std(struct bge_softc *);
449 static int bge_init_rx_ring_jumbo(struct bge_softc *);
450 static void bge_free_rx_ring_jumbo(struct bge_softc *);
451 static void bge_free_tx_ring(struct bge_softc *);
452 static int bge_init_tx_ring(struct bge_softc *);
453
454 static int bge_chipinit(struct bge_softc *);
455 static int bge_blockinit(struct bge_softc *);
456 static uint32_t bge_dma_swap_options(struct bge_softc *);
457
458 static int bge_has_eaddr(struct bge_softc *);
459 static uint32_t bge_readmem_ind(struct bge_softc *, int);
460 static void bge_writemem_ind(struct bge_softc *, int, int);
461 static void bge_writembx(struct bge_softc *, int, int);
462 #ifdef notdef
463 static uint32_t bge_readreg_ind(struct bge_softc *, int);
464 #endif
465 static void bge_writemem_direct(struct bge_softc *, int, int);
466 static void bge_writereg_ind(struct bge_softc *, int, int);
467
468 static int bge_miibus_readreg(device_t, int, int);
469 static int bge_miibus_writereg(device_t, int, int, int);
470 static void bge_miibus_statchg(device_t);
471 #ifdef DEVICE_POLLING
472 static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
473 #endif
474
475 #define BGE_RESET_SHUTDOWN      0
476 #define BGE_RESET_START         1
477 #define BGE_RESET_SUSPEND       2
478 static void bge_sig_post_reset(struct bge_softc *, int);
479 static void bge_sig_legacy(struct bge_softc *, int);
480 static void bge_sig_pre_reset(struct bge_softc *, int);
481 static void bge_stop_fw(struct bge_softc *);
482 static int bge_reset(struct bge_softc *);
483 static void bge_link_upd(struct bge_softc *);
484
485 static void bge_ape_lock_init(struct bge_softc *);
486 static void bge_ape_read_fw_ver(struct bge_softc *);
487 static int bge_ape_lock(struct bge_softc *, int);
488 static void bge_ape_unlock(struct bge_softc *, int);
489 static void bge_ape_send_event(struct bge_softc *, uint32_t);
490 static void bge_ape_driver_state_change(struct bge_softc *, int);
491
492 /*
493  * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
494  * leak information to untrusted users.  It is also known to cause alignment
495  * traps on certain architectures.
496  */
497 #ifdef BGE_REGISTER_DEBUG
498 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
499 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
500 static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS);
501 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
502 #endif
503 static void bge_add_sysctls(struct bge_softc *);
504 static void bge_add_sysctl_stats_regs(struct bge_softc *,
505     struct sysctl_ctx_list *, struct sysctl_oid_list *);
506 static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
507     struct sysctl_oid_list *);
508 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
509
510 static device_method_t bge_methods[] = {
511         /* Device interface */
512         DEVMETHOD(device_probe,         bge_probe),
513         DEVMETHOD(device_attach,        bge_attach),
514         DEVMETHOD(device_detach,        bge_detach),
515         DEVMETHOD(device_shutdown,      bge_shutdown),
516         DEVMETHOD(device_suspend,       bge_suspend),
517         DEVMETHOD(device_resume,        bge_resume),
518
519         /* MII interface */
520         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
521         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
522         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
523
524         DEVMETHOD_END
525 };
526
527 static driver_t bge_driver = {
528         "bge",
529         bge_methods,
530         sizeof(struct bge_softc)
531 };
532
533 static devclass_t bge_devclass;
534
535 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
536 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
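/*
 * The driver can also be loaded as a module (if_bge.ko), e.g. with
 * "kldload if_bge" or with if_bge_load="YES" in /boot/loader.conf.
 */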
537
538 static int bge_allow_asf = 1;
539
540 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
541
542 static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
543 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
544         "Allow ASF mode if available");
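/*
 * As a loader tunable, ASF can likewise be disabled at boot time from
 * /boot/loader.conf, e.g.:
 *
 *	hw.bge.allow_asf="0"
 */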
545
546 #define SPARC64_BLADE_1500_MODEL        "SUNW,Sun-Blade-1500"
547 #define SPARC64_BLADE_1500_PATH_BGE     "/pci@1f,700000/network@2"
548 #define SPARC64_BLADE_2500_MODEL        "SUNW,Sun-Blade-2500"
549 #define SPARC64_BLADE_2500_PATH_BGE     "/pci@1c,600000/network@3"
550 #define SPARC64_OFW_SUBVENDOR           "subsystem-vendor-id"
551
552 static int
553 bge_has_eaddr(struct bge_softc *sc)
554 {
555 #ifdef __sparc64__
556         char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
557         device_t dev;
558         uint32_t subvendor;
559
560         dev = sc->bge_dev;
561
562         /*
563          * The on-board BGEs found in sun4u machines aren't fitted with
564          * an EEPROM which means that we have to obtain the MAC address
565          * via OFW and that some tests will always fail.  We distinguish
566          * such BGEs by the subvendor ID, which also has to be obtained
567          * from OFW instead of the PCI configuration space as the latter
568          * indicates Broadcom as the subvendor of the netboot interface.
569          * For early Blade 1500 and 2500 we even have to check the OFW
570          * device path as the subvendor ID always defaults to Broadcom
571          * there.
572          */
573         if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
574             &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
575             (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
576                 return (0);
577         memset(buf, 0, sizeof(buf));
578         if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
579                 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
580                     strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
581                         return (0);
582                 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
583                     strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
584                         return (0);
585         }
586 #endif
587         return (1);
588 }
589
590 static uint32_t
591 bge_readmem_ind(struct bge_softc *sc, int off)
592 {
593         device_t dev;
594         uint32_t val;
595
596         if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
597             off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
598                 return (0);
599
600         dev = sc->bge_dev;
601
602         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
603         val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
604         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
605         return (val);
606 }
607
608 static void
609 bge_writemem_ind(struct bge_softc *sc, int off, int val)
610 {
611         device_t dev;
612
613         if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
614             off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
615                 return;
616
617         dev = sc->bge_dev;
618
619         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
620         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
621         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
622 }
623
624 #ifdef notdef
625 static uint32_t
626 bge_readreg_ind(struct bge_softc *sc, int off)
627 {
628         device_t dev;
629
630         dev = sc->bge_dev;
631
632         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
633         return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
634 }
635 #endif
636
637 static void
638 bge_writereg_ind(struct bge_softc *sc, int off, int val)
639 {
640         device_t dev;
641
642         dev = sc->bge_dev;
643
644         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
645         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
646 }
647
648 static void
649 bge_writemem_direct(struct bge_softc *sc, int off, int val)
650 {
651         CSR_WRITE_4(sc, off, val);
652 }
653
654 static void
655 bge_writembx(struct bge_softc *sc, int off, int val)
656 {
657         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
658                 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
659
660         CSR_WRITE_4(sc, off, val);
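        /*
         * On host bridges that reorder posted writes, read the register
         * back to force the mailbox update to complete before returning
         * (see bge_mbox_reorder()).
         */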
661         if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
662                 CSR_READ_4(sc, off);
663 }
664
665 /*
666  * Clear all stale locks and select the lock for this driver instance.
667  */
668 static void
669 bge_ape_lock_init(struct bge_softc *sc)
670 {
671         uint32_t bit, regbase;
672         int i;
673
674         if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
675                 regbase = BGE_APE_LOCK_GRANT;
676         else
677                 regbase = BGE_APE_PER_LOCK_GRANT;
678
679         /* Clear any stale locks. */
680         for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
681                 switch (i) {
682                 case BGE_APE_LOCK_PHY0:
683                 case BGE_APE_LOCK_PHY1:
684                 case BGE_APE_LOCK_PHY2:
685                 case BGE_APE_LOCK_PHY3:
686                         bit = BGE_APE_LOCK_GRANT_DRIVER0;
687                         break;
688                 default:
689                         if (sc->bge_func_addr == 0)
690                                 bit = BGE_APE_LOCK_GRANT_DRIVER0;
691                         else
692                                 bit = (1 << sc->bge_func_addr);
693                 }
694                 APE_WRITE_4(sc, regbase + 4 * i, bit);
695         }
696
697         /* Select the PHY lock based on the device's function number. */
698         switch (sc->bge_func_addr) {
699         case 0:
700                 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
701                 break;
702         case 1:
703                 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
704                 break;
705         case 2:
706                 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
707                 break;
708         case 3:
709                 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
710                 break;
711         default:
712                 device_printf(sc->bge_dev,
713                     "PHY lock not supported on this function\n");
714         }
715 }
716
717 /*
718  * Check for APE firmware, set flags, and print version info.
719  */
720 static void
721 bge_ape_read_fw_ver(struct bge_softc *sc)
722 {
723         const char *fwtype;
724         uint32_t apedata, features;
725
726         /* Check for a valid APE signature in shared memory. */
727         apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
728         if (apedata != BGE_APE_SEG_SIG_MAGIC) {
729                 sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
730                 return;
731         }
732
733         /* Check if APE firmware is running. */
734         apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
735         if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
736                 device_printf(sc->bge_dev, "APE signature found "
737                     "but FW status not ready! 0x%08x\n", apedata);
738                 return;
739         }
740
741         sc->bge_mfw_flags |= BGE_MFW_ON_APE;
742
743         /* Fetch the APE firmware type and version. */
744         apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
745         features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
746         if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
747                 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
748                 fwtype = "NCSI";
749         } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
750                 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
751                 fwtype = "DASH";
752         } else
753                 fwtype = "UNKN";
754
755         /* Print the APE firmware version. */
756         device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
757             fwtype,
758             (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
759             (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
760             (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
761             (apedata & BGE_APE_FW_VERSION_BLDMSK));
762 }
763
764 static int
765 bge_ape_lock(struct bge_softc *sc, int locknum)
766 {
767         uint32_t bit, gnt, req, status;
768         int i, off;
769
770         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
771                 return (0);
772
773         /* Lock request/grant registers have different bases. */
774         if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
775                 req = BGE_APE_LOCK_REQ;
776                 gnt = BGE_APE_LOCK_GRANT;
777         } else {
778                 req = BGE_APE_PER_LOCK_REQ;
779                 gnt = BGE_APE_PER_LOCK_GRANT;
780         }
781
782         off = 4 * locknum;
783
784         switch (locknum) {
785         case BGE_APE_LOCK_GPIO:
786                 /* Lock required when using GPIO. */
787                 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
788                         return (0);
789                 if (sc->bge_func_addr == 0)
790                         bit = BGE_APE_LOCK_REQ_DRIVER0;
791                 else
792                         bit = (1 << sc->bge_func_addr);
793                 break;
794         case BGE_APE_LOCK_GRC:
795                 /* Lock required to reset the device. */
796                 if (sc->bge_func_addr == 0)
797                         bit = BGE_APE_LOCK_REQ_DRIVER0;
798                 else
799                         bit = (1 << sc->bge_func_addr);
800                 break;
801         case BGE_APE_LOCK_MEM:
802                 /* Lock required when accessing certain APE memory. */
803                 if (sc->bge_func_addr == 0)
804                         bit = BGE_APE_LOCK_REQ_DRIVER0;
805                 else
806                         bit = (1 << sc->bge_func_addr);
807                 break;
808         case BGE_APE_LOCK_PHY0:
809         case BGE_APE_LOCK_PHY1:
810         case BGE_APE_LOCK_PHY2:
811         case BGE_APE_LOCK_PHY3:
812                 /* Lock required when accessing PHYs. */
813                 bit = BGE_APE_LOCK_REQ_DRIVER0;
814                 break;
815         default:
816                 return (EINVAL);
817         }
818
819         /* Request a lock. */
820         APE_WRITE_4(sc, req + off, bit);
821
822         /* Wait up to 1 second to acquire lock. */
823         for (i = 0; i < 20000; i++) {
824                 status = APE_READ_4(sc, gnt + off);
825                 if (status == bit)
826                         break;
827                 DELAY(50);
828         }
829
830         /* Handle any errors. */
831         if (status != bit) {
832                 device_printf(sc->bge_dev, "APE lock %d request failed! "
833                     "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
834                     locknum, req + off, bit & 0xFFFF, gnt + off,
835                     status & 0xFFFF);
836                 /* Revoke the lock request. */
837                 APE_WRITE_4(sc, gnt + off, bit);
838                 return (EBUSY);
839         }
840
841         return (0);
842 }
843
844 static void
845 bge_ape_unlock(struct bge_softc *sc, int locknum)
846 {
847         uint32_t bit, gnt;
848         int off;
849
850         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
851                 return;
852
853         if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
854                 gnt = BGE_APE_LOCK_GRANT;
855         else
856                 gnt = BGE_APE_PER_LOCK_GRANT;
857
858         off = 4 * locknum;
859
860         switch (locknum) {
861         case BGE_APE_LOCK_GPIO:
862                 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
863                         return;
864                 if (sc->bge_func_addr == 0)
865                         bit = BGE_APE_LOCK_GRANT_DRIVER0;
866                 else
867                         bit = (1 << sc->bge_func_addr);
868                 break;
869         case BGE_APE_LOCK_GRC:
870                 if (sc->bge_func_addr == 0)
871                         bit = BGE_APE_LOCK_GRANT_DRIVER0;
872                 else
873                         bit = (1 << sc->bge_func_addr);
874                 break;
875         case BGE_APE_LOCK_MEM:
876                 if (sc->bge_func_addr == 0)
877                         bit = BGE_APE_LOCK_GRANT_DRIVER0;
878                 else
879                         bit = (1 << sc->bge_func_addr);
880                 break;
881         case BGE_APE_LOCK_PHY0:
882         case BGE_APE_LOCK_PHY1:
883         case BGE_APE_LOCK_PHY2:
884         case BGE_APE_LOCK_PHY3:
885                 bit = BGE_APE_LOCK_GRANT_DRIVER0;
886                 break;
887         default:
888                 return;
889         }
890
891         APE_WRITE_4(sc, gnt + off, bit);
892 }
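/*
 * A minimal sketch of the lock/unlock pairing used around shared hardware
 * resources in this driver (bge_miibus_readreg() below is a real instance):
 *
 *	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
 *		return (0);
 *	... access the PHY registers ...
 *	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
 */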
893
894 /*
895  * Send an event to the APE firmware.
896  */
897 static void
898 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
899 {
900         uint32_t apedata;
901         int i;
902
903         /* APE events require the APE firmware to be present and running. */
904         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
905                 return;
906
907         /* Wait up to 1ms for APE to service previous event. */
908         for (i = 10; i > 0; i--) {
909                 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
910                         break;
911                 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
912                 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
913                         APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
914                             BGE_APE_EVENT_STATUS_EVENT_PENDING);
915                         bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
916                         APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
917                         break;
918                 }
919                 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
920                 DELAY(100);
921         }
922         if (i == 0)
923                 device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n",
924                     event);
925 }
926
927 static void
928 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
929 {
930         uint32_t apedata, event;
931
932         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
933                 return;
934
935         switch (kind) {
936         case BGE_RESET_START:
937                 /* If this is the first load, clear the load counter. */
938                 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
939                 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
940                         APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
941                 else {
942                         apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
943                         APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
944                 }
945                 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
946                     BGE_APE_HOST_SEG_SIG_MAGIC);
947                 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
948                     BGE_APE_HOST_SEG_LEN_MAGIC);
949
950                 /* Add some version info if bge(4) supports it. */
951                 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
952                     BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
953                 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
954                     BGE_APE_HOST_BEHAV_NO_PHYLOCK);
955                 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
956                     BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
957                 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
958                     BGE_APE_HOST_DRVR_STATE_START);
959                 event = BGE_APE_EVENT_STATUS_STATE_START;
960                 break;
961         case BGE_RESET_SHUTDOWN:
962                 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
963                     BGE_APE_HOST_DRVR_STATE_UNLOAD);
964                 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
965                 break;
966         case BGE_RESET_SUSPEND:
967                 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
968                 break;
969         default:
970                 return;
971         }
972
973         bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
974             BGE_APE_EVENT_STATUS_STATE_CHNGE);
975 }
976
977 /*
978  * Map a single buffer address.
979  */
980
981 static void
982 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
983 {
984         struct bge_dmamap_arg *ctx;
985
986         if (error)
987                 return;
988
989         KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
990
991         ctx = arg;
992         ctx->bge_busaddr = segs->ds_addr;
993 }
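/*
 * A minimal usage sketch (tag, map, vaddr, len and paddr are placeholders):
 * the callback above is passed to bus_dmamap_load(9), which invokes it with
 * the resolved segment list, and the single bus address comes back through
 * the struct bge_dmamap_arg cookie.
 *
 *	struct bge_dmamap_arg ctx;
 *
 *	ctx.bge_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, vaddr, len, bge_dma_map_addr,
 *	    &ctx, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		paddr = ctx.bge_busaddr;
 */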
994
995 static uint8_t
996 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
997 {
998         uint32_t access, byte = 0;
999         int i;
1000
1001         /* Lock. */
1002         CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
1003         for (i = 0; i < 8000; i++) {
1004                 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
1005                         break;
1006                 DELAY(20);
1007         }
1008         if (i == 8000)
1009                 return (1);
1010
1011         /* Enable access. */
1012         access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
1013         CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
1014
1015         CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
1016         CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
1017         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
1018                 DELAY(10);
1019                 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
1020                         DELAY(10);
1021                         break;
1022                 }
1023         }
1024
1025         if (i == BGE_TIMEOUT * 10) {
1026                 if_printf(sc->bge_ifp, "nvram read timed out\n");
1027                 return (1);
1028         }
1029
1030         /* Get result. */
1031         byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
1032
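        /*
         * The hardware returns a 32-bit word; byte-swap it and shift to
         * select the addressed byte within that word.
         */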
1033         *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
1034
1035         /* Disable access. */
1036         CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
1037
1038         /* Unlock. */
1039         CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
1040         CSR_READ_4(sc, BGE_NVRAM_SWARB);
1041
1042         return (0);
1043 }
1044
1045 /*
1046  * Read a sequence of bytes from NVRAM.
1047  */
1048 static int
1049 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
1050 {
1051         int err = 0, i;
1052         uint8_t byte = 0;
1053
1054         if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
1055                 return (1);
1056
1057         for (i = 0; i < cnt; i++) {
1058                 err = bge_nvram_getbyte(sc, off + i, &byte);
1059                 if (err)
1060                         break;
1061                 *(dest + i) = byte;
1062         }
1063
1064         return (err ? 1 : 0);
1065 }
1066
1067 /*
1068  * Read a byte of data stored in the EEPROM at address 'addr.' The
1069  * BCM570x supports both the traditional bitbang interface and an
1070  * auto access interface for reading the EEPROM. We use the auto
1071  * access method.
1072  */
1073 static uint8_t
1074 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
1075 {
1076         int i;
1077         uint32_t byte = 0;
1078
1079         /*
1080          * Enable use of auto EEPROM access so we can avoid
1081          * having to use the bitbang method.
1082          */
1083         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
1084
1085         /* Reset the EEPROM, load the clock period. */
1086         CSR_WRITE_4(sc, BGE_EE_ADDR,
1087             BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
1088         DELAY(20);
1089
1090         /* Issue the read EEPROM command. */
1091         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
1092
1093         /* Wait for completion. */
1094         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
1095                 DELAY(10);
1096                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
1097                         break;
1098         }
1099
1100         if (i == BGE_TIMEOUT * 10) {
1101                 device_printf(sc->bge_dev, "EEPROM read timed out\n");
1102                 return (1);
1103         }
1104
1105         /* Get result. */
1106         byte = CSR_READ_4(sc, BGE_EE_DATA);
1107
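        /*
         * Select the addressed byte within the 32-bit word; unlike the
         * NVRAM path, no byte swap is needed here.
         */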
1108         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
1109
1110         return (0);
1111 }
1112
1113 /*
1114  * Read a sequence of bytes from the EEPROM.
1115  */
1116 static int
1117 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
1118 {
1119         int i, error = 0;
1120         uint8_t byte = 0;
1121
1122         for (i = 0; i < cnt; i++) {
1123                 error = bge_eeprom_getbyte(sc, off + i, &byte);
1124                 if (error)
1125                         break;
1126                 *(dest + i) = byte;
1127         }
1128
1129         return (error ? 1 : 0);
1130 }
1131
1132 static int
1133 bge_miibus_readreg(device_t dev, int phy, int reg)
1134 {
1135         struct bge_softc *sc;
1136         uint32_t val;
1137         int i;
1138
1139         sc = device_get_softc(dev);
1140
1141         if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1142                 return (0);
1143
1144         /* Clear the autopoll bit if set; otherwise accesses may trigger PCI errors. */
1145         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1146                 CSR_WRITE_4(sc, BGE_MI_MODE,
1147                     sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
1148                 DELAY(80);
1149         }
1150
1151         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
1152             BGE_MIPHY(phy) | BGE_MIREG(reg));
1153
1154         /* Poll for the PHY register access to complete. */
1155         for (i = 0; i < BGE_TIMEOUT; i++) {
1156                 DELAY(10);
1157                 val = CSR_READ_4(sc, BGE_MI_COMM);
1158                 if ((val & BGE_MICOMM_BUSY) == 0) {
1159                         DELAY(5);
1160                         val = CSR_READ_4(sc, BGE_MI_COMM);
1161                         break;
1162                 }
1163         }
1164
1165         if (i == BGE_TIMEOUT) {
1166                 device_printf(sc->bge_dev,
1167                     "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
1168                     phy, reg, val);
1169                 val = 0;
1170         }
1171
1172         /* Restore the autopoll bit if necessary. */
1173         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1174                 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
1175                 DELAY(80);
1176         }
1177
1178         bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1179
1180         if (val & BGE_MICOMM_READFAIL)
1181                 return (0);
1182
1183         return (val & 0xFFFF);
1184 }
1185
1186 static int
1187 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
1188 {
1189         struct bge_softc *sc;
1190         int i;
1191
1192         sc = device_get_softc(dev);
1193
1194         if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
1195             (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
1196                 return (0);
1197
1198         if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1199                 return (0);
1200
1201         /* Clear the autopoll bit if set; otherwise accesses may trigger PCI errors. */
1202         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1203                 CSR_WRITE_4(sc, BGE_MI_MODE,
1204                     sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
1205                 DELAY(80);
1206         }
1207
1208         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
1209             BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
1210
1211         for (i = 0; i < BGE_TIMEOUT; i++) {
1212                 DELAY(10);
1213                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
1214                         DELAY(5);
1215                         CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
1216                         break;
1217                 }
1218         }
1219
1220         /* Restore the autopoll bit if necessary. */
1221         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1222                 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
1223                 DELAY(80);
1224         }
1225
1226         bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1227
1228         if (i == BGE_TIMEOUT)
1229                 device_printf(sc->bge_dev,
1230                     "PHY write timed out (phy %d, reg %d, val 0x%04x)\n",
1231                     phy, reg, val);
1232
1233         return (0);
1234 }
1235
1236 static void
1237 bge_miibus_statchg(device_t dev)
1238 {
1239         struct bge_softc *sc;
1240         struct mii_data *mii;
1241         uint32_t mac_mode, rx_mode, tx_mode;
1242
1243         sc = device_get_softc(dev);
1244         if ((sc->bge_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1245                 return;
1246         mii = device_get_softc(sc->bge_miibus);
1247
1248         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1249             (IFM_ACTIVE | IFM_AVALID)) {
1250                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1251                 case IFM_10_T:
1252                 case IFM_100_TX:
1253                         sc->bge_link = 1;
1254                         break;
1255                 case IFM_1000_T:
1256                 case IFM_1000_SX:
1257                 case IFM_2500_SX:
1258                         if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
1259                                 sc->bge_link = 1;
1260                         else
1261                                 sc->bge_link = 0;
1262                         break;
1263                 default:
1264                         sc->bge_link = 0;
1265                         break;
1266                 }
1267         } else
1268                 sc->bge_link = 0;
1269         if (sc->bge_link == 0)
1270                 return;
1271
1272         /*
1273          * APE firmware touches these registers to keep the MAC
1274          * connected to the outside world.  Try to keep the
1275          * accesses atomic.
1276          */
1277
1278         /* Set the port mode (MII/GMII) to match the link speed. */
1279         mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
1280             ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
1281         tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
1282         rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
1283
1284         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1285             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
1286                 mac_mode |= BGE_PORTMODE_GMII;
1287         else
1288                 mac_mode |= BGE_PORTMODE_MII;
1289
1290         /* Set MAC flow control behavior to match link flow control settings. */
1291         tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
1292         rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
1293         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1294                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1295                         tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
1296                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1297                         rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
1298         } else
1299                 mac_mode |= BGE_MACMODE_HALF_DUPLEX;
1300
1301         CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
1302         DELAY(40);
1303         CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
1304         CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
1305 }
1306
1307 /*
1308  * Initialize a standard receive ring descriptor.
1309  */
1310 static int
1311 bge_newbuf_std(struct bge_softc *sc, int i)
1312 {
1313         struct mbuf *m;
1314         struct bge_rx_bd *r;
1315         bus_dma_segment_t segs[1];
1316         bus_dmamap_t map;
1317         int error, nsegs;
1318
1319         if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
1320             (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1321             ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
1322                 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1323                 if (m == NULL)
1324                         return (ENOBUFS);
1325                 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1326         } else {
1327                 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1328                 if (m == NULL)
1329                         return (ENOBUFS);
1330                 m->m_len = m->m_pkthdr.len = MCLBYTES;
1331         }
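        /*
         * Reserve ETHER_ALIGN (2) bytes at the front of the buffer so the
         * IP header ends up 32-bit aligned.  Chips with the RX alignment
         * bug cannot DMA to a misaligned address, so the offset is skipped
         * for those and the frame is fixed up later in the RX path.
         */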
1332         if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1333                 m_adj(m, ETHER_ALIGN);
1334
1335         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
1336             sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
1337         if (error != 0) {
1338                 m_freem(m);
1339                 return (error);
1340         }
1341         if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1342                 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1343                     sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
1344                 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1345                     sc->bge_cdata.bge_rx_std_dmamap[i]);
1346         }
1347         map = sc->bge_cdata.bge_rx_std_dmamap[i];
1348         sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
1349         sc->bge_cdata.bge_rx_std_sparemap = map;
1350         sc->bge_cdata.bge_rx_std_chain[i] = m;
1351         sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
1352         r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
1353         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1354         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1355         r->bge_flags = BGE_RXBDFLAG_END;
1356         r->bge_len = segs[0].ds_len;
1357         r->bge_idx = i;
1358
1359         bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1360             sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1361
1362         return (0);
1363 }
1364
1365 /*
1366  * Initialize a jumbo receive ring descriptor. This allocates
1367  * a jumbo buffer from the pool managed internally by the driver.
1368  */
1369 static int
1370 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1371 {
1372         bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1373         bus_dmamap_t map;
1374         struct bge_extrx_bd *r;
1375         struct mbuf *m;
1376         int error, nsegs;
1377
1378         MGETHDR(m, M_NOWAIT, MT_DATA);
1379         if (m == NULL)
1380                 return (ENOBUFS);
1381
1382         m_cljget(m, M_NOWAIT, MJUM9BYTES);
1383         if (!(m->m_flags & M_EXT)) {
1384                 m_freem(m);
1385                 return (ENOBUFS);
1386         }
1387         m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1388         if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1389                 m_adj(m, ETHER_ALIGN);
1390
1391         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1392             sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1393         if (error != 0) {
1394                 m_freem(m);
1395                 return (error);
1396         }
1397
1398         if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1399                 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1400                     sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1401                 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1402                     sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1403         }
1404         map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1405         sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1406             sc->bge_cdata.bge_rx_jumbo_sparemap;
1407         sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1408         sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1409         sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1410         sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1411         sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1412         sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1413
1414         /*
1415          * Fill in the extended RX buffer descriptor.
1416          */
1417         r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1418         r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1419         r->bge_idx = i;
1420         r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
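             /*
              * The cases below intentionally fall through so that the
              * highest numbered segment is filled in first and all lower
              * segments follow.
              */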
1421         switch (nsegs) {
1422         case 4:
1423                 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1424                 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1425                 r->bge_len3 = segs[3].ds_len;
1426                 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
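                     /* FALLTHROUGH */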
1427         case 3:
1428                 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1429                 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1430                 r->bge_len2 = segs[2].ds_len;
1431                 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
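                     /* FALLTHROUGH */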
1432         case 2:
1433                 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1434                 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1435                 r->bge_len1 = segs[1].ds_len;
1436                 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
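                     /* FALLTHROUGH */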
1437         case 1:
1438                 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1439                 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1440                 r->bge_len0 = segs[0].ds_len;
1441                 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1442                 break;
1443         default:
1444                 panic("%s: %d segments\n", __func__, nsegs);
1445         }
1446
1447         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1448             sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1449
1450         return (0);
1451 }
1452
1453 static int
1454 bge_init_rx_ring_std(struct bge_softc *sc)
1455 {
1456         int error, i;
1457
1458         bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1459         sc->bge_std = 0;
1460         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1461                 if ((error = bge_newbuf_std(sc, i)) != 0)
1462                         return (error);
1463                 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1464         }
1465
1466         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1467             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1468
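             /*
              * Reset the software ring index and advance the hardware
              * producer index to the last descriptor: the ring is now
              * fully stocked.
              */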
1469         sc->bge_std = 0;
1470         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1471
1472         return (0);
1473 }
1474
1475 static void
1476 bge_free_rx_ring_std(struct bge_softc *sc)
1477 {
1478         int i;
1479
1480         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1481                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1482                         bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1483                             sc->bge_cdata.bge_rx_std_dmamap[i],
1484                             BUS_DMASYNC_POSTREAD);
1485                         bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1486                             sc->bge_cdata.bge_rx_std_dmamap[i]);
1487                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1488                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1489                 }
1490                 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1491                     sizeof(struct bge_rx_bd));
1492         }
1493 }
1494
1495 static int
1496 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1497 {
1498         struct bge_rcb *rcb;
1499         int error, i;
1500
1501         bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1502         sc->bge_jumbo = 0;
1503         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1504                 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1505                         return (error);
1506                 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1507         }
1508
1509         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1510             sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1511
1512         sc->bge_jumbo = 0;
1513
1514         /* Enable the jumbo receive producer ring. */
1515         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1516         rcb->bge_maxlen_flags =
1517             BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1518         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1519
1520         bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1521
1522         return (0);
1523 }
1524
1525 static void
1526 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1527 {
1528         int i;
1529
1530         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1531                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1532                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1533                             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1534                             BUS_DMASYNC_POSTREAD);
1535                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1536                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1537                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1538                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1539                 }
1540                 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1541                     sizeof(struct bge_extrx_bd));
1542         }
1543 }
1544
1545 static void
1546 bge_free_tx_ring(struct bge_softc *sc)
1547 {
1548         int i;
1549
1550         if (sc->bge_ldata.bge_tx_ring == NULL)
1551                 return;
1552
1553         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1554                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1555                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1556                             sc->bge_cdata.bge_tx_dmamap[i],
1557                             BUS_DMASYNC_POSTWRITE);
1558                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1559                             sc->bge_cdata.bge_tx_dmamap[i]);
1560                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
1561                         sc->bge_cdata.bge_tx_chain[i] = NULL;
1562                 }
1563                 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1564                     sizeof(struct bge_tx_bd));
1565         }
1566 }
1567
1568 static int
1569 bge_init_tx_ring(struct bge_softc *sc)
1570 {
1571         sc->bge_txcnt = 0;
1572         sc->bge_tx_saved_considx = 0;
1573
1574         bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1575         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1576             sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1577
1578         /* Initialize transmit producer index for host-memory send ring. */
1579         sc->bge_tx_prodidx = 0;
1580         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1581
1582         /* 5700 b2 errata */
1583         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1584                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1585
1586         /* NIC-memory send ring not used; initialize to zero. */
1587         bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1588         /* 5700 b2 errata */
1589         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1590                 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1591
1592         return (0);
1593 }
1594
1595 static void
1596 bge_setpromisc(struct bge_softc *sc)
1597 {
1598         struct ifnet *ifp;
1599
1600         BGE_LOCK_ASSERT(sc);
1601
1602         ifp = sc->bge_ifp;
1603
1604         /* Enable or disable promiscuous mode as needed. */
1605         if (ifp->if_flags & IFF_PROMISC)
1606                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1607         else
1608                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1609 }
1610
1611 static void
1612 bge_setmulti(struct bge_softc *sc)
1613 {
1614         struct ifnet *ifp;
1615         struct ifmultiaddr *ifma;
1616         uint32_t hashes[4] = { 0, 0, 0, 0 };
1617         int h, i;
1618
1619         BGE_LOCK_ASSERT(sc);
1620
1621         ifp = sc->bge_ifp;
1622
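             /* Accept all multicast: set every bit in the hash filter. */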
1623         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1624                 for (i = 0; i < 4; i++)
1625                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1626                 return;
1627         }
1628
1629         /* First, zot all the existing filters. */
1630         for (i = 0; i < 4; i++)
1631                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1632
1633         /* Now program new ones. */
1634         if_maddr_rlock(ifp);
1635         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1636                 if (ifma->ifma_addr->sa_family != AF_LINK)
1637                         continue;
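                     /*
                      * The filter is a 128-bit hash table.  The low 7 bits
                      * of the CRC32 of the address index into it: bits 6-5
                      * select one of the four BGE_MAR registers and bits
                      * 4-0 the bit within it (e.g. a hash of 0x5B sets bit
                      * 27 of hashes[2]).
                      */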
1638                 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1639                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1640                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1641         }
1642         if_maddr_runlock(ifp);
1643
1644         for (i = 0; i < 4; i++)
1645                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1646 }
1647
1648 static void
1649 bge_setvlan(struct bge_softc *sc)
1650 {
1651         struct ifnet *ifp;
1652
1653         BGE_LOCK_ASSERT(sc);
1654
1655         ifp = sc->bge_ifp;
1656
1657         /* Enable or disable VLAN tag stripping as needed. */
1658         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1659                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1660         else
1661                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1662 }
1663
1664 static void
1665 bge_sig_pre_reset(struct bge_softc *sc, int type)
1666 {
1667
1668         /*
1669          * Some chips don't like this, so only do it if ASF is enabled.
1670          */
1671         if (sc->bge_asf_mode)
1672                 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1673
1674         if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1675                 switch (type) {
1676                 case BGE_RESET_START:
1677                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1678                             BGE_FW_DRV_STATE_START);
1679                         break;
1680                 case BGE_RESET_SHUTDOWN:
1681                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1682                             BGE_FW_DRV_STATE_UNLOAD);
1683                         break;
1684                 case BGE_RESET_SUSPEND:
1685                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1686                             BGE_FW_DRV_STATE_SUSPEND);
1687                         break;
1688                 }
1689         }
1690
1691         if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1692                 bge_ape_driver_state_change(sc, type);
1693 }
1694
1695 static void
1696 bge_sig_post_reset(struct bge_softc *sc, int type)
1697 {
1698
1699         if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1700                 switch (type) {
1701                 case BGE_RESET_START:
1702                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1703                             BGE_FW_DRV_STATE_START_DONE);
1704                         /* START DONE */
1705                         break;
1706                 case BGE_RESET_SHUTDOWN:
1707                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1708                             BGE_FW_DRV_STATE_UNLOAD_DONE);
1709                         break;
1710                 }
1711         }
1712         if (type == BGE_RESET_SHUTDOWN)
1713                 bge_ape_driver_state_change(sc, type);
1714 }
1715
1716 static void
1717 bge_sig_legacy(struct bge_softc *sc, int type)
1718 {
1719
1720         if (sc->bge_asf_mode) {
1721                 switch (type) {
1722                 case BGE_RESET_START:
1723                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1724                             BGE_FW_DRV_STATE_START);
1725                         break;
1726                 case BGE_RESET_SHUTDOWN:
1727                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1728                             BGE_FW_DRV_STATE_UNLOAD);
1729                         break;
1730                 }
1731         }
1732 }
1733
1734 static void
1735 bge_stop_fw(struct bge_softc *sc)
1736 {
1737         int i;
1738
1739         if (sc->bge_asf_mode) {
1740                 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1741                 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1742                     CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1743
1744                 for (i = 0; i < 100; i++) {
1745                         if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1746                             BGE_RX_CPU_DRV_EVENT))
1747                                 break;
1748                         DELAY(10);
1749                 }
1750         }
1751 }
1752
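     /*
      * Compute the descriptor/data byte swapping options for BGE_MODE_CTL.
      * Frame data is always handled as a byte stream; on big-endian hosts
      * the non-frame data (descriptors and the status block) must be byte
      * swapped as well so that multi-byte fields read correctly.
      */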
1753 static uint32_t
1754 bge_dma_swap_options(struct bge_softc *sc)
1755 {
1756         uint32_t dma_options;
1757
1758         dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1759             BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1760 #if BYTE_ORDER == BIG_ENDIAN
1761         dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1762 #endif
1763         return (dma_options);
1764 }
1765
1766 /*
1767  * Do endian, PCI and DMA initialization.
1768  */
1769 static int
1770 bge_chipinit(struct bge_softc *sc)
1771 {
1772         uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1773         uint16_t val;
1774         int i;
1775
1776         /* Set endianness before we access any non-PCI registers. */
1777         misc_ctl = BGE_INIT;
1778         if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1779                 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1780         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1781
1782         /*
1783          * Clear the MAC statistics block in the NIC's
1784          * internal memory.
1785          */
1786         for (i = BGE_STATS_BLOCK;
1787             i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1788                 BGE_MEMWIN_WRITE(sc, i, 0);
1789
1790         for (i = BGE_STATUS_BLOCK;
1791             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1792                 BGE_MEMWIN_WRITE(sc, i, 0);
1793
1794         if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1795                 /*
1796                  *  Fix data corruption caused by non-qword write with WB.
1797                  *  Fix master abort in PCI mode.
1798                  *  Fix PCI latency timer.
1799                  */
1800                 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1801                 val |= (1 << 10) | (1 << 12) | (1 << 13);
1802                 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1803         }
1804
1805         if (sc->bge_asicrev == BGE_ASICREV_BCM57765 ||
1806             sc->bge_asicrev == BGE_ASICREV_BCM57766) {
1807                 /*
1808                  * For the 57766 and non-Ax versions of 57765, the bootcode
1809                  * needs to set up the PCIE Fast Training Sequence (FTS)
1810                  * value to prevent transmit hangs.
1811                  */
1812                 if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) {
1813                         CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
1814                             CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
1815                             BGE_CPMU_PADRNG_CTL_RDIV2);
1816                 }
1817         }
1818
1819         /*
1820          * Set up the PCI DMA control register.
1821          */
1822         dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1823             BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1824         if (sc->bge_flags & BGE_FLAG_PCIE) {
1825                 if (sc->bge_mps >= 256)
1826                         dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1827                 else
1828                         dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1829         } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1830                 if (BGE_IS_5714_FAMILY(sc)) {
1831                         /* 256 bytes for read and write. */
1832                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1833                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1834                         dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1835                             BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1836                             BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1837                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1838                         /*
1839                          * In the BCM5703, the DMA read watermark should
1840                          * be set to less than or equal to the maximum
1841                          * memory read byte count of the PCI-X command
1842                          * register.
1843                          */
1844                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1845                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1846                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1847                         /* 1536 bytes for read, 384 bytes for write. */
1848                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1849                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1850                 } else {
1851                         /* 384 bytes for read and write. */
1852                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1853                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1854                             0x0F;
1855                 }
1856                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1857                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1858                         uint32_t tmp;
1859
1860                         /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1861                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1862                         if (tmp == 6 || tmp == 7)
1863                                 dma_rw_ctl |=
1864                                     BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1865
1866                         /* Set PCI-X DMA write workaround. */
1867                         dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1868                 }
1869         } else {
1870                 /* Conventional PCI bus: 256 bytes for read and write. */
1871                 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1872                     BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1873
1874                 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1875                     sc->bge_asicrev != BGE_ASICREV_BCM5750)
1876                         dma_rw_ctl |= 0x0F;
1877         }
1878         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1879             sc->bge_asicrev == BGE_ASICREV_BCM5701)
1880                 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1881                     BGE_PCIDMARWCTL_ASRT_ALL_BE;
1882         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1883             sc->bge_asicrev == BGE_ASICREV_BCM5704)
1884                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1885         if (BGE_IS_5717_PLUS(sc)) {
1886                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1887                 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1888                         dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1889                 /*
1890                  * Enable HW workaround for controllers that misinterpret
1891                  * a status tag update and leave interrupts permanently
1892                  * disabled.
1893                  */
1894                 if (!BGE_IS_57765_PLUS(sc) &&
1895                     sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1896                     sc->bge_asicrev != BGE_ASICREV_BCM5762)
1897                         dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1898         }
1899         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1900
1901         /*
1902          * Set up general mode register.
1903          */
1904         mode_ctl = bge_dma_swap_options(sc);
1905         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
1906             sc->bge_asicrev == BGE_ASICREV_BCM5762) {
1907                 /* Retain Host-2-BMC settings written by APE firmware. */
1908                 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1909                     (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1910                     BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1911                     BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1912         }
1913         mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1914             BGE_MODECTL_TX_NO_PHDR_CSUM;
1915
1916         /*
1917          * BCM5701 B5 has a bug causing data corruption when using
1918          * 64-bit DMA reads, which can be terminated early and then
1919          * completed later as 32-bit accesses, in combination with
1920          * certain bridges.
1921          */
1922         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1923             sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1924                 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1925
1926         /*
1927          * Tell the firmware the driver is running.
1928          */
1929         if (sc->bge_asf_mode & ASF_STACKUP)
1930                 mode_ctl |= BGE_MODECTL_STACKUP;
1931
1932         CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1933
1934         /*
1935          * Disable memory write invalidate.  Apparently it is not supported
1936          * properly by these devices.  Also ensure that INTx isn't disabled,
1937          * as these chips need it even when using MSI.
1938          */
1939         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1940             PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1941
1942         /* Set the timer prescaler (always 66 MHz). */
1943         CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1944
1945         /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1946         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1947                 DELAY(40);      /* XXX */
1948
1949                 /* Put PHY into ready state */
1950                 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1951                 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1952                 DELAY(40);
1953         }
1954
1955         return (0);
1956 }
1957
1958 static int
1959 bge_blockinit(struct bge_softc *sc)
1960 {
1961         struct bge_rcb *rcb;
1962         bus_size_t vrcb;
1963         bge_hostaddr taddr;
1964         uint32_t dmactl, rdmareg, val;
1965         int i, limit;
1966
1967         /*
1968          * Initialize the memory window pointer register so that
1969          * we can access the first 32K of internal NIC RAM. This will
1970          * allow us to set up the TX send ring RCBs and the RX return
1971          * ring RCBs, plus other things which live in NIC memory.
1972          */
1973         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1974
1975         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1976
1977         if (!(BGE_IS_5705_PLUS(sc))) {
1978                 /* Configure mbuf memory pool */
1979                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1980                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1981                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1982                 else
1983                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1984
1985                 /* Configure DMA resource pool */
1986                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1987                     BGE_DMA_DESCRIPTORS);
1988                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1989         }
1990
1991         /* Configure mbuf pool watermarks */
1992         if (BGE_IS_5717_PLUS(sc)) {
1993                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1994                 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1995                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1996                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1997                 } else {
1998                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1999                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
2000                 }
2001         } else if (!BGE_IS_5705_PLUS(sc)) {
2002                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
2003                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
2004                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2005         } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2006                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2007                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
2008                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
2009         } else {
2010                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2011                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
2012                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2013         }
2014
2015         /* Configure DMA resource watermarks */
2016         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
2017         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
2018
2019         /* Enable buffer manager */
2020         val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
2021         /*
2022          * Change the arbitration algorithm of TXMBUF read request to
2023          * round-robin instead of priority based for BCM5719.  When
2024          * TXFIFO is almost empty, RDMA will hold its request until
2025          * TXFIFO is not almost empty.
2026          */
2027         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2028                 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
2029         CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
2030
2031         /* Poll for buffer manager start indication */
2032         for (i = 0; i < BGE_TIMEOUT; i++) {
2033                 DELAY(10);
2034                 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2035                         break;
2036         }
2037
2038         if (i == BGE_TIMEOUT) {
2039                 device_printf(sc->bge_dev, "buffer manager failed to start\n");
2040                 return (ENXIO);
2041         }
2042
2043         /* Enable flow-through queues */
2044         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2045         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2046
2047         /* Wait until queue initialization is complete */
2048         for (i = 0; i < BGE_TIMEOUT; i++) {
2049                 DELAY(10);
2050                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2051                         break;
2052         }
2053
2054         if (i == BGE_TIMEOUT) {
2055                 device_printf(sc->bge_dev, "flow-through queue init failed\n");
2056                 return (ENXIO);
2057         }
2058
2059         /*
2060          * Summary of rings supported by the controller:
2061          *
2062          * Standard Receive Producer Ring
2063          * - This ring is used to feed receive buffers for "standard"
2064          *   sized frames (typically 1536 bytes) to the controller.
2065          *
2066          * Jumbo Receive Producer Ring
2067          * - This ring is used to feed receive buffers for jumbo sized
2068          *   frames (i.e. anything bigger than the "standard" frames)
2069          *   to the controller.
2070          *
2071          * Mini Receive Producer Ring
2072          * - This ring is used to feed receive buffers for "mini"
2073          *   sized frames to the controller.
2074          * - This feature required external memory for the controller
2075          *   but was never used in a production system.  Should always
2076          *   be disabled.
2077          *
2078          * Receive Return Ring
2079          * - After the controller has placed an incoming frame into a
2080          *   receive buffer, that buffer is moved into a receive return
2081          *   ring.  The driver is then responsible for passing the
2082          *   buffer up to the stack.  Many versions of the controller
2083          *   support multiple RR rings.
2084          *
2085          * Send Ring
2086          * - This ring is used for outgoing frames.  Many versions of
2087          *   the controller support multiple send rings.
2088          */
2089
2090         /* Initialize the standard receive producer ring control block. */
2091         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
2092         rcb->bge_hostaddr.bge_addr_lo =
2093             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
2094         rcb->bge_hostaddr.bge_addr_hi =
2095             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
2096         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2097             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
2098         if (BGE_IS_5717_PLUS(sc)) {
2099                 /*
2100                  * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
2101                  * Bits 15-2 : Maximum RX frame size
2102                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2103                  * Bit 0     : Reserved
2104                  */
2105                 rcb->bge_maxlen_flags =
2106                     BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
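                 /*
                  * BGE_RCB_MAXLEN_FLAGS(a, b) packs 'a' into bits 31-16
                  * and 'b' into bits 15-0; here that is a ring size of
                  * 512 with the maximum frame size shifted into bits 15-2.
                  */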
2107         } else if (BGE_IS_5705_PLUS(sc)) {
2108                 /*
2109                  * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2110                  * Bits 15-2 : Reserved (should be 0)
2111                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2112                  * Bit 0     : Reserved
2113                  */
2114                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2115         } else {
2116                 /*
2117                  * Ring size is always XXX entries
2118                  * Bits 31-16: Maximum RX frame size
2119                  * Bits 15-2 : Reserved (should be 0)
2120                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2121                  * Bit 0     : Reserved
2122                  */
2123                 rcb->bge_maxlen_flags =
2124                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
2125         }
2126         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2127             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2128             sc->bge_asicrev == BGE_ASICREV_BCM5720)
2129                 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2130         else
2131                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2132         /* Write the standard receive producer ring control block. */
2133         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2134         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2135         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2136         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2137
2138         /* Reset the standard receive producer ring producer index. */
2139         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2140
2141         /*
2142          * Initialize the jumbo RX producer ring control
2143          * block.  We set the 'ring disabled' bit in the
2144          * flags field until we're actually ready to start
2145          * using this ring (i.e. once we set the MTU
2146          * high enough to require it).
2147          */
2148         if (BGE_IS_JUMBO_CAPABLE(sc)) {
2149                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
2150                 /* Get the jumbo receive producer ring RCB parameters. */
2151                 rcb->bge_hostaddr.bge_addr_lo =
2152                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
2153                 rcb->bge_hostaddr.bge_addr_hi =
2154                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
2155                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2156                     sc->bge_cdata.bge_rx_jumbo_ring_map,
2157                     BUS_DMASYNC_PREREAD);
2158                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2159                     BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2160                 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2161                     sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2162                     sc->bge_asicrev == BGE_ASICREV_BCM5720)
2163                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2164                 else
2165                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2166                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2167                     rcb->bge_hostaddr.bge_addr_hi);
2168                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2169                     rcb->bge_hostaddr.bge_addr_lo);
2170                 /* Program the jumbo receive producer ring RCB parameters. */
2171                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2172                     rcb->bge_maxlen_flags);
2173                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2174                 /* Reset the jumbo receive producer ring producer index. */
2175                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2176         }
2177
2178         /* Disable the mini receive producer ring RCB. */
2179         if (BGE_IS_5700_FAMILY(sc)) {
2180                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
2181                 rcb->bge_maxlen_flags =
2182                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2183                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2184                     rcb->bge_maxlen_flags);
2185                 /* Reset the mini receive producer ring producer index. */
2186                 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2187         }
2188
2189         /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2190         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2191                 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2192                     sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2193                     sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2194                         CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2195                             (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2196         }
2197         /*
2198          * The BD ring replenish thresholds control how often the
2199          * hardware fetches new BD's from the producer rings in host
2200          * memory.  Setting the value too low on a busy system can
2201          * starve the hardware and reduce the throughput.
2202          *
2203          * Set the BD ring replenish thresholds. The recommended
2204          * values are 1/8th the number of descriptors allocated to
2205          * each ring.
2206          * XXX The 5754 requires a lower threshold, so it might be a
2207          * requirement of all 575x family chips.  The Linux driver sets
2208          * the lower threshold for all 5705 family chips as well, but there
2209          * are reports that it might not need to be so strict.
2210          *
2211          * XXX Linux does some extra fiddling here for the 5906 parts as
2212          * well.
2213          */
2214         if (BGE_IS_5705_PLUS(sc))
2215                 val = 8;
2216         else
2217                 val = BGE_STD_RX_RING_CNT / 8;
2218         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
2219         if (BGE_IS_JUMBO_CAPABLE(sc))
2220                 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
2221                     BGE_JUMBO_RX_RING_CNT/8);
2222         if (BGE_IS_5717_PLUS(sc)) {
2223                 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
2224                 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
2225         }
2226
2227         /*
2228          * Disable all send rings by setting the 'ring disabled' bit
2229          * in the flags field of all the TX send ring control blocks,
2230          * located in NIC memory.
2231          */
2232         if (!BGE_IS_5705_PLUS(sc))
2233                 /* 5700 to 5704 had 16 send rings. */
2234                 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2235         else if (BGE_IS_57765_PLUS(sc) ||
2236             sc->bge_asicrev == BGE_ASICREV_BCM5762)
2237                 limit = 2;
2238         else if (BGE_IS_5717_PLUS(sc))
2239                 limit = 4;
2240         else
2241                 limit = 1;
2242         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2243         for (i = 0; i < limit; i++) {
2244                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2245                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2246                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2247                 vrcb += sizeof(struct bge_rcb);
2248         }
2249
2250         /* Configure send ring RCB 0 (we use only the first ring) */
2251         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2252         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
2253         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2254         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2255         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2256             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2257             sc->bge_asicrev == BGE_ASICREV_BCM5720)
2258                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
2259         else
2260                 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
2261                     BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2262         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2263             BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2264
2265         /*
2266          * Disable all receive return rings by setting the
2267          * 'ring disabled' bit in the flags field of all the receive
2268          * return ring control blocks, located in NIC memory.
2269          */
2270         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2271             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2272             sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2273                 /* Should be 17, use 16 until we get an SRAM map. */
2274                 limit = 16;
2275         } else if (!BGE_IS_5705_PLUS(sc))
2276                 limit = BGE_RX_RINGS_MAX;
2277         else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2278             sc->bge_asicrev == BGE_ASICREV_BCM5762 ||
2279             BGE_IS_57765_PLUS(sc))
2280                 limit = 4;
2281         else
2282                 limit = 1;
2283         /* Disable all receive return rings. */
2284         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2285         for (i = 0; i < limit; i++) {
2286                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
2287                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
2288                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2289                     BGE_RCB_FLAG_RING_DISABLED);
2290                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2291                 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2292                     (i * (sizeof(uint64_t))), 0);
2293                 vrcb += sizeof(struct bge_rcb);
2294         }
2295
2296         /*
2297          * Set up receive return ring 0.  Note that the NIC address
2298          * for RX return rings is 0x0.  The return rings live entirely
2299          * within the host, so the nicaddr field in the RCB isn't used.
2300          */
2301         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2302         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
2303         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2304         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2305         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2306         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2307             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2308
2309         /* Set random backoff seed for TX */
2310         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2311             (IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
2312             IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
2313             IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5]) &
2314             BGE_TX_BACKOFF_SEED_MASK);
2315
2316         /* Set inter-packet gap */
2317         val = 0x2620;
2318         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
2319             sc->bge_asicrev == BGE_ASICREV_BCM5762)
2320                 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2321                     (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2322         CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2323
2324         /*
2325          * Specify which ring to use for packets that don't match
2326          * any RX rules.
2327          */
2328         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2329
2330         /*
2331          * Configure number of RX lists. One interrupt distribution
2332          * list, sixteen active lists, one bad frames class.
2333          */
2334         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2335
2336         /* Initialize RX list placement stats mask. */
2337         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2338         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2339
2340         /* Disable host coalescing until we get it set up */
2341         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2342
2343         /* Poll to make sure it's shut down. */
2344         for (i = 0; i < BGE_TIMEOUT; i++) {
2345                 DELAY(10);
2346                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2347                         break;
2348         }
2349
2350         if (i == BGE_TIMEOUT) {
2351                 device_printf(sc->bge_dev,
2352                     "host coalescing engine failed to idle\n");
2353                 return (ENXIO);
2354         }
2355
2356         /* Set up host coalescing defaults */
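         /*
          * The coalescing ticks bound how long the controller may delay a
          * status block update, and the max BD counts bound how many
          * buffer descriptors may accumulate first; whichever threshold
          * is reached first triggers the update.
          */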
2357         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2358         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2359         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2360         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2361         if (!(BGE_IS_5705_PLUS(sc))) {
2362                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2363                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2364         }
2365         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
2366         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
2367
2368         /* Set up address of statistics block */
2369         if (!(BGE_IS_5705_PLUS(sc))) {
2370                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
2371                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
2372                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
2373                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
2374                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2375                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2376                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2377         }
2378
2379         /* Set up address of status block */
2380         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
2381             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
2382         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
2383             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
2384
2385         /* Set up status block size. */
2386         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2387             sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2388                 val = BGE_STATBLKSZ_FULL;
2389                 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2390         } else {
2391                 val = BGE_STATBLKSZ_32BYTE;
2392                 bzero(sc->bge_ldata.bge_status_block, 32);
2393         }
2394         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2395             sc->bge_cdata.bge_status_map,
2396             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2397
2398         /* Turn on host coalescing state machine */
2399         CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2400
2401         /* Turn on RX BD completion state machine and enable attentions */
2402         CSR_WRITE_4(sc, BGE_RBDC_MODE,
2403             BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2404
2405         /* Turn on RX list placement state machine */
2406         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2407
2408         /* Turn on RX list selector state machine. */
2409         if (!(BGE_IS_5705_PLUS(sc)))
2410                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2411
2412         /* Turn on DMA, clear stats. */
2413         val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2414             BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2415             BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2416             BGE_MACMODE_FRMHDR_DMA_ENB;
2417
2418         if (sc->bge_flags & BGE_FLAG_TBI)
2419                 val |= BGE_PORTMODE_TBI;
2420         else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2421                 val |= BGE_PORTMODE_GMII;
2422         else
2423                 val |= BGE_PORTMODE_MII;
2424
2425         /* Allow APE to send/receive frames. */
2426         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2427                 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2428
2429         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2430         DELAY(40);
2431
2432         /* Set misc. local control, enable interrupts on attentions */
2433         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2434
2435 #ifdef notdef
2436         /* Assert GPIO pins for PHY reset */
2437         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2438             BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2439         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2440             BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2441 #endif
2442
2443         /* Turn on DMA completion state machine */
2444         if (!(BGE_IS_5705_PLUS(sc)))
2445                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2446
2447         val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2448
2449         /* Enable host coalescing bug fix. */
2450         if (BGE_IS_5755_PLUS(sc))
2451                 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2452
2453         /* Request larger DMA burst size to get better performance. */
2454         if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2455                 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2456
2457         /* Turn on write DMA state machine */
2458         CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2459         DELAY(40);
2460
2461         /* Turn on read DMA state machine */
2462         val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2463
2464         if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2465                 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2466
2467         if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2468             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2469             sc->bge_asicrev == BGE_ASICREV_BCM57780)
2470                 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2471                     BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2472                     BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2473         if (sc->bge_flags & BGE_FLAG_PCIE)
2474                 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2475         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2476                 val |= BGE_RDMAMODE_TSO4_ENABLE;
2477                 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2478                     sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2479                     sc->bge_asicrev == BGE_ASICREV_BCM57780)
2480                         val |= BGE_RDMAMODE_TSO6_ENABLE;
2481         }
2482
2483         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
2484             sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2485                 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2486                         BGE_RDMAMODE_H2BNC_VLAN_DET;
2487                 /*
2488                  * Allow multiple outstanding read requests from the
2489                  * non-LSO read DMA engine.
2490                  */
2491                 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2492         }
2493
2494         if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2495             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2496             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2497             sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2498             BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2499                 if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
2500                         rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2501                 else
2502                         rdmareg = BGE_RDMA_RSRVCTRL;
2503                 dmactl = CSR_READ_4(sc, rdmareg);
2504                 /*
2505                  * Adjust tx margin to prevent TX data corruption and
2506                  * fix internal FIFO overflow.
2507                  */
2508                 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2509                     sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2510                         dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2511                             BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2512                             BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2513                         dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2514                             BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2515                             BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2516                 }
2517                 /*
2518                  * Enable fix for read DMA FIFO overruns.
2519                  * The fix is to limit the number of RX BDs
2520          * the hardware would fetch at a time.
2521                  */
2522                 CSR_WRITE_4(sc, rdmareg, dmactl |
2523                     BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2524         }
2525
2526         if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2527                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2528                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2529                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2530                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2531         } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2532                 /*
2533                  * Allow 4KB burst length reads for non-LSO frames.
2534                  * Enable 512B burst length reads for buffer descriptors.
2535                  */
2536                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2537                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2538                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2539                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2540         } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2541                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2542                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2543                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2544                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2545         }
2546
2547         CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2548         DELAY(40);
2549
2550         if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
2551                 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2552                         val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2553                         if ((val & 0xFFFF) > BGE_FRAMELEN)
2554                                 break;
2555                         if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
2556                                 break;
2557                 }
2558                 if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2559                         val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2560                         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2561                                 val |= BGE_RDMA_TX_LENGTH_WA_5719;
2562                         else
2563                                 val |= BGE_RDMA_TX_LENGTH_WA_5720;
2564                         CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2565                 }
2566         }
2567
2568         /* Turn on RX data completion state machine */
2569         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2570
2571         /* Turn on RX BD initiator state machine */
2572         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2573
2574         /* Turn on RX data and RX BD initiator state machine */
2575         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2576
2577         /* Turn on Mbuf cluster free state machine */
2578         if (!(BGE_IS_5705_PLUS(sc)))
2579                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2580
2581         /* Turn on send BD completion state machine */
2582         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2583
2584         /* Turn on send data completion state machine */
2585         val = BGE_SDCMODE_ENABLE;
2586         if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2587                 val |= BGE_SDCMODE_CDELAY;
2588         CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2589
2590         /* Turn on send data initiator state machine */
2591         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2592                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2593                     BGE_SDIMODE_HW_LSO_PRE_DMA);
2594         else
2595                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2596
2597         /* Turn on send BD initiator state machine */
2598         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2599
2600         /* Turn on send BD selector state machine */
2601         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2602
2603         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2604         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2605             BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2606
2607         /* ack/clear link change events */
2608         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2609             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2610             BGE_MACSTAT_LINK_CHANGED);
2611         CSR_WRITE_4(sc, BGE_MI_STS, 0);
2612
2613         /*
2614          * Enable attention when the link has changed state for
2615          * devices that use auto polling.
2616          */
2617         if (sc->bge_flags & BGE_FLAG_TBI) {
2618                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2619         } else {
2620                 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2621                         CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2622                         DELAY(80);
2623                 }
2624                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2625                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2626                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2627                             BGE_EVTENB_MI_INTERRUPT);
2628         }
2629
2630         /*
2631          * Clear any pending link state attention.
2632          * Otherwise some link state change events may be lost until attention
2633          * is cleared by the bge_intr() -> bge_link_upd() sequence.
2634          * This is not necessary on newer BCM chips; perhaps enabling link
2635          * state change attentions implies clearing pending attention.
2636          */
2637         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2638             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2639             BGE_MACSTAT_LINK_CHANGED);
2640
2641         /* Enable link state change attentions. */
2642         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2643
2644         return (0);
2645 }
2646
2647 static const struct bge_revision *
2648 bge_lookup_rev(uint32_t chipid)
2649 {
2650         const struct bge_revision *br;
2651
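             /* Look for an exact chip ID match first. */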
2652         for (br = bge_revisions; br->br_name != NULL; br++) {
2653                 if (br->br_chipid == chipid)
2654                         return (br);
2655         }
2656
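             /* Fall back to matching only the major ASIC revision. */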
2657         for (br = bge_majorrevs; br->br_name != NULL; br++) {
2658                 if (br->br_chipid == BGE_ASICREV(chipid))
2659                         return (br);
2660         }
2661
2662         return (NULL);
2663 }
2664
2665 static const struct bge_vendor *
2666 bge_lookup_vendor(uint16_t vid)
2667 {
2668         const struct bge_vendor *v;
2669
2670         for (v = bge_vendors; v->v_name != NULL; v++)
2671                 if (v->v_id == vid)
2672                         return (v);
2673
2674         return (NULL);
2675 }
2676
2677 static uint32_t
2678 bge_chipid(device_t dev)
2679 {
2680         uint32_t id;
2681
2682         id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2683             BGE_PCIMISCCTL_ASICREV_SHIFT;
2684         if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2685                 /*
2686                  * Find the ASIC revision.  Different chips use different
2687                  * registers.
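                      * For example, the BCM5719 reports
                      * BGE_ASICREV_USE_PRODID_REG here, so its real chip
                      * ID must be read from BGE_PCI_GEN2_PRODID_ASICREV
                      * instead.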
2688                  */
2689                 switch (pci_get_device(dev)) {
2690                 case BCOM_DEVICEID_BCM5717:
2691                 case BCOM_DEVICEID_BCM5718:
2692                 case BCOM_DEVICEID_BCM5719:
2693                 case BCOM_DEVICEID_BCM5720:
2694                 case BCOM_DEVICEID_BCM5725:
2695                 case BCOM_DEVICEID_BCM5727:
2696                 case BCOM_DEVICEID_BCM5762:
2697                         id = pci_read_config(dev,
2698                             BGE_PCI_GEN2_PRODID_ASICREV, 4);
2699                         break;
2700                 case BCOM_DEVICEID_BCM57761:
2701                 case BCOM_DEVICEID_BCM57762:
2702                 case BCOM_DEVICEID_BCM57765:
2703                 case BCOM_DEVICEID_BCM57766:
2704                 case BCOM_DEVICEID_BCM57781:
2705                 case BCOM_DEVICEID_BCM57785:
2706                 case BCOM_DEVICEID_BCM57791:
2707                 case BCOM_DEVICEID_BCM57795:
2708                         id = pci_read_config(dev,
2709                             BGE_PCI_GEN15_PRODID_ASICREV, 4);
2710                         break;
2711                 default:
2712                         id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
2713                 }
2714         }
2715         return (id);
2716 }
2717
2718 /*
2719  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2720  * against our list and return its name if we find a match.
2721  *
2722  * Note that since the Broadcom controller contains VPD support, we
2723  * try to get the device name string from the controller itself instead
2724  * of the compiled-in string. This guarantees we always announce the
2725  * right product name. We fall back to the compiled-in string when
2726  * VPD is unavailable or corrupt.
2727  */
2728 static int
2729 bge_probe(device_t dev)
2730 {
2731         char buf[96];
2732         char model[64];
2733         const struct bge_revision *br;
2734         const char *pname;
2735         struct bge_softc *sc;
2736         const struct bge_type *t = bge_devs;
2737         const struct bge_vendor *v;
2738         uint32_t id;
2739         uint16_t did, vid;
2740
2741         sc = device_get_softc(dev);
2742         sc->bge_dev = dev;
2743         vid = pci_get_vendor(dev);
2744         did = pci_get_device(dev);
2745         while (t->bge_vid != 0) {
2746                 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2747                         id = bge_chipid(dev);
2748                         br = bge_lookup_rev(id);
2749                         if (bge_has_eaddr(sc) &&
2750                             pci_get_vpd_ident(dev, &pname) == 0)
2751                                 snprintf(model, sizeof(model), "%s", pname);
2752                         else {
2753                                 v = bge_lookup_vendor(vid);
2754                                 snprintf(model, sizeof(model), "%s %s",
2755                                     v != NULL ? v->v_name : "Unknown",
2756                                     br != NULL ? br->br_name :
2757                                     "NetXtreme/NetLink Ethernet Controller");
2758                         }
2759                         snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x",
2760                             model, br != NULL ? "" : "unknown ", id);
2761                         device_set_desc_copy(dev, buf);
2762                         return (BUS_PROBE_DEFAULT);
2763                 }
2764                 t++;
2765         }
2766
2767         return (ENXIO);
2768 }
2769
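     /*
      * Release all DMA resources: destroy the per-buffer maps and tags,
      * then unload, free, and destroy each ring, and finally destroy the
      * buffer and parent tags.
      */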
2770 static void
2771 bge_dma_free(struct bge_softc *sc)
2772 {
2773         int i;
2774
2775         /* Destroy DMA maps for RX buffers. */
2776         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2777                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2778                         bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2779                             sc->bge_cdata.bge_rx_std_dmamap[i]);
2780         }
2781         if (sc->bge_cdata.bge_rx_std_sparemap)
2782                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2783                     sc->bge_cdata.bge_rx_std_sparemap);
2784
2785         /* Destroy DMA maps for jumbo RX buffers. */
2786         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2787                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2788                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2789                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2790         }
2791         if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2792                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2793                     sc->bge_cdata.bge_rx_jumbo_sparemap);
2794
2795         /* Destroy DMA maps for TX buffers. */
2796         for (i = 0; i < BGE_TX_RING_CNT; i++) {
2797                 if (sc->bge_cdata.bge_tx_dmamap[i])
2798                         bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2799                             sc->bge_cdata.bge_tx_dmamap[i]);
2800         }
2801
2802         if (sc->bge_cdata.bge_rx_mtag)
2803                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2804         if (sc->bge_cdata.bge_mtag_jumbo)
2805                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2806         if (sc->bge_cdata.bge_tx_mtag)
2807                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2808
2809         /* Destroy standard RX ring. */
2810         if (sc->bge_cdata.bge_rx_std_ring_map)
2811                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2812                     sc->bge_cdata.bge_rx_std_ring_map);
2813         if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2814                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2815                     sc->bge_ldata.bge_rx_std_ring,
2816                     sc->bge_cdata.bge_rx_std_ring_map);
2817
2818         if (sc->bge_cdata.bge_rx_std_ring_tag)
2819                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2820
2821         /* Destroy jumbo RX ring. */
2822         if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2823                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2824                     sc->bge_cdata.bge_rx_jumbo_ring_map);
2825
2826         if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2827             sc->bge_ldata.bge_rx_jumbo_ring)
2828                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2829                     sc->bge_ldata.bge_rx_jumbo_ring,
2830                     sc->bge_cdata.bge_rx_jumbo_ring_map);
2831
2832         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2833                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2834
2835         /* Destroy RX return ring. */
2836         if (sc->bge_cdata.bge_rx_return_ring_map)
2837                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2838                     sc->bge_cdata.bge_rx_return_ring_map);
2839
2840         if (sc->bge_cdata.bge_rx_return_ring_map &&
2841             sc->bge_ldata.bge_rx_return_ring)
2842                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2843                     sc->bge_ldata.bge_rx_return_ring,
2844                     sc->bge_cdata.bge_rx_return_ring_map);
2845
2846         if (sc->bge_cdata.bge_rx_return_ring_tag)
2847                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2848
2849         /* Destroy TX ring. */
2850         if (sc->bge_cdata.bge_tx_ring_map)
2851                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2852                     sc->bge_cdata.bge_tx_ring_map);
2853
2854         if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2855                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2856                     sc->bge_ldata.bge_tx_ring,
2857                     sc->bge_cdata.bge_tx_ring_map);
2858
2859         if (sc->bge_cdata.bge_tx_ring_tag)
2860                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2861
2862         /* Destroy status block. */
2863         if (sc->bge_cdata.bge_status_map)
2864                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2865                     sc->bge_cdata.bge_status_map);
2866
2867         if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2868                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2869                     sc->bge_ldata.bge_status_block,
2870                     sc->bge_cdata.bge_status_map);
2871
2872         if (sc->bge_cdata.bge_status_tag)
2873                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2874
2875         /* Destroy statistics block. */
2876         if (sc->bge_cdata.bge_stats_map)
2877                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2878                     sc->bge_cdata.bge_stats_map);
2879
2880         if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2881                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2882                     sc->bge_ldata.bge_stats,
2883                     sc->bge_cdata.bge_stats_map);
2884
2885         if (sc->bge_cdata.bge_stats_tag)
2886                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2887
2888         if (sc->bge_cdata.bge_buffer_tag)
2889                 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2890
2891         /* Destroy the parent tag. */
2892         if (sc->bge_cdata.bge_parent_tag)
2893                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2894 }
2895
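     /*
      * Helper to allocate DMA-safe memory for a ring: create a DMA tag,
      * allocate zeroed coherent memory, and load the map to obtain the
      * ring's bus address for the hardware.
      */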
2896 static int
2897 bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2898     bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2899     bus_addr_t *paddr, const char *msg)
2900 {
2901         struct bge_dmamap_arg ctx;
2902         int error;
2903
2904         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2905             alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2906             NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2907         if (error != 0) {
2908                 device_printf(sc->bge_dev,
2909                     "could not create %s dma tag\n", msg);
2910                 return (ENOMEM);
2911         }
2912         /* Allocate DMA'able memory for ring. */
2913         error = bus_dmamem_alloc(*tag, (void **)ring,
2914             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2915         if (error != 0) {
2916                 device_printf(sc->bge_dev,
2917                     "could not allocate DMA'able memory for %s\n", msg);
2918                 return (ENOMEM);
2919         }
2920         /* Load the address of the ring. */
2921         ctx.bge_busaddr = 0;
2922         error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2923             &ctx, BUS_DMA_NOWAIT);
2924         if (error != 0) {
2925                 device_printf(sc->bge_dev,
2926                     "could not load DMA'able memory for %s\n", msg);
2927                 return (ENOMEM);
2928         }
2929         *paddr = ctx.bge_busaddr;
2930         return (0);
2931 }
2932
2933 static int
2934 bge_dma_alloc(struct bge_softc *sc)
2935 {
2936         bus_addr_t lowaddr;
2937         bus_size_t rxmaxsegsz, sbsz, txsegsz, txmaxsegsz;
2938         int i, error;
2939
2940         lowaddr = BUS_SPACE_MAXADDR;
2941         if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2942                 lowaddr = BGE_DMA_MAXADDR;
2943         /*
2944          * Allocate the parent bus DMA tag appropriate for PCI.
2945          */
2946         error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2947             1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2948             NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2949             0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2950         if (error != 0) {
2951                 device_printf(sc->bge_dev,
2952                     "could not allocate parent dma tag\n");
2953                 return (ENOMEM);
2954         }
2955
2956         /* Create tag for standard RX ring. */
2957         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2958             &sc->bge_cdata.bge_rx_std_ring_tag,
2959             (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2960             &sc->bge_cdata.bge_rx_std_ring_map,
2961             &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2962         if (error)
2963                 return (error);
2964
2965         /* Create tag for RX return ring. */
2966         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2967             &sc->bge_cdata.bge_rx_return_ring_tag,
2968             (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2969             &sc->bge_cdata.bge_rx_return_ring_map,
2970             &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2971         if (error)
2972                 return (error);
2973
2974         /* Create tag for TX ring. */
2975         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2976             &sc->bge_cdata.bge_tx_ring_tag,
2977             (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2978             &sc->bge_cdata.bge_tx_ring_map,
2979             &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2980         if (error)
2981                 return (error);
2982
2983         /*
2984          * Create tag for status block.
2985          * Because we use only a single TX/RX/RX return ring, use the
2986          * minimum status block size, except on BCM5700 AX/BX, which
2987          * seems to want to see the full status block size regardless
2988          * of the configured number of rings.
2989          */
2990         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2991             sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2992                 sbsz = BGE_STATUS_BLK_SZ;
2993         else
2994                 sbsz = 32;
2995         error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2996             &sc->bge_cdata.bge_status_tag,
2997             (uint8_t **)&sc->bge_ldata.bge_status_block,
2998             &sc->bge_cdata.bge_status_map,
2999             &sc->bge_ldata.bge_status_block_paddr, "status block");
3000         if (error)
3001                 return (error);
3002
3003         /* Create tag for statistics block. */
3004         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
3005             &sc->bge_cdata.bge_stats_tag,
3006             (uint8_t **)&sc->bge_ldata.bge_stats,
3007             &sc->bge_cdata.bge_stats_map,
3008             &sc->bge_ldata.bge_stats_paddr, "statistics block");
3009         if (error)
3010                 return (error);
3011
3012         /* Create tag for jumbo RX ring. */
3013         if (BGE_IS_JUMBO_CAPABLE(sc)) {
3014                 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
3015                     &sc->bge_cdata.bge_rx_jumbo_ring_tag,
3016                     (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
3017                     &sc->bge_cdata.bge_rx_jumbo_ring_map,
3018                     &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
3019                 if (error)
3020                         return (error);
3021         }
3022
3023         /* Create parent tag for buffers. */
3024         if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
3025                 /*
3026                  * XXX
3027                  * A watchdog timeout issue was observed on BCM5704 which
3028                  * lives behind a PCI-X bridge (e.g. the AMD 8131 PCI-X
3029                  * bridge).  Both limiting the DMA address space to 32 bits
3030                  * and flushing mailbox writes seem to address the issue.
3031                  */
3032                 if (sc->bge_pcixcap != 0)
3033                         lowaddr = BUS_SPACE_MAXADDR_32BIT;
3034         }
3035         error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr,
3036             BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
3037             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
3038             &sc->bge_cdata.bge_buffer_tag);
3039         if (error != 0) {
3040                 device_printf(sc->bge_dev,
3041                     "could not allocate buffer dma tag\n");
3042                 return (ENOMEM);
3043         }
3044         /* Create tag for Tx mbufs. */
3045         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
3046                 txsegsz = BGE_TSOSEG_SZ;
3047                 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
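                     /* Allow a full 64KB TSO payload plus Ethernet/VLAN header. */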
3048         } else {
3049                 txsegsz = MCLBYTES;
3050                 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
3051         }
3052         error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
3053             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
3054             txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
3055             &sc->bge_cdata.bge_tx_mtag);
3056
3057         if (error) {
3058                 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
3059                 return (ENOMEM);
3060         }
3061
3062         /* Create tag for Rx mbufs. */
3063         if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
3064                 rxmaxsegsz = MJUM9BYTES;
3065         else
3066                 rxmaxsegsz = MCLBYTES;
3067         error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
3068             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
3069             rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
3070
3071         if (error) {
3072                 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
3073                 return (ENOMEM);
3074         }
3075
3076         /* Create DMA maps for RX buffers. */
3077         error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
3078             &sc->bge_cdata.bge_rx_std_sparemap);
3079         if (error) {
3080                 device_printf(sc->bge_dev,
3081                     "can't create spare DMA map for RX\n");
3082                 return (ENOMEM);
3083         }
3084         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3085                 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
3086                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
3087                 if (error) {
3088                         device_printf(sc->bge_dev,
3089                             "can't create DMA map for RX\n");
3090                         return (ENOMEM);
3091                 }
3092         }
3093
3094         /* Create DMA maps for TX buffers. */
3095         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3096                 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
3097                             &sc->bge_cdata.bge_tx_dmamap[i]);
3098                 if (error) {
3099                         device_printf(sc->bge_dev,
3100                             "can't create DMA map for TX\n");
3101                         return (ENOMEM);
3102                 }
3103         }
3104
3105         /* Create tags for jumbo RX buffers. */
3106         if (BGE_IS_JUMBO_CAPABLE(sc)) {
3107                 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
3108                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
3109                     NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
3110                     0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
3111                 if (error) {
3112                         device_printf(sc->bge_dev,
3113                             "could not allocate jumbo dma tag\n");
3114                         return (ENOMEM);
3115                 }
3116                 /* Create DMA maps for jumbo RX buffers. */
3117                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
3118                     0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
3119                 if (error) {
3120                         device_printf(sc->bge_dev,
3121                             "can't create spare DMA map for jumbo RX\n");
3122                         return (ENOMEM);
3123                 }
3124                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
3125                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
3126                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
3127                         if (error) {
3128                                 device_printf(sc->bge_dev,
3129                                     "can't create DMA map for jumbo RX\n");
3130                                 return (ENOMEM);
3131                         }
3132                 }
3133         }
3134
3135         return (0);
3136 }
3137
3138 /*
3139  * Return true if this device has more than one port.
3140  */
3141 static int
3142 bge_has_multiple_ports(struct bge_softc *sc)
3143 {
3144         device_t dev = sc->bge_dev;
3145         u_int b, d, f, fscan, s;
3146
3147         d = pci_get_domain(dev);
3148         b = pci_get_bus(dev);
3149         s = pci_get_slot(dev);
3150         f = pci_get_function(dev);
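             /* Probe every other function at the same domain/bus/slot. */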
3151         for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
3152                 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
3153                         return (1);
3154         return (0);
3155 }
3156
3157 /*
3158  * Return true if MSI can be used with this device.
3159  */
3160 static int
3161 bge_can_use_msi(struct bge_softc *sc)
3162 {
3163         int can_use_msi = 0;
3164
3165         if (sc->bge_msi == 0)
3166                 return (0);
3167
3168         /* Disable MSI for polling(4). */
3169 #ifdef DEVICE_POLLING
3170         return (0);
3171 #endif
3172         switch (sc->bge_asicrev) {
3173         case BGE_ASICREV_BCM5714_A0:
3174         case BGE_ASICREV_BCM5714:
3175                 /*
3176                  * Apparently, MSI doesn't work when these chips are
3177                  * configured in single-port mode.
3178                  */
3179                 if (bge_has_multiple_ports(sc))
3180                         can_use_msi = 1;
3181                 break;
3182         case BGE_ASICREV_BCM5750:
3183                 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
3184                     sc->bge_chiprev != BGE_CHIPREV_5750_BX)
3185                         can_use_msi = 1;
3186                 break;
3187         default:
3188                 if (BGE_IS_575X_PLUS(sc))
3189                         can_use_msi = 1;
3190         }
3191         return (can_use_msi);
3192 }
3193
3194 static int
3195 bge_mbox_reorder(struct bge_softc *sc)
3196 {
3197         /* Lists of PCI bridges that are known to reorder mailbox writes. */
3198         static const struct mbox_reorder {
3199                 const uint16_t vendor;
3200                 const uint16_t device;
3201                 const char *desc;
3202         } mbox_reorder_lists[] = {
3203                 { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
3204         };
3205         devclass_t pci, pcib;
3206         device_t bus, dev;
3207         int i;
3208
3209         pci = devclass_find("pci");
3210         pcib = devclass_find("pcib");
3211         dev = sc->bge_dev;
3212         bus = device_get_parent(dev);
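             /*
              * Walk up the bridge hierarchy; stop when the parent is no
              * longer a PCI-PCI bridge on a PCI bus.
              */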
3213         for (;;) {
3214                 dev = device_get_parent(bus);
3215                 bus = device_get_parent(dev);
3216                 if (device_get_devclass(dev) != pcib)
3217                         break;
3218                 for (i = 0; i < nitems(mbox_reorder_lists); i++) {
3219                         if (pci_get_vendor(dev) ==
3220                             mbox_reorder_lists[i].vendor &&
3221                             pci_get_device(dev) ==
3222                             mbox_reorder_lists[i].device) {
3223                                 device_printf(sc->bge_dev,
3224                                     "enabling MBOX workaround for %s\n",
3225                                     mbox_reorder_lists[i].desc);
3226                                 return (1);
3227                         }
3228                 }
3229                 if (device_get_devclass(bus) != pci)
3230                         break;
3231         }
3232         return (0);
3233 }
3234
3235 static void
3236 bge_devinfo(struct bge_softc *sc)
3237 {
3238         uint32_t cfg, clk;
3239
3240         device_printf(sc->bge_dev,
3241             "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
3242             sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
3243         if (sc->bge_flags & BGE_FLAG_PCIE)
3244                 printf("PCI-E\n");
3245         else if (sc->bge_flags & BGE_FLAG_PCIX) {
3246                 printf("PCI-X ");
3247                 cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3248                 if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
3249                         clk = 133;
3250                 else {
3251                         clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
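                             /* Map the 5-bit clock field to a bus speed in MHz. */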
3252                         switch (clk) {
3253                         case 0:
3254                                 clk = 33;
3255                                 break;
3256                         case 2:
3257                                 clk = 50;
3258                                 break;
3259                         case 4:
3260                                 clk = 66;
3261                                 break;
3262                         case 6:
3263                                 clk = 100;
3264                                 break;
3265                         case 7:
3266                                 clk = 133;
3267                                 break;
3268                         }
3269                 }
3270                 printf("%u MHz\n", clk);
3271         } else {
3272                 if (sc->bge_pcixcap != 0)
3273                         printf("PCI on PCI-X ");
3274                 else
3275                         printf("PCI ");
3276                 cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
3277                 if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
3278                         clk = 66;
3279                 else
3280                         clk = 33;
3281                 if (cfg & BGE_PCISTATE_32BIT_BUS)
3282                         printf("%u MHz; 32bit\n", clk);
3283                 else
3284                         printf("%u MHz; 64bit\n", clk);
3285         }
3286 }
3287
3288 static int
3289 bge_attach(device_t dev)
3290 {
3291         struct ifnet *ifp;
3292         struct bge_softc *sc;
3293         uint32_t hwcfg = 0, misccfg, pcistate;
3294         u_char eaddr[ETHER_ADDR_LEN];
3295         int capmask, error, msicount, reg, rid, trys;
3296
3297         sc = device_get_softc(dev);
3298         sc->bge_dev = dev;
3299
3300         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3301         TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
3302         callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3303
3304         /*
3305          * Map control/status registers.
3306          */
3307         pci_enable_busmaster(dev);
3308
3309         rid = PCIR_BAR(0);
3310         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3311             RF_ACTIVE);
3312
3313         if (sc->bge_res == NULL) {
3314                 device_printf(sc->bge_dev, "couldn't map BAR0 memory\n");
3315                 error = ENXIO;
3316                 goto fail;
3317         }
3318
3319         /* Save various chip information. */
3320         sc->bge_func_addr = pci_get_function(dev);
3321         sc->bge_chipid = bge_chipid(dev);
3322         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
3323         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
3324
3325         /* Set default PHY address. */
3326         sc->bge_phy_addr = 1;
3327          /*
3328           * PHY address mapping for various devices.
3329           *
3330           *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
3331           * ---------+-------+-------+-------+-------+
3332           * BCM57XX  |   1   |   X   |   X   |   X   |
3333           * BCM5704  |   1   |   X   |   1   |   X   |
3334           * BCM5717  |   1   |   8   |   2   |   9   |
3335           * BCM5719  |   1   |   8   |   2   |   9   |
3336           * BCM5720  |   1   |   8   |   2   |   9   |
3337           *
3338           *          | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
3339           * ---------+-------+-------+-------+-------+
3340           * BCM57XX  |   X   |   X   |   X   |   X   |
3341           * BCM5704  |   X   |   X   |   X   |   X   |
3342           * BCM5717  |   X   |   X   |   X   |   X   |
3343           * BCM5719  |   3   |   10  |   4   |   11  |
3344           * BCM5720  |   X   |   X   |   X   |   X   |
3345           *
3346           * Other addresses may respond but they are not
3347           * IEEE compliant PHYs and should be ignored.
3348           */
3349         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
3350             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3351             sc->bge_asicrev == BGE_ASICREV_BCM5720) {
3352                 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
3353                         if (CSR_READ_4(sc, BGE_SGDIG_STS) &
3354                             BGE_SGDIGSTS_IS_SERDES)
3355                                 sc->bge_phy_addr = sc->bge_func_addr + 8;
3356                         else
3357                                 sc->bge_phy_addr = sc->bge_func_addr + 1;
3358                 } else {
3359                         if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
3360                             BGE_CPMU_PHY_STRAP_IS_SERDES)
3361                                 sc->bge_phy_addr = sc->bge_func_addr + 8;
3362                         else
3363                                 sc->bge_phy_addr = sc->bge_func_addr + 1;
3364                 }
3365         }
3366
3367         if (bge_has_eaddr(sc))
3368                 sc->bge_flags |= BGE_FLAG_EADDR;
3369
3370         /* Save chipset family. */
3371         switch (sc->bge_asicrev) {
3372         case BGE_ASICREV_BCM5762:
3373         case BGE_ASICREV_BCM57765:
3374         case BGE_ASICREV_BCM57766:
3375                 sc->bge_flags |= BGE_FLAG_57765_PLUS;
3376                 /* FALLTHROUGH */
3377         case BGE_ASICREV_BCM5717:
3378         case BGE_ASICREV_BCM5719:
3379         case BGE_ASICREV_BCM5720:
3380                 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
3381                     BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
3382                     BGE_FLAG_JUMBO_FRAME;
3383                 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3384                     sc->bge_asicrev == BGE_ASICREV_BCM5720) {
3385                         /*
3386                          * Enable work around for DMA engine miscalculation
3387                          * Enable workaround for the DMA engine's
3388                          * miscalculation of available TXMBUF space.
3389                         sc->bge_flags |= BGE_FLAG_RDMA_BUG;
3390                         if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3391                             sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3392                                 /* Jumbo frame on BCM5719 A0 does not work. */
3393                                 sc->bge_flags &= ~BGE_FLAG_JUMBO;
3394                         }
3395                 }
3396                 break;
3397         case BGE_ASICREV_BCM5755:
3398         case BGE_ASICREV_BCM5761:
3399         case BGE_ASICREV_BCM5784:
3400         case BGE_ASICREV_BCM5785:
3401         case BGE_ASICREV_BCM5787:
3402         case BGE_ASICREV_BCM57780:
3403                 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
3404                     BGE_FLAG_5705_PLUS;
3405                 break;
3406         case BGE_ASICREV_BCM5700:
3407         case BGE_ASICREV_BCM5701:
3408         case BGE_ASICREV_BCM5703:
3409         case BGE_ASICREV_BCM5704:
3410                 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
3411                 break;
3412         case BGE_ASICREV_BCM5714_A0:
3413         case BGE_ASICREV_BCM5780:
3414         case BGE_ASICREV_BCM5714:
3415                 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
3416                 /* FALLTHROUGH */
3417         case BGE_ASICREV_BCM5750:
3418         case BGE_ASICREV_BCM5752:
3419         case BGE_ASICREV_BCM5906:
3420                 sc->bge_flags |= BGE_FLAG_575X_PLUS;
3421                 /* FALLTHROUGH */
3422         case BGE_ASICREV_BCM5705:
3423                 sc->bge_flags |= BGE_FLAG_5705_PLUS;
3424                 break;
3425         }
3426
3427         /* Identify chips with APE processor. */
3428         switch (sc->bge_asicrev) {
3429         case BGE_ASICREV_BCM5717:
3430         case BGE_ASICREV_BCM5719:
3431         case BGE_ASICREV_BCM5720:
3432         case BGE_ASICREV_BCM5761:
3433         case BGE_ASICREV_BCM5762:
3434                 sc->bge_flags |= BGE_FLAG_APE;
3435                 break;
3436         }
3437
3438         /* Chips with APE need BAR2 access for APE registers/memory. */
3439         if ((sc->bge_flags & BGE_FLAG_APE) != 0) {
3440                 rid = PCIR_BAR(2);
3441                 sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3442                     RF_ACTIVE);
3443                 if (sc->bge_res2 == NULL) {
3444                         device_printf(sc->bge_dev,
3445                             "couldn't map BAR2 memory\n");
3446                         error = ENXIO;
3447                         goto fail;
3448                 }
3449
3450                 /* Enable APE register/memory access by host driver. */
3451                 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3452                 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3453                     BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3454                     BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3455                 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);
3456
3457                 bge_ape_lock_init(sc);
3458                 bge_ape_read_fw_ver(sc);
3459         }
3460
3461         /* Add SYSCTLs, requires the chipset family to be set. */
3462         bge_add_sysctls(sc);
3463
3464         /* Identify the chips that use a CPMU. */
3465         if (BGE_IS_5717_PLUS(sc) ||
3466             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3467             sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3468             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
3469             sc->bge_asicrev == BGE_ASICREV_BCM57780)
3470                 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
3471         if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
3472                 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
3473         else
3474                 sc->bge_mi_mode = BGE_MIMODE_BASE;
3475         /* Enable auto polling for BCM570[0-5]. */
3476         if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
3477                 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
3478
3479         /*
3480          * All Broadcom controllers have the 4GB boundary DMA bug.
3481          * Whenever an address crosses a multiple of the 4GB boundary
3482          * (including 4GB, 8GB, 12GB, etc.) and makes the transition
3483          * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3484          * state machine will lock up and cause the device to hang.
3485          */
3486         sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
3487
3488         /* BCM5755 or higher and the BCM5906 have the short DMA bug. */
3489         if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3490                 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
3491
3492         /*
3493          * The BCM5719 cannot handle DMA requests for DMA segments
3494          * larger than 4KB in size.  However, the maximum DMA segment
3495          * size created in the DMA tag is 4KB for TSO, so we would not
3496          * encounter the issue here.
3497          */
3498         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3499                 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3500
3501         misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3502         if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3503                 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3504                     misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3505                         sc->bge_flags |= BGE_FLAG_5788;
3506         }
3507
3508         capmask = BMSR_DEFCAPMASK;
3509         if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3510             (misccfg == 0x4000 || misccfg == 0x8000)) ||
3511             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3512             pci_get_vendor(dev) == BCOM_VENDORID &&
3513             (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3514             pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3515             pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3516             (pci_get_vendor(dev) == BCOM_VENDORID &&
3517             (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3518             pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3519             pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3520             pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3521             pci_get_device(dev) == BCOM_DEVICEID_BCM57791 ||
3522             pci_get_device(dev) == BCOM_DEVICEID_BCM57795 ||
3523             sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3524                 /* These chips are 10/100 only. */
3525                 capmask &= ~BMSR_EXTSTAT;
3526                 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3527         }
3528
3529         /*
3530          * Some controllers seem to require special firmware to use
3531          * TSO.  That firmware is not available to FreeBSD, and Linux
3532          * claims that the TSO performed by the firmware is slower than
3533          * hardware-based TSO.  Moreover, the firmware-based TSO has a
3534          * known bug which can't handle TSO if the Ethernet header plus
3535          * IP/TCP header is greater than 80 bytes.  A workaround for the
3536          * TSO bug exists, but it seems to be more expensive than not
3537          * using TSO at all.  Some hardware also has the TSO bug, so
3538          * limit TSO to the controllers that are not affected by TSO
3539          * issues (e.g. 5755 or higher).
3540          */
3541         if (BGE_IS_5717_PLUS(sc)) {
3542                 /* BCM5717 requires a different TSO configuration. */
3543                 sc->bge_flags |= BGE_FLAG_TSO3;
3544                 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3545                     sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3546                         /* TSO on BCM5719 A0 does not work. */
3547                         sc->bge_flags &= ~BGE_FLAG_TSO3;
3548                 }
3549         } else if (BGE_IS_5755_PLUS(sc)) {
3550                 /*
3551                  * BCM5754 and BCM5787 share the same ASIC ID, so an
3552                  * explicit device ID check is required.
3553                  * For unknown reasons, TSO does not work on the BCM5755M.
3554                  */
3555                 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3556                     pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3557                     pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3558                         sc->bge_flags |= BGE_FLAG_TSO;
3559         }
3560
3561         /*
3562          * Check if this is a PCI-X or PCI Express device.
3563          */
3564         if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3565                 /*
3566                  * Found a PCI Express capabilities register, this
3567                  * must be a PCI Express device.
3568                  */
3569                 sc->bge_flags |= BGE_FLAG_PCIE;
3570                 sc->bge_expcap = reg;
3571                 /* Extract supported maximum payload size. */
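                     /* (a 3-bit field encoding payload sizes of 128 << n bytes) */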
3572                 sc->bge_mps = pci_read_config(dev, sc->bge_expcap +
3573                     PCIER_DEVICE_CAP, 2);
3574                 sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD);
3575                 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3576                     sc->bge_asicrev == BGE_ASICREV_BCM5720)
3577                         sc->bge_expmrq = 2048;
3578                 else
3579                         sc->bge_expmrq = 4096;
3580                 pci_set_max_read_req(dev, sc->bge_expmrq);
3581         } else {
3582                 /*
3583                  * Check if the device is in PCI-X Mode.
3584                  * (This bit is not valid on PCI Express controllers.)
3585                  */
3586                 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3587                         sc->bge_pcixcap = reg;
3588                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3589                     BGE_PCISTATE_PCI_BUSMODE) == 0)
3590                         sc->bge_flags |= BGE_FLAG_PCIX;
3591         }
3592
3593         /*
3594          * The 40bit DMA bug applies to the 5714/5715 controllers and is
3595          * not actually a MAC controller bug but an issue with the embedded
3596          * PCIe to PCI-X bridge in the device. Use the 40bit DMA workaround.
3597          */
3598         if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3599                 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3600         /*
3601          * Some PCI-X bridges are known to trigger write reordering to
3602          * the mailbox registers.  The typical symptom is watchdog
3603          * timeouts caused by out-of-order TX completions.  Enable the
3604          * workaround for PCI-X devices that live behind these bridges.
3605          * Note that PCI-X controllers can run in PCI mode, so we can't
3606          * use the BGE_FLAG_PCIX flag to detect PCI-X controllers.
3607          */
3608         if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
3609                 sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
3610         /*
3611          * Allocate the interrupt, using MSI if possible.  These devices
3612          * support 8 MSI messages, but only the first one is used in
3613          * normal operation.
3614          */
3615         rid = 0;
3616         if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3617                 sc->bge_msicap = reg;
3618                 if (bge_can_use_msi(sc)) {
3619                         msicount = pci_msi_count(dev);
3620                         if (msicount > 1)
3621                                 msicount = 1;
3622                 } else
3623                         msicount = 0;
3624                 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3625                         rid = 1;
3626                         sc->bge_flags |= BGE_FLAG_MSI;
3627                 }
3628         }
3629
3630         /*
3631          * All controllers except the BCM5700 support tagged status, but
3632          * we use tagged status only for the MSI case on BCM5717.
3633          * Otherwise MSI on BCM5717 does not work.
3634          */
3635 #ifndef DEVICE_POLLING
3636         if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3637                 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3638 #endif
3639
3640         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3641             RF_SHAREABLE | RF_ACTIVE);
3642
3643         if (sc->bge_irq == NULL) {
3644                 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3645                 error = ENXIO;
3646                 goto fail;
3647         }
3648
3649         bge_devinfo(sc);
3650
3651         sc->bge_asf_mode = 0;
3652         /* No ASF if APE present. */
3653         if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
3654                 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3655                     BGE_SRAM_DATA_SIG_MAGIC)) {
3656                         if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
3657                             BGE_HWCFG_ASF) {
3658                                 sc->bge_asf_mode |= ASF_ENABLE;
3659                                 sc->bge_asf_mode |= ASF_STACKUP;
3660                                 if (BGE_IS_575X_PLUS(sc))
3661                                         sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3662                         }
3663                 }
3664         }
3665
3666         bge_stop_fw(sc);
3667         bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
3668         if (bge_reset(sc)) {
3669                 device_printf(sc->bge_dev, "chip reset failed\n");
3670                 error = ENXIO;
3671                 goto fail;
3672         }
3673
3674         bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
3675         bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
3676
3677         if (bge_chipinit(sc)) {
3678                 device_printf(sc->bge_dev, "chip initialization failed\n");
3679                 error = ENXIO;
3680                 goto fail;
3681         }
3682
3683         error = bge_get_eaddr(sc, eaddr);
3684         if (error) {
3685                 device_printf(sc->bge_dev,
3686                     "failed to read station address\n");
3687                 error = ENXIO;
3688                 goto fail;
3689         }
3690
3691         /* The 5705 limits the RX return ring to 512 entries. */
3692         if (BGE_IS_5717_PLUS(sc))
3693                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3694         else if (BGE_IS_5705_PLUS(sc))
3695                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3696         else
3697                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3698
3699         if (bge_dma_alloc(sc)) {
3700                 device_printf(sc->bge_dev,
3701                     "failed to allocate DMA resources\n");
3702                 error = ENXIO;
3703                 goto fail;
3704         }
3705
3706         /* Set default tuneable values. */
3707         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3708         sc->bge_rx_coal_ticks = 150;
3709         sc->bge_tx_coal_ticks = 150;
3710         sc->bge_rx_max_coal_bds = 10;
3711         sc->bge_tx_max_coal_bds = 10;
3712
3713         /* Initialize checksum features to use. */
3714         sc->bge_csum_features = BGE_CSUM_FEATURES;
3715         if (sc->bge_forced_udpcsum != 0)
3716                 sc->bge_csum_features |= CSUM_UDP;
3717
3718         /* Set up ifnet structure */
3719         ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3720         if (ifp == NULL) {
3721                 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3722                 error = ENXIO;
3723                 goto fail;
3724         }
3725         ifp->if_softc = sc;
3726         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3727         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3728         ifp->if_ioctl = bge_ioctl;
3729         ifp->if_start = bge_start;
3730         ifp->if_init = bge_init;
3731         ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3732         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3733         IFQ_SET_READY(&ifp->if_snd);
3734         ifp->if_hwassist = sc->bge_csum_features;
3735         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3736             IFCAP_VLAN_MTU;
3737         if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3738                 ifp->if_hwassist |= CSUM_TSO;
3739                 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3740         }
3741 #ifdef IFCAP_VLAN_HWCSUM
3742         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3743 #endif
3744         ifp->if_capenable = ifp->if_capabilities;
3745 #ifdef DEVICE_POLLING
3746         ifp->if_capabilities |= IFCAP_POLLING;
3747 #endif
3748
3749         /*
3750          * 5700 B0 chips do not support checksumming correctly due
3751          * to hardware bugs.
3752          */
3753         if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3754                 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3755                 ifp->if_capenable &= ~IFCAP_HWCSUM;
3756                 ifp->if_hwassist = 0;
3757         }
3758
3759         /*
3760          * Figure out what sort of media we have by checking the
3761          * hardware config word in the first 32k of NIC internal memory,
3762          * or fall back to examining the EEPROM if necessary.
3763          * Note: on some BCM5700 cards, this value appears to be unset.
3764          * If that's the case, we have to rely on identifying the NIC
3765          * by its PCI subsystem ID, as we do below for the SysKonnect
3766          * SK-9D41.
3767          */
3768         if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3769                 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3770         else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3771             (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3772                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3773                     sizeof(hwcfg))) {
3774                         device_printf(sc->bge_dev, "failed to read EEPROM\n");
3775                         error = ENXIO;
3776                         goto fail;
3777                 }
3778                 hwcfg = ntohl(hwcfg);
3779         }
3780
3781         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3782         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3783             SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3784                 if (BGE_IS_5705_PLUS(sc)) {
3785                         sc->bge_flags |= BGE_FLAG_MII_SERDES;
3786                         sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3787                 } else
3788                         sc->bge_flags |= BGE_FLAG_TBI;
3789         }
3790
3791         /* Set various PHY bug flags. */
3792         if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3793             sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3794                 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
3795         if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
3796             sc->bge_chiprev == BGE_CHIPREV_5704_AX)
3797                 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
3798         if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3799                 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
3800         if (pci_get_subvendor(dev) == DELL_VENDORID)
3801                 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
3802         if ((BGE_IS_5705_PLUS(sc)) &&
3803             sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
3804             sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
3805             sc->bge_asicrev != BGE_ASICREV_BCM57780 &&
3806             !BGE_IS_5717_PLUS(sc)) {
3807                 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
3808                     sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3809                     sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3810                     sc->bge_asicrev == BGE_ASICREV_BCM5787) {
3811                         if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
3812                             pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
3813                                 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
3814                         if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
3815                                 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
3816                 } else
3817                         sc->bge_phy_flags |= BGE_PHY_BER_BUG;
3818         }
3819
3820         /*
3821          * Don't enable Ethernet@WireSpeed for the 5700 or the
3822          * 5705 A0 and A1 chips.
3823          */
3824         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3825             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3826             (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
3827             sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
3828                 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3829
3830         if (sc->bge_flags & BGE_FLAG_TBI) {
3831                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3832                     bge_ifmedia_sts);
3833                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3834                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3835                     0, NULL);
3836                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3837                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3838                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3839         } else {
3840                 /*
3841                  * Do transceiver setup and tell the firmware the
3842                  * driver is down so we can try to get access for the
3843                  * probe if ASF is running.  Retry a couple of times
3844                  * if we get a conflict with the ASF firmware accessing
3845                  * the PHY.
3846                  */
3847                 trys = 0;
3848                 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3849 again:
3850                 bge_asf_driver_up(sc);
3851
3852                 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3853                     bge_ifmedia_sts, capmask, sc->bge_phy_addr, MII_OFFSET_ANY,
3854                     MIIF_DOPAUSE);
3855                 if (error != 0) {
3856                         if (trys++ < 4) {
3857                                 device_printf(sc->bge_dev, "Try again\n");
3858                                 bge_miibus_writereg(sc->bge_dev,
3859                                     sc->bge_phy_addr, MII_BMCR, BMCR_RESET);
3860                                 goto again;
3861                         }
3862                         device_printf(sc->bge_dev, "attaching PHYs failed\n");
3863                         goto fail;
3864                 }
3865
3866                 /*
3867                  * Now tell the firmware we are going up after probing the PHY.
3868                  */
3869                 if (sc->bge_asf_mode & ASF_STACKUP)
3870                         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3871         }
3872
3873         /*
3874          * When using the BCM5701 in PCI-X mode, data corruption has
3875          * been observed in the first few bytes of some received packets.
3876          * Aligning the packet buffer in memory eliminates the corruption.
3877          * Unfortunately, this misaligns the packet payloads.  On platforms
3878          * which do not support unaligned accesses, we will realign the
3879          * payloads by copying the received packets.
3880          */
3881         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3882             sc->bge_flags & BGE_FLAG_PCIX)
3883                 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
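        /*
         * Illustrative sketch (not new driver logic): for BGE_FLAG_RX_ALIGNBUG
         * boards on strict-alignment platforms, bge_rxeof() later realigns
         * each received payload by shifting it up ETHER_ALIGN (2) bytes so
         * the IP header lands on a 32-bit boundary:
         *
         *      bcopy(m->m_data, m->m_data + ETHER_ALIGN, cur_rx->bge_len);
         *      m->m_data += ETHER_ALIGN;
         *
         * The copy costs one pass over the packet but avoids alignment faults.
         */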
3884
3885         /*
3886          * Call MI attach routine.
3887          */
3888         ether_ifattach(ifp, eaddr);
3889
3890         /* Tell upper layer we support long frames. */
3891         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3892
3893         /*
3894          * Hookup IRQ last.
3895          */
3896         if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3897                 /* Take advantage of single-shot MSI. */
3898                 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3899                     ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3900                 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3901                     taskqueue_thread_enqueue, &sc->bge_tq);
3902                 if (sc->bge_tq == NULL) {
3903                         device_printf(dev, "could not create taskqueue.\n");
3904                         ether_ifdetach(ifp);
3905                         error = ENOMEM;
3906                         goto fail;
3907                 }
3908                 error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
3909                     "%s taskq", device_get_nameunit(sc->bge_dev));
3910                 if (error != 0) {
3911                         device_printf(dev, "could not start threads.\n");
3912                         ether_ifdetach(ifp);
3913                         goto fail;
3914                 }
3915                 error = bus_setup_intr(dev, sc->bge_irq,
3916                     INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3917                     &sc->bge_intrhand);
3918         } else
3919                 error = bus_setup_intr(dev, sc->bge_irq,
3920                     INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3921                     &sc->bge_intrhand);
3922
3923         if (error) {
3924                 ether_ifdetach(ifp);
3925                 device_printf(sc->bge_dev, "couldn't set up irq\n");
3926         }
3927
3928 fail:
3929         if (error)
3930                 bge_detach(dev);
3931         return (error);
3932 }
3933
3934 static int
3935 bge_detach(device_t dev)
3936 {
3937         struct bge_softc *sc;
3938         struct ifnet *ifp;
3939
3940         sc = device_get_softc(dev);
3941         ifp = sc->bge_ifp;
3942
3943 #ifdef DEVICE_POLLING
3944         if (ifp->if_capenable & IFCAP_POLLING)
3945                 ether_poll_deregister(ifp);
3946 #endif
3947
3948         if (device_is_attached(dev)) {
3949                 ether_ifdetach(ifp);
3950                 BGE_LOCK(sc);
3951                 bge_stop(sc);
3952                 BGE_UNLOCK(sc);
3953                 callout_drain(&sc->bge_stat_ch);
3954         }
3955
3956         if (sc->bge_tq)
3957                 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3958
3959         if (sc->bge_flags & BGE_FLAG_TBI)
3960                 ifmedia_removeall(&sc->bge_ifmedia);
3961         else if (sc->bge_miibus != NULL) {
3962                 bus_generic_detach(dev);
3963                 device_delete_child(dev, sc->bge_miibus);
3964         }
3965
3966         bge_release_resources(sc);
3967
3968         return (0);
3969 }
3970
3971 static void
3972 bge_release_resources(struct bge_softc *sc)
3973 {
3974         device_t dev;
3975
3976         dev = sc->bge_dev;
3977
3978         if (sc->bge_tq != NULL)
3979                 taskqueue_free(sc->bge_tq);
3980
3981         if (sc->bge_intrhand != NULL)
3982                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3983
3984         if (sc->bge_irq != NULL)
3985                 bus_release_resource(dev, SYS_RES_IRQ,
3986                     sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3987
3988         if (sc->bge_flags & BGE_FLAG_MSI)
3989                 pci_release_msi(dev);
3990
3991         if (sc->bge_res != NULL)
3992                 bus_release_resource(dev, SYS_RES_MEMORY,
3993                     PCIR_BAR(0), sc->bge_res);
3994
3995         if (sc->bge_res2 != NULL)
3996                 bus_release_resource(dev, SYS_RES_MEMORY,
3997                     PCIR_BAR(2), sc->bge_res2);
3998
3999         if (sc->bge_ifp != NULL)
4000                 if_free(sc->bge_ifp);
4001
4002         bge_dma_free(sc);
4003
4004         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
4005                 BGE_LOCK_DESTROY(sc);
4006 }
4007
4008 static int
4009 bge_reset(struct bge_softc *sc)
4010 {
4011         device_t dev;
4012         uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val;
4013         void (*write_op)(struct bge_softc *, int, int);
4014         uint16_t devctl;
4015         int i;
4016
4017         dev = sc->bge_dev;
4018
4019         mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
4020         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4021                 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
4022         mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
4023
4024         if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
4025             (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
4026                 if (sc->bge_flags & BGE_FLAG_PCIE)
4027                         write_op = bge_writemem_direct;
4028                 else
4029                         write_op = bge_writemem_ind;
4030         } else
4031                 write_op = bge_writereg_ind;
4032
4033         if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
4034             sc->bge_asicrev != BGE_ASICREV_BCM5701) {
4035                 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
4036                 for (i = 0; i < 8000; i++) {
4037                         if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
4038                             BGE_NVRAMSWARB_GNT1)
4039                                 break;
4040                         DELAY(20);
4041                 }
4042                 if (i == 8000) {
4043                         if (bootverbose)
4044                                 device_printf(dev, "NVRAM lock timed out!\n");
4045                 }
4046         }
4047         /* Take APE lock when performing reset. */
4048         bge_ape_lock(sc, BGE_APE_LOCK_GRC);
4049
4050         /* Save some important PCI state. */
4051         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
4052         command = pci_read_config(dev, BGE_PCI_CMD, 4);
4053
4054         pci_write_config(dev, BGE_PCI_MISC_CTL,
4055             BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4056             BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
4057
4058         /* Disable fastboot on controllers that support it. */
4059         if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
4060             BGE_IS_5755_PLUS(sc)) {
4061                 if (bootverbose)
4062                         device_printf(dev, "Disabling fastboot\n");
4063                 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
4064         }
4065
4066         /*
4067          * Write the magic number to SRAM at offset 0xB50.
4068          * When firmware finishes its initialization it will
4069          * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
4070          */
4071         bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
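        /*
         * Handshake sketch (illustrative): the driver/firmware protocol
         * around the reset below boils down to
         *
         *      bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
         *      ... issue global reset ...
         *      for (i = 0; i < BGE_TIMEOUT; i++) {
         *              DELAY(10);
         *              if (bge_readmem_ind(sc, BGE_SRAM_FW_MB) ==
         *                  ~BGE_SRAM_FW_MB_MAGIC)
         *                      break;  /* firmware finished booting */
         *      }
         *
         * The actual poll loop appears near the end of this function.
         */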
4072
4073         reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
4074
4075         /* XXX: Broadcom Linux driver. */
4076         if (sc->bge_flags & BGE_FLAG_PCIE) {
4077                 if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
4078                     (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
4079                         if (CSR_READ_4(sc, 0x7E2C) == 0x60)     /* PCIE 1.0 */
4080                                 CSR_WRITE_4(sc, 0x7E2C, 0x20);
4081                 }
4082                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
4083                         /* Prevent PCIe link training during global reset. */
4084                         CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
4085                         reset |= 1 << 29;
4086                 }
4087         }
4088
4089         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
4090                 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
4091                 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
4092                     val | BGE_VCPU_STATUS_DRV_RESET);
4093                 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
4094                 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
4095                     val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
4096         }
4097
4098         /*
4099          * Set GPHY Power Down Override to leave the GPHY
4100          * powered up while in D0 and uninitialized.
4101          */
4102         if (BGE_IS_5705_PLUS(sc) &&
4103             (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
4104                 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
4105
4106         /* Issue global reset */
4107         write_op(sc, BGE_MISC_CFG, reset);
4108
4109         if (sc->bge_flags & BGE_FLAG_PCIE)
4110                 DELAY(100 * 1000);
4111         else
4112                 DELAY(1000);
4113
4114         /* XXX: Broadcom Linux driver. */
4115         if (sc->bge_flags & BGE_FLAG_PCIE) {
4116                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
4117                         DELAY(500000); /* wait for link training to complete */
4118                         val = pci_read_config(dev, 0xC4, 4);
4119                         pci_write_config(dev, 0xC4, val | (1 << 15), 4);
4120                 }
4121                 devctl = pci_read_config(dev,
4122                     sc->bge_expcap + PCIER_DEVICE_CTL, 2);
4123                 /* Clear the no snoop enable and relaxed ordering bits. */
4124                 devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
4125                     PCIEM_CTL_NOSNOOP_ENABLE);
4126                 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
4127                     devctl, 2);
4128                 pci_set_max_read_req(dev, sc->bge_expmrq);
4129                 /* Clear error status. */
4130                 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
4131                     PCIEM_STA_CORRECTABLE_ERROR |
4132                     PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
4133                     PCIEM_STA_UNSUPPORTED_REQ, 2);
4134         }
4135
4136         /* Reset some of the PCI state that got zapped by reset. */
4137         pci_write_config(dev, BGE_PCI_MISC_CTL,
4138             BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4139             BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
4140         val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
4141         if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
4142             (sc->bge_flags & BGE_FLAG_PCIX) != 0)
4143                 val |= BGE_PCISTATE_RETRY_SAME_DMA;
4144         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4145                 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
4146                     BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
4147                     BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
4148         pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
4149         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
4150         pci_write_config(dev, BGE_PCI_CMD, command, 4);
4151         /*
4152          * Disable PCI-X relaxed ordering to ensure the status block
4153          * update arrives before the packet buffer DMA.  Otherwise the
4154          * driver may read a stale status block.
4155          */
4156         if (sc->bge_flags & BGE_FLAG_PCIX) {
4157                 devctl = pci_read_config(dev,
4158                     sc->bge_pcixcap + PCIXR_COMMAND, 2);
4159                 devctl &= ~PCIXM_COMMAND_ERO;
4160                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
4161                         devctl &= ~PCIXM_COMMAND_MAX_READ;
4162                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
4163                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4164                         devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
4165                             PCIXM_COMMAND_MAX_READ);
4166                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
4167                 }
4168                 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
4169                     devctl, 2);
4170         }
4171         /* Re-enable MSI, if necessary, and enable the memory arbiter. */
4172         if (BGE_IS_5714_FAMILY(sc)) {
4173                 /* This chip disables MSI on reset. */
4174                 if (sc->bge_flags & BGE_FLAG_MSI) {
4175                         val = pci_read_config(dev,
4176                             sc->bge_msicap + PCIR_MSI_CTRL, 2);
4177                         pci_write_config(dev,
4178                             sc->bge_msicap + PCIR_MSI_CTRL,
4179                             val | PCIM_MSICTRL_MSI_ENABLE, 2);
4180                         val = CSR_READ_4(sc, BGE_MSI_MODE);
4181                         CSR_WRITE_4(sc, BGE_MSI_MODE,
4182                             val | BGE_MSIMODE_ENABLE);
4183                 }
4184                 val = CSR_READ_4(sc, BGE_MARB_MODE);
4185                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
4186         } else
4187                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4188
4189         /* Fix up byte swapping. */
4190         CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
4191
4192         val = CSR_READ_4(sc, BGE_MAC_MODE);
4193         val = (val & ~mac_mode_mask) | mac_mode;
4194         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
4195         DELAY(40);
4196
4197         bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
4198
4199         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
4200                 for (i = 0; i < BGE_TIMEOUT; i++) {
4201                         val = CSR_READ_4(sc, BGE_VCPU_STATUS);
4202                         if (val & BGE_VCPU_STATUS_INIT_DONE)
4203                                 break;
4204                         DELAY(100);
4205                 }
4206                 if (i == BGE_TIMEOUT) {
4207                         device_printf(dev, "reset timed out\n");
4208                         return (1);
4209                 }
4210         } else {
4211                 /*
4212                  * Poll until we see the 1's complement of the magic number.
4213                  * This indicates that the firmware initialization is complete.
4214                  * We expect this to fail if no chip containing the Ethernet
4215                  * address is fitted, though.
4216                  */
4217                 for (i = 0; i < BGE_TIMEOUT; i++) {
4218                         DELAY(10);
4219                         val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
4220                         if (val == ~BGE_SRAM_FW_MB_MAGIC)
4221                                 break;
4222                 }
4223
4224                 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
4225                         device_printf(dev,
4226                             "firmware handshake timed out, found 0x%08x\n",
4227                             val);
4228                 /* The BCM57765 A0 needs additional time before register access. */
4229                 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
4230                         DELAY(10 * 1000);       /* XXX */
4231         }
4232
4233         /*
4234          * The 5704 in TBI mode apparently needs some special
4235          * adjustment to ensure the SERDES drive level is set
4236          * to 1.2V.
4237          */
4238         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
4239             sc->bge_flags & BGE_FLAG_TBI) {
4240                 val = CSR_READ_4(sc, BGE_SERDES_CFG);
4241                 val = (val & ~0xFFF) | 0x880;
4242                 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
4243         }
4244
4245         /* XXX: Broadcom Linux driver. */
4246         if (sc->bge_flags & BGE_FLAG_PCIE &&
4247             !BGE_IS_5717_PLUS(sc) &&
4248             sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
4249             sc->bge_asicrev != BGE_ASICREV_BCM5785) {
4250                 /* Enable Data FIFO protection. */
4251                 val = CSR_READ_4(sc, 0x7C00);
4252                 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
4253         }
4254
4255         if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
4256                 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
4257                     CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
4258
4259         return (0);
4260 }
4261
4262 static __inline void
4263 bge_rxreuse_std(struct bge_softc *sc, int i)
4264 {
4265         struct bge_rx_bd *r;
4266
4267         r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
4268         r->bge_flags = BGE_RXBDFLAG_END;
4269         r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
4270         r->bge_idx = i;
4271         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4272 }
4273
4274 static __inline void
4275 bge_rxreuse_jumbo(struct bge_softc *sc, int i)
4276 {
4277         struct bge_extrx_bd *r;
4278
4279         r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
4280         r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
4281         r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
4282         r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
4283         r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
4284         r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
4285         r->bge_idx = i;
4286         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4287 }
4288
4289 /*
4290  * Frame reception handling. This is called if there's a frame
4291  * on the receive return list.
4292  *
4293  * Note: we have to be able to handle two possibilities here:
4294  * 1) the frame is from the jumbo receive ring
4295  * 2) the frame is from the standard receive ring
4296  */
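/*
 * Ring bookkeeping sketch (illustrative): the return ring is a plain
 * producer/consumer pair.  The chip advances the producer index in the
 * status block; the driver consumes from its saved consumer index up to
 * the producer and then acknowledges through a mailbox register:
 *
 *      while (rx_cons != rx_prod) {
 *              ... consume bge_rx_return_ring[rx_cons] ...
 *              BGE_INC(rx_cons, sc->bge_return_ring_cnt);
 *      }
 *      bge_writembx(sc, BGE_MBX_RX_CONS0_LO, rx_cons);
 */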
4297
4298 static int
4299 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
4300 {
4301         struct ifnet *ifp;
4302         int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
4303         uint16_t rx_cons;
4304
4305         rx_cons = sc->bge_rx_saved_considx;
4306
4307         /* Nothing to do. */
4308         if (rx_cons == rx_prod)
4309                 return (rx_npkts);
4310
4311         ifp = sc->bge_ifp;
4312
4313         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
4314             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
4315         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
4316             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
4317         if (BGE_IS_JUMBO_CAPABLE(sc) &&
4318             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4319             (MCLBYTES - ETHER_ALIGN))
4320                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
4321                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
4322
4323         while (rx_cons != rx_prod) {
4324                 struct bge_rx_bd        *cur_rx;
4325                 uint32_t                rxidx;
4326                 struct mbuf             *m = NULL;
4327                 uint16_t                vlan_tag = 0;
4328                 int                     have_tag = 0;
4329
4330 #ifdef DEVICE_POLLING
4331                 if (ifp->if_capenable & IFCAP_POLLING) {
4332                         if (sc->rxcycles <= 0)
4333                                 break;
4334                         sc->rxcycles--;
4335                 }
4336 #endif
4337
4338                 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
4339
4340                 rxidx = cur_rx->bge_idx;
4341                 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
4342
4343                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
4344                     cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
4345                         have_tag = 1;
4346                         vlan_tag = cur_rx->bge_vlan_tag;
4347                 }
4348
4349                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
4350                         jumbocnt++;
4351                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
4352                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4353                                 bge_rxreuse_jumbo(sc, rxidx);
4354                                 continue;
4355                         }
4356                         if (bge_newbuf_jumbo(sc, rxidx) != 0) {
4357                                 bge_rxreuse_jumbo(sc, rxidx);
4358                                 ifp->if_iqdrops++;
4359                                 continue;
4360                         }
4361                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4362                 } else {
4363                         stdcnt++;
4364                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
4365                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4366                                 bge_rxreuse_std(sc, rxidx);
4367                                 continue;
4368                         }
4369                         if (bge_newbuf_std(sc, rxidx) != 0) {
4370                                 bge_rxreuse_std(sc, rxidx);
4371                                 ifp->if_iqdrops++;
4372                                 continue;
4373                         }
4374                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4375                 }
4376
4377                 ifp->if_ipackets++;
4378 #ifndef __NO_STRICT_ALIGNMENT
4379                 /*
4380                  * For architectures with strict alignment we must make sure
4381                  * the payload is aligned.
4382                  */
4383                 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
4384                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
4385                             cur_rx->bge_len);
4386                         m->m_data += ETHER_ALIGN;
4387                 }
4388 #endif
4389                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
4390                 m->m_pkthdr.rcvif = ifp;
4391
4392                 if (ifp->if_capenable & IFCAP_RXCSUM)
4393                         bge_rxcsum(sc, cur_rx, m);
4394
4395                 /*
4396                  * If we received a packet with a vlan tag,
4397                  * attach that information to the packet.
4398                  */
4399                 if (have_tag) {
4400                         m->m_pkthdr.ether_vtag = vlan_tag;
4401                         m->m_flags |= M_VLANTAG;
4402                 }
4403
4404                 if (holdlck != 0) {
4405                         BGE_UNLOCK(sc);
4406                         (*ifp->if_input)(ifp, m);
4407                         BGE_LOCK(sc);
4408                 } else
4409                         (*ifp->if_input)(ifp, m);
4410                 rx_npkts++;
4411
4412                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4413                         return (rx_npkts);
4414         }
4415
4416         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
4417             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
4418         if (stdcnt > 0)
4419                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
4420                     sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
4421
4422         if (jumbocnt > 0)
4423                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
4424                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
4425
4426         sc->bge_rx_saved_considx = rx_cons;
4427         bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
4428         if (stdcnt)
4429                 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
4430                     BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
4431         if (jumbocnt)
4432                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
4433                     BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
4434 #ifdef notyet
4435         /*
4436          * This register wraps very quickly under heavy packet drops.
4437          * If you need correct statistics, you can enable this check.
4438          */
4439         if (BGE_IS_5705_PLUS(sc))
4440                 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4441 #endif
4442         return (rx_npkts);
4443 }
4444
4445 static void
4446 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
4447 {
4448
4449         if (BGE_IS_5717_PLUS(sc)) {
4450                 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
4451                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4452                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4453                                 if ((cur_rx->bge_error_flag &
4454                                     BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
4455                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4456                         }
4457                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
4458                                 m->m_pkthdr.csum_data =
4459                                     cur_rx->bge_tcp_udp_csum;
4460                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4461                                     CSUM_PSEUDO_HDR;
4462                         }
4463                 }
4464         } else {
4465                 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4466                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4467                         if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
4468                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4469                 }
4470                 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4471                     m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
4472                         m->m_pkthdr.csum_data =
4473                             cur_rx->bge_tcp_udp_csum;
4474                         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4475                             CSUM_PSEUDO_HDR;
4476                 }
4477         }
4478 }
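/*
 * Consumption note (illustrative, not part of the driver): with
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR set above, the TCP/UDP input path
 * treats csum_data as a hardware-computed checksum that already covers
 * the pseudo header, and accepts the segment roughly when
 * (m->m_pkthdr.csum_data ^ 0xffff) == 0 instead of running a software
 * checksum pass over the payload.
 */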
4479
4480 static void
4481 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
4482 {
4483         struct bge_tx_bd *cur_tx;
4484         struct ifnet *ifp;
4485
4486         BGE_LOCK_ASSERT(sc);
4487
4488         /* Nothing to do. */
4489         if (sc->bge_tx_saved_considx == tx_cons)
4490                 return;
4491
4492         ifp = sc->bge_ifp;
4493
4494         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4495             sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
4496         /*
4497          * Go through our tx ring and free mbufs for those
4498          * frames that have been sent.
4499          */
4500         while (sc->bge_tx_saved_considx != tx_cons) {
4501                 uint32_t                idx;
4502
4503                 idx = sc->bge_tx_saved_considx;
4504                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
4505                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4506                         ifp->if_opackets++;
4507                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
4508                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
4509                             sc->bge_cdata.bge_tx_dmamap[idx],
4510                             BUS_DMASYNC_POSTWRITE);
4511                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
4512                             sc->bge_cdata.bge_tx_dmamap[idx]);
4513                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
4514                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
4515                 }
4516                 sc->bge_txcnt--;
4517                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4518         }
4519
4520         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4521         if (sc->bge_txcnt == 0)
4522                 sc->bge_timer = 0;
4523 }
4524
4525 #ifdef DEVICE_POLLING
4526 static int
4527 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4528 {
4529         struct bge_softc *sc = ifp->if_softc;
4530         uint16_t rx_prod, tx_cons;
4531         uint32_t statusword;
4532         int rx_npkts = 0;
4533
4534         BGE_LOCK(sc);
4535         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4536                 BGE_UNLOCK(sc);
4537                 return (rx_npkts);
4538         }
4539
4540         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4541             sc->bge_cdata.bge_status_map,
4542             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4543         /* Fetch updates from the status block. */
4544         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4545         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4546
4547         statusword = sc->bge_ldata.bge_status_block->bge_status;
4548         /* Clear the status so the next pass only sees the changes. */
4549         sc->bge_ldata.bge_status_block->bge_status = 0;
4550
4551         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4552             sc->bge_cdata.bge_status_map,
4553             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4554
4555         /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
4556         if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
4557                 sc->bge_link_evt++;
4558
4559         if (cmd == POLL_AND_CHECK_STATUS)
4560                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4561                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4562                     sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
4563                         bge_link_upd(sc);
4564
4565         sc->rxcycles = count;
4566         rx_npkts = bge_rxeof(sc, rx_prod, 1);
4567         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4568                 BGE_UNLOCK(sc);
4569                 return (rx_npkts);
4570         }
4571         bge_txeof(sc, tx_cons);
4572         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4573                 bge_start_locked(ifp);
4574
4575         BGE_UNLOCK(sc);
4576         return (rx_npkts);
4577 }
4578 #endif /* DEVICE_POLLING */
4579
4580 static int
4581 bge_msi_intr(void *arg)
4582 {
4583         struct bge_softc *sc;
4584
4585         sc = (struct bge_softc *)arg;
4586         /*
4587          * This interrupt is not shared and the controller has
4588          * already disabled further interrupts.
4589          */
4590         taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4591         return (FILTER_HANDLED);
4592 }
4593
4594 static void
4595 bge_intr_task(void *arg, int pending)
4596 {
4597         struct bge_softc *sc;
4598         struct ifnet *ifp;
4599         uint32_t status, status_tag;
4600         uint16_t rx_prod, tx_cons;
4601
4602         sc = (struct bge_softc *)arg;
4603         ifp = sc->bge_ifp;
4604
4605         BGE_LOCK(sc);
4606         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4607                 BGE_UNLOCK(sc);
4608                 return;
4609         }
4610
4611         /* Get updated status block. */
4612         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4613             sc->bge_cdata.bge_status_map,
4614             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4615
4616         /* Save producer/consumer indices. */
4617         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4618         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4619         status = sc->bge_ldata.bge_status_block->bge_status;
4620         status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4621         /* Clear the status so the next pass only sees the changes. */
4622         sc->bge_ldata.bge_status_block->bge_status = 0;
4623         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4624             sc->bge_cdata.bge_status_map,
4625             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4626         if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4627                 status_tag = 0;
4628
4629         if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4630                 bge_link_upd(sc);
4631
4632         /* Let controller work. */
4633         bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4634
4635         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4636             sc->bge_rx_saved_considx != rx_prod) {
4637                 /* Check RX return ring producer/consumer. */
4638                 BGE_UNLOCK(sc);
4639                 bge_rxeof(sc, rx_prod, 0);
4640                 BGE_LOCK(sc);
4641         }
4642         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4643                 /* Check TX ring producer/consumer. */
4644                 bge_txeof(sc, tx_cons);
4645                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4646                         bge_start_locked(ifp);
4647         }
4648         BGE_UNLOCK(sc);
4649 }
4650
4651 static void
4652 bge_intr(void *xsc)
4653 {
4654         struct bge_softc *sc;
4655         struct ifnet *ifp;
4656         uint32_t statusword;
4657         uint16_t rx_prod, tx_cons;
4658
4659         sc = xsc;
4660
4661         BGE_LOCK(sc);
4662
4663         ifp = sc->bge_ifp;
4664
4665 #ifdef DEVICE_POLLING
4666         if (ifp->if_capenable & IFCAP_POLLING) {
4667                 BGE_UNLOCK(sc);
4668                 return;
4669         }
4670 #endif
4671
4672         /*
4673          * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
4674          * disable interrupts by writing nonzero like we used to, since with
4675          * our current organization this just gives complications and
4676          * pessimizations for re-enabling interrupts.  We used to have races
4677          * instead of the necessary complications.  Disabling interrupts
4678          * would just reduce the chance of a status update while we are
4679          * running (by switching to the interrupt-mode coalescence
4680          * parameters), but this chance is already very low so it is more
4681          * efficient to get another interrupt than prevent it.
4682          *
4683          * We do the ack first to ensure another interrupt if there is a
4684          * status update after the ack.  We don't check for the status
4685          * changing later because it is more efficient to get another
4686          * interrupt than prevent it, not quite as above (not checking is
4687          * a smaller optimization than not toggling the interrupt enable,
4688          * since checking doesn't involve PCI accesses and toggling requires
4689          * the status check).  So toggling would probably be a pessimization
4690          * even with MSI.  It would only be needed for using a task queue.
4691          */
4692         bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4693
4694         /*
4695          * Do the mandatory PCI flush as well as get the link status.
4696          */
4697         statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4698
4699         /* Make sure the descriptor ring indexes are coherent. */
4700         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4701             sc->bge_cdata.bge_status_map,
4702             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4703         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4704         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4705         sc->bge_ldata.bge_status_block->bge_status = 0;
4706         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4707             sc->bge_cdata.bge_status_map,
4708             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4709
4710         if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4711             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4712             statusword || sc->bge_link_evt)
4713                 bge_link_upd(sc);
4714
4715         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4716                 /* Check RX return ring producer/consumer. */
4717                 bge_rxeof(sc, rx_prod, 1);
4718         }
4719
4720         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4721                 /* Check TX ring producer/consumer. */
4722                 bge_txeof(sc, tx_cons);
4723         }
4724
4725         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4726             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4727                 bge_start_locked(ifp);
4728
4729         BGE_UNLOCK(sc);
4730 }
4731
4732 static void
4733 bge_asf_driver_up(struct bge_softc *sc)
4734 {
4735         if (sc->bge_asf_mode & ASF_STACKUP) {
4736                 /* Send an ASF heartbeat approx. every 2s. */
4737                 if (sc->bge_asf_count)
4738                         sc->bge_asf_count--;
4739                 else {
4740                         sc->bge_asf_count = 2;
4741                         bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4742                             BGE_FW_CMD_DRV_ALIVE);
4743                         bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4744                         bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4745                             BGE_FW_HB_TIMEOUT_SEC);
4746                         CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4747                             CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4748                             BGE_RX_CPU_DRV_EVENT);
4749                 }
4750         }
4751 }
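/*
 * Mailbox sketch (illustrative): each heartbeat above is a three-word
 * command in the firmware mailbox area of SRAM plus an event strobe so
 * the RX CPU notices it:
 *
 *      BGE_SRAM_FW_CMD_MB      <- BGE_FW_CMD_DRV_ALIVE   (command)
 *      BGE_SRAM_FW_CMD_LEN_MB  <- 4                      (payload length)
 *      BGE_SRAM_FW_CMD_DATA_MB <- BGE_FW_HB_TIMEOUT_SEC  (payload)
 *      BGE_RX_CPU_EVENT        |= BGE_RX_CPU_DRV_EVENT   (doorbell)
 *
 * bge_tick() runs about once per second, so the bge_asf_count counter
 * paces heartbeats to one every few ticks, inside the firmware's
 * BGE_FW_HB_TIMEOUT_SEC watchdog window.
 */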
4752
4753 static void
4754 bge_tick(void *xsc)
4755 {
4756         struct bge_softc *sc = xsc;
4757         struct mii_data *mii = NULL;
4758
4759         BGE_LOCK_ASSERT(sc);
4760
4761         /* Synchronize with possible callout reset/stop. */
4762         if (callout_pending(&sc->bge_stat_ch) ||
4763             !callout_active(&sc->bge_stat_ch))
4764                 return;
4765
4766         if (BGE_IS_5705_PLUS(sc))
4767                 bge_stats_update_regs(sc);
4768         else
4769                 bge_stats_update(sc);
4770
4771         /* XXX Add APE heartbeat check here? */
4772
4773         if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4774                 mii = device_get_softc(sc->bge_miibus);
4775                 /*
4776                  * Do not touch the PHY if we have link up.  This could
4777                  * break IPMI/ASF mode or produce extra input errors
4778                  * (extra errors were reported for bcm5701 & bcm5704).
4779                  */
4780                 if (!sc->bge_link)
4781                         mii_tick(mii);
4782         } else {
4783                 /*
4784                  * Since auto-polling can't be used in TBI mode, we poll the
4785                  * link status manually.  Here we register a pending link
4786                  * event and trigger an interrupt.
4787                  */
4788 #ifdef DEVICE_POLLING
4789                 /* In polling mode we poll link state in bge_poll(). */
4790                 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4791 #endif
4792                 {
4793                 sc->bge_link_evt++;
4794                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4795                     sc->bge_flags & BGE_FLAG_5788)
4796                         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4797                 else
4798                         BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4799                 }
4800         }
4801
4802         bge_asf_driver_up(sc);
4803         bge_watchdog(sc);
4804
4805         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4806 }
4807
4808 static void
4809 bge_stats_update_regs(struct bge_softc *sc)
4810 {
4811         struct ifnet *ifp;
4812         struct bge_mac_stats *stats;
4813         uint32_t val;
4814
4815         ifp = sc->bge_ifp;
4816         stats = &sc->bge_mac_stats;
4817
4818         stats->ifHCOutOctets +=
4819             CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4820         stats->etherStatsCollisions +=
4821             CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4822         stats->outXonSent +=
4823             CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4824         stats->outXoffSent +=
4825             CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4826         stats->dot3StatsInternalMacTransmitErrors +=
4827             CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4828         stats->dot3StatsSingleCollisionFrames +=
4829             CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4830         stats->dot3StatsMultipleCollisionFrames +=
4831             CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4832         stats->dot3StatsDeferredTransmissions +=
4833             CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4834         stats->dot3StatsExcessiveCollisions +=
4835             CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4836         stats->dot3StatsLateCollisions +=
4837             CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4838         stats->ifHCOutUcastPkts +=
4839             CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4840         stats->ifHCOutMulticastPkts +=
4841             CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4842         stats->ifHCOutBroadcastPkts +=
4843             CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4844
4845         stats->ifHCInOctets +=
4846             CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4847         stats->etherStatsFragments +=
4848             CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4849         stats->ifHCInUcastPkts +=
4850             CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4851         stats->ifHCInMulticastPkts +=
4852             CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4853         stats->ifHCInBroadcastPkts +=
4854             CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4855         stats->dot3StatsFCSErrors +=
4856             CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4857         stats->dot3StatsAlignmentErrors +=
4858             CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4859         stats->xonPauseFramesReceived +=
4860             CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4861         stats->xoffPauseFramesReceived +=
4862             CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4863         stats->macControlFramesReceived +=
4864             CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4865         stats->xoffStateEntered +=
4866             CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4867         stats->dot3StatsFramesTooLong +=
4868             CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4869         stats->etherStatsJabbers +=
4870             CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4871         stats->etherStatsUndersizePkts +=
4872             CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4873
4874         stats->FramesDroppedDueToFilters +=
4875             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4876         stats->DmaWriteQueueFull +=
4877             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4878         stats->DmaWriteHighPriQueueFull +=
4879             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4880         stats->NoMoreRxBDs +=
4881             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4882         /*
4883          * XXX
4884          * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
4885          * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4886          * includes the number of unwanted multicast frames.  This comes
4887          * from a silicon bug, and the known workaround to get a rough
4888          * (not exact) counter is to enable an interrupt on MBUF low
4889          * water attention.  This can be accomplished by setting the
4890          * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
4891          * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
4892          * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4893          * However, that change would generate more interrupts, and
4894          * there are still possibilities of losing multiple frames
4895          * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4896          * Given that the workaround still would not yield a correct
4897          * counter, it does not seem worth implementing.  So skip
4898          * reading the counter on controllers that have the
4899          * silicon bug.
4900          */
4901         if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4902             sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4903             sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4904                 stats->InputDiscards +=
4905                     CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4906         stats->InputErrors +=
4907             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4908         stats->RecvThresholdHit +=
4909             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4910
4911         ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4912         ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4913             stats->InputErrors);
4914
4915         if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
4916                 /*
4917                  * If the controller has transmitted more than
4918                  * BGE_NUM_RDMA_CHANNELS frames, it's safe to disable the
4919                  * workaround for the DMA engine's miscalculation of TXMBUF space.
4920                  */
4921                 if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
4922                     stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
4923                         val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
4924                         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
4925                                 val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
4926                         else
4927                                 val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
4928                         CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
4929                         sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
4930                 }
4931         }
4932 }
4933
4934 static void
4935 bge_stats_clear_regs(struct bge_softc *sc)
4936 {
4937
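        /*
         * These MAC statistics registers are clear-on-read: each read
         * returns the accumulated count and resets it to zero, so a
         * plain read of every counter is enough to discard stale values.
         */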
4938         CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4939         CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4940         CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4941         CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4942         CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4943         CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4944         CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4945         CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4946         CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4947         CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4948         CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4949         CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4950         CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4951
4952         CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4953         CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4954         CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4955         CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4956         CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4957         CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4958         CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4959         CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4960         CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4961         CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4962         CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4963         CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4964         CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4965         CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4966
4967         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4968         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4969         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4970         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4971         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4972         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4973         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4974 }
4975
4976 static void
4977 bge_stats_update(struct bge_softc *sc)
4978 {
4979         struct ifnet *ifp;
4980         bus_size_t stats;
4981         uint32_t cnt;   /* current register value */
4982
4983         ifp = sc->bge_ifp;
4984
4985         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4986
4987 #define READ_STAT(sc, stats, stat) \
4988         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4989
4990         cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4991         ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4992         sc->bge_tx_collisions = cnt;
4993
4994         cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
4995         ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_nobds);
4996         sc->bge_rx_nobds = cnt;
4997         cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
4998         ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrs);
4999         sc->bge_rx_inerrs = cnt;
5000         cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
5001         ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
5002         sc->bge_rx_discards = cnt;
5003
5004         cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
5005         ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
5006         sc->bge_tx_discards = cnt;
5007
5008 #undef  READ_STAT
5009 }
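/*
 * Wraparound note (illustrative): the NIC counters are free-running
 * 32-bit values, so the deltas above are taken in unsigned arithmetic.
 * E.g. with a saved value of 0xfffffff0 and a current read of 0x10,
 * (uint32_t)(0x10 - 0xfffffff0) == 0x20, the correct number of new
 * events across the wrap.
 */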
5010
5011 /*
5012  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
5013  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
5014  * but when such padded frames employ the bge IP/TCP checksum offload,
5015  * the hardware checksum assist gives incorrect results (possibly
5016  * from incorporating its own padding into the UDP/TCP checksum; who knows).
5017  * If we pad such runts with zeros, the onboard checksum comes out correct.
5018  */
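/*
 * Worked example (assuming ETHER_MIN_NOPAD == 60): a bare TCP ACK of
 * 14 + 20 + 20 = 54 bytes gets padlen = 60 - 54 = 6 zero bytes appended
 * below, so the hardware never folds its own pad bytes into the TCP/UDP
 * checksum.
 */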
5019 static __inline int
5020 bge_cksum_pad(struct mbuf *m)
5021 {
5022         int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
5023         struct mbuf *last;
5024
5025         /* If there's only the packet-header mbuf and we can pad there, use it. */
5026         if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
5027             M_TRAILINGSPACE(m) >= padlen) {
5028                 last = m;
5029         } else {
5030                 /*
5031                  * Walk packet chain to find last mbuf. We will either
5032                  * pad there, or append a new mbuf and pad it.
5033                  */
5034                 for (last = m; last->m_next != NULL; last = last->m_next);
5035                 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
5036                         /* Allocate new empty mbuf, pad it. Compact later. */
5037                         struct mbuf *n;
5038
5039                         MGET(n, M_NOWAIT, MT_DATA);
5040                         if (n == NULL)
5041                                 return (ENOBUFS);
5042                         n->m_len = 0;
5043                         last->m_next = n;
5044                         last = n;
5045                 }
5046         }
5047
5048         /* Now zero the pad area, to avoid the bge cksum-assist bug. */
5049         memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
5050         last->m_len += padlen;
5051         m->m_pkthdr.len += padlen;
5052
5053         return (0);
5054 }
5055
5056 static struct mbuf *
5057 bge_check_short_dma(struct mbuf *m)
5058 {
5059         struct mbuf *n;
5060         int found;
5061
5062         /*
5063          * If the device receives two back-to-back send BDs with less
5064          * than or equal to 8 total bytes then it may hang.  The two
5065          * back-to-back send BDs must be in the same frame for this
5066          * failure to occur.  Scan the mbuf chain to see whether two
5067          * back-to-back send BDs are present.  If so, defragment the
5068          * chain into a new mbuf to work around the silicon bug.
5069          */
5070         for (n = m, found = 0; n != NULL; n = n->m_next) {
5071                 if (n->m_len < 8) {
5072                         found++;
5073                         if (found > 1)
5074                                 break;
5075                         continue;
5076                 }
5077                 found = 0;
5078         }
5079
5080         if (found > 1) {
5081                 n = m_defrag(m, M_NOWAIT);
5082                 if (n == NULL)
5083                         m_freem(m);
5084         } else
5085                 n = m;
5086         return (n);
5087 }
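/*
 * Worked example (illustrative): a chain with segment lengths
 * { 64, 4, 3, 512 } has two adjacent segments under 8 bytes, so found
 * reaches 2 and the chain is m_defrag()ed into one contiguous buffer;
 * { 64, 4, 512, 3 } is left alone because its short segments are not
 * adjacent.
 */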
5088
5089 static struct mbuf *
5090 bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
5091     uint16_t *flags)
5092 {
5093         struct ip *ip;
5094         struct tcphdr *tcp;
5095         struct mbuf *n;
5096         uint16_t hlen;
5097         uint32_t poff;
5098
5099         if (M_WRITABLE(m) == 0) {
5100                 /* Get a writable copy. */
5101                 n = m_dup(m, M_NOWAIT);
5102                 m_freem(m);
5103                 if (n == NULL)
5104                         return (NULL);
5105                 m = n;
5106         }
5107         m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
5108         if (m == NULL)
5109                 return (NULL);
5110         ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
5111         poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
5112         m = m_pullup(m, poff + sizeof(struct tcphdr));
5113         if (m == NULL)
5114                 return (NULL);
5115         tcp = (struct tcphdr *)(mtod(m, char *) + poff);
5116         m = m_pullup(m, poff + (tcp->th_off << 2));
5117         if (m == NULL)
5118                 return (NULL);
5119         /*
5120          * The controller doesn't seem to modify the IP length and TCP
5121          * pseudo checksum; those fields from the upper stack must be 0.
5122          */
5123         *mss = m->m_pkthdr.tso_segsz;
5124         ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
5125         ip->ip_sum = 0;
5126         ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
5127         /* Clear pseudo checksum computed by TCP stack. */
5128         tcp = (struct tcphdr *)(mtod(m, char *) + poff);
5129         tcp->th_sum = 0;
5130         /*
5131          * Broadcom controllers use different descriptor formats for
5132          * TSO depending on the ASIC revision.  Due to licensing issues
5133          * with the TSO-capable firmware and the lower performance of
5134          * firmware-based TSO, we only support hardware-based TSO.
5135          */
5136         /* Calculate header length, incl. TCP/IP options, in 32-bit units. */
5137         hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
5138         if (sc->bge_flags & BGE_FLAG_TSO3) {
5139                 /*
5140                  * For BCM5717 and newer controllers, hardware based TSO
5141                  * uses the 14 lower bits of the bge_mss field to store the
5142                  * MSS and the upper 2 bits to store the lowest 2 bits of
5143                  * the IP/TCP header length.  The upper 6 bits of the header
5144                  * length are stored in the bge_flags[14:10,4] field.  Jumbo
5145                  * frames are supported.
5146                  */
5147                 *mss |= ((hlen & 0x3) << 14);
5148                 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
5149         } else {
5150                 /*
5151                  * For BCM5755 and newer controllers, hardware based TSO uses
5152                  * the lower 11 bits to store the MSS and the upper 5 bits to
5153                  * store the IP/TCP header length. Jumbo frames are not
5154                  * supported.
5155                  */
5156                 *mss |= (hlen << 11);
5157         }
5158         return (m);
5159 }
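/*
 * Encoding example (illustrative): for a 20-byte IP header and a
 * 20-byte TCP header, hlen = (20 + 20) >> 2 = 10 32-bit words.  With
 * the TSO3 layout the low 2 bits of hlen land in mss[15:14] and the
 * rest in the flags word:
 *
 *      mss   |= (10 & 0x3) << 14;                       (adds 0x8000)
 *      flags |= ((10 & 0xF8) << 7) | ((10 & 0x4) << 2); (adds 0x0400)
 *
 * With the older layout the whole value is simply mss |= 10 << 11
 * (0x5000).
 */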
5160
5161 /*
5162  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
5163  * pointers to descriptors.
5164  */
5165 static int
5166 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
5167 {
5168         bus_dma_segment_t       segs[BGE_NSEG_NEW];
5169         bus_dmamap_t            map;
5170         struct bge_tx_bd        *d;
5171         struct mbuf             *m = *m_head;
5172         uint32_t                idx = *txidx;
5173         uint16_t                csum_flags, mss, vlan_tag;
5174         int                     nsegs, i, error;
5175
5176         csum_flags = 0;
5177         mss = 0;
5178         vlan_tag = 0;
5179         if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
5180             m->m_next != NULL) {
5181                 *m_head = bge_check_short_dma(m);
5182                 if (*m_head == NULL)
5183                         return (ENOBUFS);
5184                 m = *m_head;
5185         }
5186         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
5187                 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
5188                 if (*m_head == NULL)
5189                         return (ENOBUFS);
5190                 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
5191                     BGE_TXBDFLAG_CPU_POST_DMA;
5192         } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
5193                 if (m->m_pkthdr.csum_flags & CSUM_IP)
5194                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
5195                 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
5196                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
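                        /*
                         * Short frames apparently confuse the hardware
                         * checksum logic, so pad them out to the minimum
                         * no-pad length in software (bge_cksum_pad())
                         * before handing them to the chip.
                         */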
5197                         if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
5198                             (error = bge_cksum_pad(m)) != 0) {
5199                                 m_freem(m);
5200                                 *m_head = NULL;
5201                                 return (error);
5202                         }
5203                 }
5204         }
5205
5206         if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
5207                 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
5208                     m->m_pkthdr.len > ETHER_MAX_LEN)
5209                         csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
5210                 if (sc->bge_forced_collapse > 0 &&
5211                     (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
5212                         /*
5213                          * Forcibly collapse mbuf chains to overcome a hardware
5214                          * limitation which only supports a single outstanding
5215                          * DMA read operation.
5216                          */
5217                         if (sc->bge_forced_collapse == 1)
5218                                 m = m_defrag(m, M_NOWAIT);
5219                         else
5220                                 m = m_collapse(m, M_NOWAIT,
5221                                     sc->bge_forced_collapse);
5222                         if (m == NULL)
5223                                 m = *m_head;
5224                         *m_head = m;
5225                 }
5226         }
5227
5228         map = sc->bge_cdata.bge_tx_dmamap[idx];
5229         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
5230             &nsegs, BUS_DMA_NOWAIT);
5231         if (error == EFBIG) {
5232                 m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
5233                 if (m == NULL) {
5234                         m_freem(*m_head);
5235                         *m_head = NULL;
5236                         return (ENOBUFS);
5237                 }
5238                 *m_head = m;
5239                 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
5240                     m, segs, &nsegs, BUS_DMA_NOWAIT);
5241                 if (error) {
5242                         m_freem(m);
5243                         *m_head = NULL;
5244                         return (error);
5245                 }
5246         } else if (error != 0)
5247                 return (error);
5248
5249         /* Check if we have enough free send BDs. */
5250         if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
5251                 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
5252                 return (ENOBUFS);
5253         }
5254
5255         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
5256
5257         if (m->m_flags & M_VLANTAG) {
5258                 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
5259                 vlan_tag = m->m_pkthdr.ether_vtag;
5260         }
5261
5262         if (sc->bge_asicrev == BGE_ASICREV_BCM5762 &&
5263             (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
5264                 /*
5265                  * The 5725 family of devices corrupts TSO packets when TSO DMA
5266                  * buffers cross into regions which are within MSS bytes of
5267                  * a 4GB boundary.  If we encounter the condition, drop the
5268                  * packet.
5269                  */
5270                 for (i = 0; ; i++) {
5271                         d = &sc->bge_ldata.bge_tx_ring[idx];
5272                         d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
5273                         d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
5274                         d->bge_len = segs[i].ds_len;
5275                         if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss <
5276                             d->bge_addr.bge_addr_lo)
5277                                 break;
5278                         d->bge_flags = csum_flags;
5279                         d->bge_vlan_tag = vlan_tag;
5280                         d->bge_mss = mss;
5281                         if (i == nsegs - 1)
5282                                 break;
5283                         BGE_INC(idx, BGE_TX_RING_CNT);
5284                 }
5285                 if (i != nsegs - 1) {
5286                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map,
5287                             BUS_DMASYNC_POSTWRITE);
5288                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
5289                         m_freem(*m_head);
5290                         *m_head = NULL;
5291                         return (EIO);
5292                 }
5293         } else {
5294                 for (i = 0; ; i++) {
5295                         d = &sc->bge_ldata.bge_tx_ring[idx];
5296                         d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
5297                         d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
5298                         d->bge_len = segs[i].ds_len;
5299                         d->bge_flags = csum_flags;
5300                         d->bge_vlan_tag = vlan_tag;
5301                         d->bge_mss = mss;
5302                         if (i == nsegs - 1)
5303                                 break;
5304                         BGE_INC(idx, BGE_TX_RING_CNT);
5305                 }
5306         }
5307
5308         /* Mark the last segment as end of packet... */
5309         d->bge_flags |= BGE_TXBDFLAG_END;
5310
5311         /*
5312          * Ensure that the map for this transmission
5313          * is placed at the array index of the last descriptor
5314          * in this chain.
5315          */
5316         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
5317         sc->bge_cdata.bge_tx_dmamap[idx] = map;
5318         sc->bge_cdata.bge_tx_chain[idx] = m;
5319         sc->bge_txcnt += nsegs;
5320
5321         BGE_INC(idx, BGE_TX_RING_CNT);
5322         *txidx = idx;
5323
5324         return (0);
5325 }
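
/*
 * Illustrative sketch only -- not called by the driver.  The BCM5762 guard
 * in bge_encap() above is an unsigned 32-bit overflow test: if the low 32
 * address bits plus the segment length plus one MSS wrap around, the DMA
 * buffer ends within MSS bytes of a 4GB boundary and the packet is dropped.
 * The function name is local to this example.
 */
static __unused int
bge_tso_wraps_4g_example(uint32_t addr_lo, uint32_t len, uint32_t mss)
{

        /* An unsigned sum that is smaller than its first operand wrapped. */
        return (addr_lo + len + mss < addr_lo);
}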
5326
5327 /*
5328  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
5329  * to the mbuf data regions directly in the transmit descriptors.
5330  */
5331 static void
5332 bge_start_locked(struct ifnet *ifp)
5333 {
5334         struct bge_softc *sc;
5335         struct mbuf *m_head;
5336         uint32_t prodidx;
5337         int count;
5338
5339         sc = ifp->if_softc;
5340         BGE_LOCK_ASSERT(sc);
5341
5342         if (!sc->bge_link ||
5343             (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
5344             IFF_DRV_RUNNING)
5345                 return;
5346
5347         prodidx = sc->bge_tx_prodidx;
5348
5349         for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
5350                 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
5351                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5352                         break;
5353                 }
5354                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
5355                 if (m_head == NULL)
5356                         break;
5357
5358                 /*
5359                  * Pack the data into the transmit ring. If we
5360                  * don't have room, set the OACTIVE flag and wait
5361                  * for the NIC to drain the ring.
5362                  */
5363                 if (bge_encap(sc, &m_head, &prodidx)) {
5364                         if (m_head == NULL)
5365                                 break;
5366                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
5367                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5368                         break;
5369                 }
5370                 ++count;
5371
5372                 /*
5373                  * If there's a BPF listener, bounce a copy of this frame
5374                  * to him.
5375                  */
5376 #ifdef ETHER_BPF_MTAP
5377                 ETHER_BPF_MTAP(ifp, m_head);
5378 #else
5379                 BPF_MTAP(ifp, m_head);
5380 #endif
5381         }
5382
5383         if (count > 0) {
5384                 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
5385                     sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
5386                 /* Transmit. */
5387                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5388                 /* 5700 b2 errata */
5389                 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
5390                         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5391
5392                 sc->bge_tx_prodidx = prodidx;
5393
5394                 /*
5395                  * Set a timeout in case the chip goes out to lunch.
5396                  */
5397                 sc->bge_timer = BGE_TX_TIMEOUT;
5398         }
5399 }
5400
5401 /*
5402  * Locking wrapper around bge_start_locked(), the main transmit
5403  * routine above.
5404  */
5405 static void
5406 bge_start(struct ifnet *ifp)
5407 {
5408         struct bge_softc *sc;
5409
5410         sc = ifp->if_softc;
5411         BGE_LOCK(sc);
5412         bge_start_locked(ifp);
5413         BGE_UNLOCK(sc);
5414 }
5415
5416 static void
5417 bge_init_locked(struct bge_softc *sc)
5418 {
5419         struct ifnet *ifp;
5420         uint16_t *m;
5421         uint32_t mode;
5422
5423         BGE_LOCK_ASSERT(sc);
5424
5425         ifp = sc->bge_ifp;
5426
5427         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5428                 return;
5429
5430         /* Cancel pending I/O and flush buffers. */
5431         bge_stop(sc);
5432
5433         bge_stop_fw(sc);
5434         bge_sig_pre_reset(sc, BGE_RESET_START);
5435         bge_reset(sc);
5436         bge_sig_legacy(sc, BGE_RESET_START);
5437         bge_sig_post_reset(sc, BGE_RESET_START);
5438
5439         bge_chipinit(sc);
5440
5441         /*
5442          * Init the various state machines, ring
5443          * control blocks and firmware.
5444          */
5445         if (bge_blockinit(sc)) {
5446                 device_printf(sc->bge_dev, "initialization failure\n");
5447                 return;
5448         }
5449
5450         ifp = sc->bge_ifp;
5451
5452         /* Specify MTU. */
5453         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
5454             ETHER_HDR_LEN + ETHER_CRC_LEN +
5455             (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
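        /*
         * For example, with the default 1500-byte MTU and IFCAP_VLAN_MTU
         * enabled, the controller accepts frames of up to
         * 1500 + 14 + 4 + 4 = 1522 bytes.
         */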
5456
5457         /* Load our MAC address. */
5458         m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
5459         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
5460         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
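        /*
         * The htons() conversions make this endian-independent; e.g. the
         * address 00:11:22:33:44:55 is programmed by writing 0x00000011
         * to BGE_MAC_ADDR1_LO and 0x22334455 to BGE_MAC_ADDR1_HI.
         */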
5461
5462         /* Program promiscuous mode. */
5463         bge_setpromisc(sc);
5464
5465         /* Program multicast filter. */
5466         bge_setmulti(sc);
5467
5468         /* Program VLAN tag stripping. */
5469         bge_setvlan(sc);
5470
5471         /* Override UDP checksum offloading. */
5472         if (sc->bge_forced_udpcsum == 0)
5473                 sc->bge_csum_features &= ~CSUM_UDP;
5474         else
5475                 sc->bge_csum_features |= CSUM_UDP;
5476         if (ifp->if_capabilities & IFCAP_TXCSUM &&
5477             ifp->if_capenable & IFCAP_TXCSUM) {
5478                 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
5479                 ifp->if_hwassist |= sc->bge_csum_features;
5480         }
5481
5482         /* Init RX ring. */
5483         if (bge_init_rx_ring_std(sc) != 0) {
5484                 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
5485                 bge_stop(sc);
5486                 return;
5487         }
5488
5489         /*
5490          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
5491          * memory to ensure that the chip has in fact read the first
5492          * entry of the ring.
5493          */
5494         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
5495                 uint32_t                v, i;
5496                 for (i = 0; i < 10; i++) {
5497                         DELAY(20);
5498                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
5499                         if (v == (MCLBYTES - ETHER_ALIGN))
5500                                 break;
5501                 }
5502                 if (i == 10)
5503                         device_printf(sc->bge_dev,
5504                             "5705 A0 chip failed to load RX ring\n");
5505         }
5506
5507         /* Init jumbo RX ring. */
5508         if (BGE_IS_JUMBO_CAPABLE(sc) &&
5509             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
5510             (MCLBYTES - ETHER_ALIGN)) {
5511                 if (bge_init_rx_ring_jumbo(sc) != 0) {
5512                         device_printf(sc->bge_dev,
5513                             "no memory for jumbo Rx buffers.\n");
5514                         bge_stop(sc);
5515                         return;
5516                 }
5517         }
5518
5519         /* Init our RX return ring index. */
5520         sc->bge_rx_saved_considx = 0;
5521
5522         /* Init our RX/TX stat counters. */
5523         sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
5524
5525         /* Init TX ring. */
5526         bge_init_tx_ring(sc);
5527
5528         /* Enable TX MAC state machine lockup fix. */
5529         mode = CSR_READ_4(sc, BGE_TX_MODE);
5530         if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
5531                 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5532         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
5533             sc->bge_asicrev == BGE_ASICREV_BCM5762) {
5534                 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5535                 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5536                     (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5537         }
5538         /* Turn on transmitter. */
5539         CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5540         DELAY(100);
5541
5542         /* Turn on receiver. */
5543         mode = CSR_READ_4(sc, BGE_RX_MODE);
5544         if (BGE_IS_5755_PLUS(sc))
5545                 mode |= BGE_RXMODE_IPV6_ENABLE;
5546         if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
5547                 mode |= BGE_RXMODE_IPV4_FRAG_FIX;
5548         CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
5549         DELAY(10);
5550
5551         /*
5552          * Set the number of good frames to receive after RX MBUF
5553          * Low Watermark has been reached. After the RX MAC receives
5554          * this number of frames, it will drop subsequent incoming
5555          * frames until the MBUF High Watermark is reached.
5556          */
5557         if (BGE_IS_57765_PLUS(sc))
5558                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
5559         else
5560                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
5561
5562         /* Clear MAC statistics. */
5563         if (BGE_IS_5705_PLUS(sc))
5564                 bge_stats_clear_regs(sc);
5565
5566         /* Tell firmware we're alive. */
5567         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5568
5569 #ifdef DEVICE_POLLING
5570         /* Disable interrupts if we are polling. */
5571         if (ifp->if_capenable & IFCAP_POLLING) {
5572                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5573                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5574                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5575         } else
5576 #endif
5577
5578         /* Enable host interrupts. */
5579         {
5580                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5581                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5582                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5583         }
5584
5585         ifp->if_drv_flags |= IFF_DRV_RUNNING;
5586         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5587
5588         bge_ifmedia_upd_locked(ifp);
5589
5590         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
5591 }
5592
5593 static void
5594 bge_init(void *xsc)
5595 {
5596         struct bge_softc *sc = xsc;
5597
5598         BGE_LOCK(sc);
5599         bge_init_locked(sc);
5600         BGE_UNLOCK(sc);
5601 }
5602
5603 /*
5604  * Set media options.
5605  */
5606 static int
5607 bge_ifmedia_upd(struct ifnet *ifp)
5608 {
5609         struct bge_softc *sc = ifp->if_softc;
5610         int res;
5611
5612         BGE_LOCK(sc);
5613         res = bge_ifmedia_upd_locked(ifp);
5614         BGE_UNLOCK(sc);
5615
5616         return (res);
5617 }
5618
5619 static int
5620 bge_ifmedia_upd_locked(struct ifnet *ifp)
5621 {
5622         struct bge_softc *sc = ifp->if_softc;
5623         struct mii_data *mii;
5624         struct mii_softc *miisc;
5625         struct ifmedia *ifm;
5626
5627         BGE_LOCK_ASSERT(sc);
5628
5629         ifm = &sc->bge_ifmedia;
5630
5631         /* If this is a 1000baseX NIC, enable the TBI port. */
5632         if (sc->bge_flags & BGE_FLAG_TBI) {
5633                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5634                         return (EINVAL);
5635                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
5636                 case IFM_AUTO:
5637                         /*
5638                          * The BCM5704 ASIC appears to have a special
5639                          * mechanism for programming the autoneg
5640                          * advertisement registers in TBI mode.
5641                          */
5642                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5643                                 uint32_t sgdig;
5644                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5645                                 if (sgdig & BGE_SGDIGSTS_DONE) {
5646                                         CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5647                                         sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5648                                         sgdig |= BGE_SGDIGCFG_AUTO |
5649                                             BGE_SGDIGCFG_PAUSE_CAP |
5650                                             BGE_SGDIGCFG_ASYM_PAUSE;
5651                                         CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5652                                             sgdig | BGE_SGDIGCFG_SEND);
5653                                         DELAY(5);
5654                                         CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5655                                 }
5656                         }
5657                         break;
5658                 case IFM_1000_SX:
5659                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5660                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
5661                                     BGE_MACMODE_HALF_DUPLEX);
5662                         } else {
5663                                 BGE_SETBIT(sc, BGE_MAC_MODE,
5664                                     BGE_MACMODE_HALF_DUPLEX);
5665                         }
5666                         DELAY(40);
5667                         break;
5668                 default:
5669                         return (EINVAL);
5670                 }
5671                 return (0);
5672         }
5673
5674         sc->bge_link_evt++;
5675         mii = device_get_softc(sc->bge_miibus);
5676         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5677                 PHY_RESET(miisc);
5678         mii_mediachg(mii);
5679
5680         /*
5681          * Force an interrupt so that we will call bge_link_upd
5682          * if needed and clear any pending link state attention.
5683          * Without this we do not get any further interrupts for
5684          * link state changes and thus cannot bring the link up or
5685          * send from bge_start_locked. The only other way to get
5686          * things working would be to receive a packet and take an
5687          * RX interrupt.  bge_tick should help for fiber cards, and
5688          * we might not need to do this here if BGE_FLAG_TBI is
5689          * set, but since we poll for fiber anyway it should not
5690          * hurt.
5691          */
5692         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5693             sc->bge_flags & BGE_FLAG_5788)
5694                 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5695         else
5696                 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5697
5698         return (0);
5699 }
5700
5701 /*
5702  * Report current media status.
5703  */
5704 static void
5705 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5706 {
5707         struct bge_softc *sc = ifp->if_softc;
5708         struct mii_data *mii;
5709
5710         BGE_LOCK(sc);
5711
5712         if ((ifp->if_flags & IFF_UP) == 0) {
5713                 BGE_UNLOCK(sc);
5714                 return;
5715         }
5716         if (sc->bge_flags & BGE_FLAG_TBI) {
5717                 ifmr->ifm_status = IFM_AVALID;
5718                 ifmr->ifm_active = IFM_ETHER;
5719                 if (CSR_READ_4(sc, BGE_MAC_STS) &
5720                     BGE_MACSTAT_TBI_PCS_SYNCHED)
5721                         ifmr->ifm_status |= IFM_ACTIVE;
5722                 else {
5723                         ifmr->ifm_active |= IFM_NONE;
5724                         BGE_UNLOCK(sc);
5725                         return;
5726                 }
5727                 ifmr->ifm_active |= IFM_1000_SX;
5728                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5729                         ifmr->ifm_active |= IFM_HDX;
5730                 else
5731                         ifmr->ifm_active |= IFM_FDX;
5732                 BGE_UNLOCK(sc);
5733                 return;
5734         }
5735
5736         mii = device_get_softc(sc->bge_miibus);
5737         mii_pollstat(mii);
5738         ifmr->ifm_active = mii->mii_media_active;
5739         ifmr->ifm_status = mii->mii_media_status;
5740
5741         BGE_UNLOCK(sc);
5742 }
5743
5744 static int
5745 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5746 {
5747         struct bge_softc *sc = ifp->if_softc;
5748         struct ifreq *ifr = (struct ifreq *) data;
5749         struct mii_data *mii;
5750         int flags, mask, error = 0;
5751
5752         switch (command) {
5753         case SIOCSIFMTU:
5754                 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5755                     (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5756                         if (ifr->ifr_mtu < ETHERMIN ||
5757                             ifr->ifr_mtu > BGE_JUMBO_MTU) {
5758                                 error = EINVAL;
5759                                 break;
5760                         }
5761                 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5762                         error = EINVAL;
5763                         break;
5764                 }
5765                 BGE_LOCK(sc);
5766                 if (ifp->if_mtu != ifr->ifr_mtu) {
5767                         ifp->if_mtu = ifr->ifr_mtu;
5768                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5769                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5770                                 bge_init_locked(sc);
5771                         }
5772                 }
5773                 BGE_UNLOCK(sc);
5774                 break;
5775         case SIOCSIFFLAGS:
5776                 BGE_LOCK(sc);
5777                 if (ifp->if_flags & IFF_UP) {
5778                         /*
5779                          * If only the state of the PROMISC flag changed,
5780                          * then just use the 'set promisc mode' command
5781                          * instead of reinitializing the entire NIC. Doing
5782                          * a full re-init means reloading the firmware and
5783                          * waiting for it to start up, which may take a
5784                          * second or two.  Similarly for ALLMULTI.
5785                          */
5786                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5787                                 flags = ifp->if_flags ^ sc->bge_if_flags;
5788                                 if (flags & IFF_PROMISC)
5789                                         bge_setpromisc(sc);
5790                                 if (flags & IFF_ALLMULTI)
5791                                         bge_setmulti(sc);
5792                         } else
5793                                 bge_init_locked(sc);
5794                 } else {
5795                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5796                                 bge_stop(sc);
5797                         }
5798                 }
5799                 sc->bge_if_flags = ifp->if_flags;
5800                 BGE_UNLOCK(sc);
5801                 error = 0;
5802                 break;
5803         case SIOCADDMULTI:
5804         case SIOCDELMULTI:
5805                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5806                         BGE_LOCK(sc);
5807                         bge_setmulti(sc);
5808                         BGE_UNLOCK(sc);
5809                         error = 0;
5810                 }
5811                 break;
5812         case SIOCSIFMEDIA:
5813         case SIOCGIFMEDIA:
5814                 if (sc->bge_flags & BGE_FLAG_TBI) {
5815                         error = ifmedia_ioctl(ifp, ifr,
5816                             &sc->bge_ifmedia, command);
5817                 } else {
5818                         mii = device_get_softc(sc->bge_miibus);
5819                         error = ifmedia_ioctl(ifp, ifr,
5820                             &mii->mii_media, command);
5821                 }
5822                 break;
5823         case SIOCSIFCAP:
5824                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5825 #ifdef DEVICE_POLLING
5826                 if (mask & IFCAP_POLLING) {
5827                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
5828                                 error = ether_poll_register(bge_poll, ifp);
5829                                 if (error)
5830                                         return (error);
5831                                 BGE_LOCK(sc);
5832                                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5833                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5834                                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5835                                 ifp->if_capenable |= IFCAP_POLLING;
5836                                 BGE_UNLOCK(sc);
5837                         } else {
5838                                 error = ether_poll_deregister(ifp);
5839                                 /* Enable interrupt even in error case */
5840                                 BGE_LOCK(sc);
5841                                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5842                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5843                                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5844                                 ifp->if_capenable &= ~IFCAP_POLLING;
5845                                 BGE_UNLOCK(sc);
5846                         }
5847                 }
5848 #endif
5849                 if ((mask & IFCAP_TXCSUM) != 0 &&
5850                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5851                         ifp->if_capenable ^= IFCAP_TXCSUM;
5852                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5853                                 ifp->if_hwassist |= sc->bge_csum_features;
5854                         else
5855                                 ifp->if_hwassist &= ~sc->bge_csum_features;
5856                 }
5857
5858                 if ((mask & IFCAP_RXCSUM) != 0 &&
5859                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5860                         ifp->if_capenable ^= IFCAP_RXCSUM;
5861
5862                 if ((mask & IFCAP_TSO4) != 0 &&
5863                     (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5864                         ifp->if_capenable ^= IFCAP_TSO4;
5865                         if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5866                                 ifp->if_hwassist |= CSUM_TSO;
5867                         else
5868                                 ifp->if_hwassist &= ~CSUM_TSO;
5869                 }
5870
5871                 if (mask & IFCAP_VLAN_MTU) {
5872                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
5873                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5874                         bge_init(sc);
5875                 }
5876
5877                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5878                     (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5879                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5880                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5881                     (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5882                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5883                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5884                                 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5885                         BGE_LOCK(sc);
5886                         bge_setvlan(sc);
5887                         BGE_UNLOCK(sc);
5888                 }
5889 #ifdef VLAN_CAPABILITIES
5890                 VLAN_CAPABILITIES(ifp);
5891 #endif
5892                 break;
5893         default:
5894                 error = ether_ioctl(ifp, command, data);
5895                 break;
5896         }
5897
5898         return (error);
5899 }
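
/*
 * Illustrative sketch only -- not called by the driver.  The SIOCSIFCAP
 * case above uses a common idiom: XORing the requested capability set
 * against the currently enabled set yields the mask of bits the caller
 * wants changed, and each supported bit is then flipped with another XOR.
 * The function name is local to this example.
 */
static __unused void
bge_capmask_example(int reqcap, int *capenable, int capabilities)
{
        int mask;

        mask = reqcap ^ *capenable;     /* bits the caller asked to change */
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (capabilities & IFCAP_TXCSUM) != 0)
                *capenable ^= IFCAP_TXCSUM;     /* toggle just that bit */
}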
5900
5901 static void
5902 bge_watchdog(struct bge_softc *sc)
5903 {
5904         struct ifnet *ifp;
5905         uint32_t status;
5906
5907         BGE_LOCK_ASSERT(sc);
5908
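        /*
         * bge_timer idles at 0 and is armed with BGE_TX_TIMEOUT by
         * bge_start_locked(), so the countdown below only expires when a
         * transmission has been pending for the full timeout.
         */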
5909         if (sc->bge_timer == 0 || --sc->bge_timer)
5910                 return;
5911
5912         /* If pause frames are active then don't reset the hardware. */
5913         if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
5914                 status = CSR_READ_4(sc, BGE_RX_STS);
5915                 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
5916                         /*
5917                          * If the link partner has us in XOFF state then wait for
5918                          * the condition to clear.
5919                          */
5920                         CSR_WRITE_4(sc, BGE_RX_STS, status);
5921                         sc->bge_timer = BGE_TX_TIMEOUT;
5922                         return;
5923                 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
5924                     (status & BGE_RXSTAT_RCVD_XON) != 0) {
5925                         /*
5926                          * If we received both XOFF and XON frames, the link was
5927                          * recently paused; wait for the condition to clear.
5928                          */
5929                         CSR_WRITE_4(sc, BGE_RX_STS, status);
5930                         sc->bge_timer = BGE_TX_TIMEOUT;
5931                         return;
5932                 }
5933                 /*
5934                  * Any other condition is unexpected and the controller
5935                  * should be reset.
5936                  */
5937         }
5938
5939         ifp = sc->bge_ifp;
5940
5941         if_printf(ifp, "watchdog timeout -- resetting\n");
5942
5943         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5944         bge_init_locked(sc);
5945
5946         ifp->if_oerrors++;
5947 }
5948
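/*
 * Clear the enable bit of a single state-machine block and busy-wait,
 * up to BGE_TIMEOUT iterations of 100us each, for the hardware to
 * acknowledge; gives up silently if the bit never clears.
 */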
5949 static void
5950 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5951 {
5952         int i;
5953
5954         BGE_CLRBIT(sc, reg, bit);
5955
5956         for (i = 0; i < BGE_TIMEOUT; i++) {
5957                 if ((CSR_READ_4(sc, reg) & bit) == 0)
5958                         return;
5959                 DELAY(100);
5960         }
5961 }
5962
5963 /*
5964  * Stop the adapter and free any mbufs allocated to the
5965  * RX and TX lists.
5966  */
5967 static void
5968 bge_stop(struct bge_softc *sc)
5969 {
5970         struct ifnet *ifp;
5971
5972         BGE_LOCK_ASSERT(sc);
5973
5974         ifp = sc->bge_ifp;
5975
5976         callout_stop(&sc->bge_stat_ch);
5977
5978         /* Disable host interrupts. */
5979         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5980         bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5981
5982         /*
5983          * Tell firmware we're shutting down.
5984          */
5985         bge_stop_fw(sc);
5986         bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
5987
5988         /*
5989          * Disable all of the receiver blocks.
5990          */
5991         bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5992         bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5993         bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5994         if (BGE_IS_5700_FAMILY(sc))
5995                 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5996         bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5997         bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5998         bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5999
6000         /*
6001          * Disable all of the transmit blocks.
6002          */
6003         bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
6004         bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
6005         bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
6006         bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
6007         bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
6008         if (BGE_IS_5700_FAMILY(sc))
6009                 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
6010         bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
6011
6012         /*
6013          * Shut down all of the memory managers and related
6014          * state machines.
6015          */
6016         bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
6017         bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
6018         if (BGE_IS_5700_FAMILY(sc))
6019                 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
6020
6021         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
6022         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
6023         if (!(BGE_IS_5705_PLUS(sc))) {
6024                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
6025                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
6026         }
6027         /* Update MAC statistics. */
6028         if (BGE_IS_5705_PLUS(sc))
6029                 bge_stats_update_regs(sc);
6030
6031         bge_reset(sc);
6032         bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
6033         bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
6034
6035         /*
6036          * Keep the ASF firmware running if up.
6037          */
6038         if (sc->bge_asf_mode & ASF_STACKUP)
6039                 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6040         else
6041                 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6042
6043         /* Free the RX lists. */
6044         bge_free_rx_ring_std(sc);
6045
6046         /* Free jumbo RX list. */
6047         if (BGE_IS_JUMBO_CAPABLE(sc))
6048                 bge_free_rx_ring_jumbo(sc);
6049
6050         /* Free TX buffers. */
6051         bge_free_tx_ring(sc);
6052
6053         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
6054
6055         /* Clear MAC's link state (PHY may still have link UP). */
6056         if (bootverbose && sc->bge_link)
6057                 if_printf(sc->bge_ifp, "link DOWN\n");
6058         sc->bge_link = 0;
6059
6060         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
6061 }
6062
6063 /*
6064  * Stop all chip I/O so that the kernel's probe routines don't
6065  * get confused by errant DMAs when rebooting.
6066  */
6067 static int
6068 bge_shutdown(device_t dev)
6069 {
6070         struct bge_softc *sc;
6071
6072         sc = device_get_softc(dev);
6073         BGE_LOCK(sc);
6074         bge_stop(sc);
6075         BGE_UNLOCK(sc);
6076
6077         return (0);
6078 }
6079
6080 static int
6081 bge_suspend(device_t dev)
6082 {
6083         struct bge_softc *sc;
6084
6085         sc = device_get_softc(dev);
6086         BGE_LOCK(sc);
6087         bge_stop(sc);
6088         BGE_UNLOCK(sc);
6089
6090         return (0);
6091 }
6092
6093 static int
6094 bge_resume(device_t dev)
6095 {
6096         struct bge_softc *sc;
6097         struct ifnet *ifp;
6098
6099         sc = device_get_softc(dev);
6100         BGE_LOCK(sc);
6101         ifp = sc->bge_ifp;
6102         if (ifp->if_flags & IFF_UP) {
6103                 bge_init_locked(sc);
6104                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6105                         bge_start_locked(ifp);
6106         }
6107         BGE_UNLOCK(sc);
6108
6109         return (0);
6110 }
6111
6112 static void
6113 bge_link_upd(struct bge_softc *sc)
6114 {
6115         struct mii_data *mii;
6116         uint32_t link, status;
6117
6118         BGE_LOCK_ASSERT(sc);
6119
6120         /* Clear 'pending link event' flag. */
6121         sc->bge_link_evt = 0;
6122
6123         /*
6124          * Process link state changes.
6125          * Grrr. The link status word in the status block does
6126          * not work correctly on the BCM5700 rev AX and BX chips,
6127          * according to all available information. Hence, we have
6128          * to enable MII interrupts in order to properly obtain
6129          * async link changes. Unfortunately, this also means that
6130          * we have to read the MAC status register to detect link
6131          * changes, thereby adding an additional register access to
6132          * the interrupt handler.
6133          *
6134          * XXX: perhaps the link state detection procedure used for
6135          * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
6136          */
6137
6138         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6139             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
6140                 status = CSR_READ_4(sc, BGE_MAC_STS);
6141                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
6142                         mii = device_get_softc(sc->bge_miibus);
6143                         mii_pollstat(mii);
6144                         if (!sc->bge_link &&
6145                             mii->mii_media_status & IFM_ACTIVE &&
6146                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6147                                 sc->bge_link++;
6148                                 if (bootverbose)
6149                                         if_printf(sc->bge_ifp, "link UP\n");
6150                         } else if (sc->bge_link &&
6151                             (!(mii->mii_media_status & IFM_ACTIVE) ||
6152                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
6153                                 sc->bge_link = 0;
6154                                 if (bootverbose)
6155                                         if_printf(sc->bge_ifp, "link DOWN\n");
6156                         }
6157
6158                         /* Clear the interrupt. */
6159                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
6160                             BGE_EVTENB_MI_INTERRUPT);
6161                         bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
6162                             BRGPHY_MII_ISR);
6163                         bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
6164                             BRGPHY_MII_IMR, BRGPHY_INTRS);
6165                 }
6166                 return;
6167         }
6168
6169         if (sc->bge_flags & BGE_FLAG_TBI) {
6170                 status = CSR_READ_4(sc, BGE_MAC_STS);
6171                 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
6172                         if (!sc->bge_link) {
6173                                 sc->bge_link++;
6174                                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
6175                                         BGE_CLRBIT(sc, BGE_MAC_MODE,
6176                                             BGE_MACMODE_TBI_SEND_CFGS);
6177                                         DELAY(40);
6178                                 }
6179                                 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
6180                                 if (bootverbose)
6181                                         if_printf(sc->bge_ifp, "link UP\n");
6182                                 if_link_state_change(sc->bge_ifp,
6183                                     LINK_STATE_UP);
6184                         }
6185                 } else if (sc->bge_link) {
6186                         sc->bge_link = 0;
6187                         if (bootverbose)
6188                                 if_printf(sc->bge_ifp, "link DOWN\n");
6189                         if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
6190                 }
6191         } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
6192                 /*
6193                  * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
6194                  * bit in the status word always set. Work around this bug by
6195                  * reading the PHY link status directly.
6196                  */
6197                 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
6198
6199                 if (link != sc->bge_link ||
6200                     sc->bge_asicrev == BGE_ASICREV_BCM5700) {
6201                         mii = device_get_softc(sc->bge_miibus);
6202                         mii_pollstat(mii);
6203                         if (!sc->bge_link &&
6204                             mii->mii_media_status & IFM_ACTIVE &&
6205                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6206                                 sc->bge_link++;
6207                                 if (bootverbose)
6208                                         if_printf(sc->bge_ifp, "link UP\n");
6209                         } else if (sc->bge_link &&
6210                             (!(mii->mii_media_status & IFM_ACTIVE) ||
6211                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
6212                                 sc->bge_link = 0;
6213                                 if (bootverbose)
6214                                         if_printf(sc->bge_ifp, "link DOWN\n");
6215                         }
6216                 }
6217         } else {
6218                 /*
6219                  * For the remaining controllers, the PHY is driven by
6220                  * mii_tick, so we have to poll the link status here.
6221                  */
6222                 mii = device_get_softc(sc->bge_miibus);
6223                 mii_pollstat(mii);
6224                 bge_miibus_statchg(sc->bge_dev);
6225         }
6226
6227         /* Clear MAC attention status bits. */
6228         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
6229             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
6230             BGE_MACSTAT_LINK_CHANGED);
6231 }
6232
6233 static void
6234 bge_add_sysctls(struct bge_softc *sc)
6235 {
6236         struct sysctl_ctx_list *ctx;
6237         struct sysctl_oid_list *children;
6238         char tn[32];
6239         int unit;
6240
6241         ctx = device_get_sysctl_ctx(sc->bge_dev);
6242         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
6243
6244 #ifdef BGE_REGISTER_DEBUG
6245         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
6246             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
6247             "Debug Information");
6248
6249         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
6250             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
6251             "MAC Register Read");
6252
6253         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read",
6254             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_ape_read, "I",
6255             "APE Register Read");
6256
6257         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
6258             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
6259             "Memory Read");
6260
6261 #endif
6262
6263         unit = device_get_unit(sc->bge_dev);
6264         /*
6265          * A common design characteristic for many Broadcom client controllers
6266          * is that they only support a single outstanding DMA read operation
6267          * on the PCIe bus. This means that it will take twice as long to fetch
6268          * a TX frame that is split into header and payload buffers as it does
6269          * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
6270          * these controllers, coalescing buffers to reduce the number of memory
6271          * reads is an effective way to get maximum performance (~940Mbps).
6272          * Without collapsing TX buffers the maximum TCP bulk transfer
6273          * performance is about 850Mbps. However, forcibly coalescing
6274          * mbufs consumes a lot of CPU cycles, so leave it off by default.
6275          */
6276         sc->bge_forced_collapse = 0;
6277         snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
6278         TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
6279         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
6280             CTLFLAG_RW, &sc->bge_forced_collapse, 0,
6281             "Number of fragmented TX buffers of a frame allowed before "
6282             "forced collapsing");
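        /*
         * Usage sketch: this can be set from /boot/loader.conf, e.g.
         * dev.bge.0.forced_collapse="1", or at runtime with
         * sysctl dev.bge.0.forced_collapse=1.  A value of 1 defragments
         * each chain completely with m_defrag(); larger values cap the
         * chain at that many segments with m_collapse() (see bge_encap()).
         */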
6283
6284         sc->bge_msi = 1;
6285         snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
6286         TUNABLE_INT_FETCH(tn, &sc->bge_msi);
6287         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
6288             CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
6289
6290         /*
6291          * It seems all Broadcom controllers have a bug that can generate UDP
6292          * datagrams with checksum value 0 when TX UDP checksum offloading is
6293          * enabled.  Generating UDP checksum value 0 is an RFC 768 violation.
6294          * Even though the probability of generating such UDP datagrams is
6295          * low, I don't want to see FreeBSD boxes inject such datagrams
6296          * into the network, so disable UDP checksum offloading by default.
6297          * Users can still override this behavior by setting a sysctl
6298          * variable, dev.bge.0.forced_udpcsum.
6299          */
6300         sc->bge_forced_udpcsum = 0;
6301         snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
6302         TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
6303         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
6304             CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
6305             "Enable UDP checksum offloading even if controller can "
6306             "generate UDP checksum value 0");
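        /*
         * E.g. sysctl dev.bge.0.forced_udpcsum=1 re-enables TX UDP
         * checksum offloading for users willing to accept the quirk.
         */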
6307
6308         if (BGE_IS_5705_PLUS(sc))
6309                 bge_add_sysctl_stats_regs(sc, ctx, children);
6310         else
6311                 bge_add_sysctl_stats(sc, ctx, children);
6312 }
6313
6314 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
6315         SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
6316             sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
6317             desc)
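
/*
 * For example, BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children,
 * ifInErrors, "InputErrors") registers a read-only "InputErrors" node whose
 * handler, bge_sysctl_stats(), receives the offset of the ifInErrors field
 * inside struct bge_stats as its argument.
 */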
6318
6319 static void
6320 bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
6321     struct sysctl_oid_list *parent)
6322 {
6323         struct sysctl_oid *tree;
6324         struct sysctl_oid_list *children, *schildren;
6325
6326         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
6327             NULL, "BGE Statistics");
6328         schildren = children = SYSCTL_CHILDREN(tree);
6329         BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
6330             children, COSFramesDroppedDueToFilters,
6331             "FramesDroppedDueToFilters");
6332         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
6333             children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
6334         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
6335             children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
6336         BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
6337             children, nicNoMoreRxBDs, "NoMoreRxBDs");
6338         BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
6339             children, ifInDiscards, "InputDiscards");
6340         BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
6341             children, ifInErrors, "InputErrors");
6342         BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
6343             children, nicRecvThresholdHit, "RecvThresholdHit");
6344         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
6345             children, nicDmaReadQueueFull, "DmaReadQueueFull");
6346         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
6347             children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
6348         BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
6349             children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
6350         BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
6351             children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
6352         BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
6353             children, nicRingStatusUpdate, "RingStatusUpdate");
6354         BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
6355             children, nicInterrupts, "Interrupts");
6356         BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
6357             children, nicAvoidedInterrupts, "AvoidedInterrupts");
6358         BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
6359             children, nicSendThresholdHit, "SendThresholdHit");
6360
6361         tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
6362             NULL, "BGE RX Statistics");
6363         children = SYSCTL_CHILDREN(tree);
6364         BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
6365             children, rxstats.ifHCInOctets, "ifHCInOctets");
6366         BGE_SYSCTL_STAT(sc, ctx, "Fragments",
6367             children, rxstats.etherStatsFragments, "Fragments");
6368         BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
6369             children, rxstats.ifHCInUcastPkts, "UnicastPkts");
6370         BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
6371             children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
6372         BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
6373             children, rxstats.dot3StatsFCSErrors, "FCSErrors");
6374         BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
6375             children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
6376         BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
6377             children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
6378         BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
6379             children, rxstats.xoffPauseFramesReceived,
6380             "xoffPauseFramesReceived");
6381         BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
6382             children, rxstats.macControlFramesReceived,
6383             "ControlFramesReceived");
6384         BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
6385             children, rxstats.xoffStateEntered, "xoffStateEntered");
6386         BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
6387             children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
6388         BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
6389             children, rxstats.etherStatsJabbers, "Jabbers");
6390         BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
6391             children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
6392         BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
6393             children, rxstats.inRangeLengthError, "inRangeLengthError");
6394         BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
6395             children, rxstats.outRangeLengthError, "outRangeLengthError");
6396
6397         tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
6398             NULL, "BGE TX Statistics");
6399         children = SYSCTL_CHILDREN(tree);
6400         BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
6401             children, txstats.ifHCOutOctets, "ifHCOutOctets");
6402         BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
6403             children, txstats.etherStatsCollisions, "Collisions");
6404         BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
6405             children, txstats.outXonSent, "XonSent");
6406         BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
6407             children, txstats.outXoffSent, "XoffSent");
6408         BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
6409             children, txstats.flowControlDone, "flowControlDone");
6410         BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
6411             children, txstats.dot3StatsInternalMacTransmitErrors,
6412             "InternalMacTransmitErrors");
6413         BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
6414             children, txstats.dot3StatsSingleCollisionFrames,
6415             "SingleCollisionFrames");
6416         BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
6417             children, txstats.dot3StatsMultipleCollisionFrames,
6418             "MultipleCollisionFrames");
6419         BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
6420             children, txstats.dot3StatsDeferredTransmissions,
6421             "DeferredTransmissions");
6422         BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
6423             children, txstats.dot3StatsExcessiveCollisions,
6424             "ExcessiveCollisions");
6425         BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
6426             children, txstats.dot3StatsLateCollisions,
6427             "LateCollisions");
6428         BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
6429             children, txstats.ifHCOutUcastPkts, "UnicastPkts");
6430         BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
6431             children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
6432         BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
6433             children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
6434         BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
6435             children, txstats.dot3StatsCarrierSenseErrors,
6436             "CarrierSenseErrors");
6437         BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
6438             children, txstats.ifOutDiscards, "Discards");
6439         BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
6440             children, txstats.ifOutErrors, "Errors");
6441 }
6442
6443 #undef BGE_SYSCTL_STAT
6444
6445 #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d)    \
6446             SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
6447
6448 static void
6449 bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
6450     struct sysctl_oid_list *parent)
6451 {
6452         struct sysctl_oid *tree;
6453         struct sysctl_oid_list *child, *schild;
6454         struct bge_mac_stats *stats;
6455
6456         stats = &sc->bge_mac_stats;
6457         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
6458             NULL, "BGE Statistics");
6459         schild = child = SYSCTL_CHILDREN(tree);
6460         BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
6461             &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
6462         BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
6463             &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
6464         BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
6465             &stats->DmaWriteHighPriQueueFull,
6466             "NIC DMA Write High Priority Queue Full");
6467         BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
6468             &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
6469         BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
6470             &stats->InputDiscards, "Discarded Input Frames");
6471         BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
6472             &stats->InputErrors, "Input Errors");
6473         BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
6474             &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
6475
6476         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
6477             NULL, "BGE RX Statistics");
6478         child = SYSCTL_CHILDREN(tree);
6479         BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
6480             &stats->ifHCInOctets, "Inbound Octets");
6481         BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
6482             &stats->etherStatsFragments, "Fragments");
6483         BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
6484             &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
6485         BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
6486             &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
6487         BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
6488             &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
6489         BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
6490             &stats->dot3StatsFCSErrors, "FCS Errors");
6491         BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
6492             &stats->dot3StatsAlignmentErrors, "Alignment Errors");
6493         BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
6494             &stats->xonPauseFramesReceived, "XON Pause Frames Received");
6495         BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
6496             &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
6497         BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
6498             &stats->macControlFramesReceived, "MAC Control Frames Received");
6499         BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
6500             &stats->xoffStateEntered, "XOFF State Entered");
6501         BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
6502             &stats->dot3StatsFramesTooLong, "Frames Too Long");
6503         BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
6504             &stats->etherStatsJabbers, "Jabbers");
6505         BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
6506             &stats->etherStatsUndersizePkts, "Undersized Packets");
6507
6508         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
6509             NULL, "BGE TX Statistics");
6510         child = SYSCTL_CHILDREN(tree);
6511         BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
6512             &stats->ifHCOutOctets, "Outbound Octets");
6513         BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
6514             &stats->etherStatsCollisions, "TX Collisions");
6515         BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
6516             &stats->outXonSent, "XON Sent");
6517         BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
6518             &stats->outXoffSent, "XOFF Sent");
6519         BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
6520             &stats->dot3StatsInternalMacTransmitErrors,
6521             "Internal MAC TX Errors");
6522         BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
6523             &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
6524         BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
6525             &stats->dot3StatsMultipleCollisionFrames,
6526             "Multiple Collision Frames");
6527         BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
6528             &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
6529         BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
6530             &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
6531         BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
6532             &stats->dot3StatsLateCollisions, "Late Collisions");
6533         BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
6534             &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
6535         BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
6536             &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
6537         BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
6538             &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
6539 }
6540
6541 #undef  BGE_SYSCTL_STAT_ADD64
6542
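/*
 * Handler behind the BGE_SYSCTL_STAT() nodes above: returns one 32-bit
 * counter read live from the chip's statistics block through the PCI
 * memory window.  arg2 carries the counter's offset within the block;
 * each counter is laid out as a 64-bit bge_hostaddr pair, of which
 * only the low word is fetched.
 */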
6543 static int
6544 bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
6545 {
6546         struct bge_softc *sc;
6547         uint32_t result;
6548         int offset;
6549
6550         sc = (struct bge_softc *)arg1;
6551         offset = arg2;
6552         result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
6553             offsetof(bge_hostaddr, bge_addr_lo));
6554         return (sysctl_handle_int(oidp, &result, 0, req));
6555 }
6556
6557 #ifdef BGE_REGISTER_DEBUG
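/*
 * Debug-only handler: writing 1 to its node (e.g. dev.bge.0.debug_info)
 * hexdumps the status block, dumps the register window at 0x800-0x9ff
 * and prints the decoded hardware flags to the console.
 */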
6558 static int
6559 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
6560 {
6561         struct bge_softc *sc;
6562         uint16_t *sbdata;
6563         int error, result, sbsz;
6564         int i, j;
6565
6566         result = -1;
6567         error = sysctl_handle_int(oidp, &result, 0, req);
6568         if (error || (req->newptr == NULL))
6569                 return (error);
6570
6571         if (result == 1) {
6572                 sc = (struct bge_softc *)arg1;
6573
6574                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6575                     sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
6576                         sbsz = BGE_STATUS_BLK_SZ;
6577                 else
6578                         sbsz = 32;
6579                 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
6580                 printf("Status Block:\n");
6581                 BGE_LOCK(sc);
6582                 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
6583                     sc->bge_cdata.bge_status_map,
6584                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6585                 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
6586                         printf("%06x:", i);
6587                         for (j = 0; j < 8; j++)
6588                                 printf(" %04x", sbdata[i++]);
6589                         printf("\n");
6590                 }
6591
6592                 printf("Registers:\n");
6593                 for (i = 0x800; i < 0xA00; ) {
6594                         printf("%06x:", i);
6595                         for (j = 0; j < 8; j++) {
6596                                 printf(" %08x", CSR_READ_4(sc, i));
6597                                 i += 4;
6598                         }
6599                         printf("\n");
6600                 }
6601                 BGE_UNLOCK(sc);
6602
6603                 printf("Hardware Flags:\n");
6604                 if (BGE_IS_5717_PLUS(sc))
6605                         printf(" - 5717 Plus\n");
6606                 if (BGE_IS_5755_PLUS(sc))
6607                         printf(" - 5755 Plus\n");
6608                 if (BGE_IS_575X_PLUS(sc))
6609                         printf(" - 575X Plus\n");
6610                 if (BGE_IS_5705_PLUS(sc))
6611                         printf(" - 5705 Plus\n");
6612                 if (BGE_IS_5714_FAMILY(sc))
6613                         printf(" - 5714 Family\n");
6614                 if (BGE_IS_5700_FAMILY(sc))
6615                         printf(" - 5700 Family\n");
6616                 if (sc->bge_flags & BGE_FLAG_JUMBO)
6617                         printf(" - Supports Jumbo Frames\n");
6618                 if (sc->bge_flags & BGE_FLAG_PCIX)
6619                         printf(" - PCI-X Bus\n");
6620                 if (sc->bge_flags & BGE_FLAG_PCIE)
6621                         printf(" - PCI Express Bus\n");
6622                 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
6623                         printf(" - No 3 LEDs\n");
6624                 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
6625                         printf(" - RX Alignment Bug\n");
6626         }
6627
6628         return (error);
6629 }
6630
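/*
 * Debug-only handler: write a register offset below 0x8000 to the node
 * and the 32-bit value at that offset is printed to the console.
 */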
6631 static int
6632 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6633 {
6634         struct bge_softc *sc;
6635         int error;
6636         int result;     /* sysctl_handle_int() operates on a full int */
6637         uint32_t val;
6638
6639         result = -1;
6640         error = sysctl_handle_int(oidp, &result, 0, req);
6641         if (error || (req->newptr == NULL))
6642                 return (error);
6643
6644         if (result >= 0 && result < 0x8000) {
6645                 sc = (struct bge_softc *)arg1;
6646                 val = CSR_READ_4(sc, result);
6647                 printf("reg 0x%06X = 0x%08X\n", result, val);
6648         }
6649
6650         return (error);
6651 }
6652
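/*
 * Same as bge_sysctl_reg_read(), but reads through the APE register
 * space via APE_READ_4().
 */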
6653 static int
6654 bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS)
6655 {
6656         struct bge_softc *sc;
6657         int error;
6658         int result;     /* sysctl_handle_int() operates on a full int */
6659         uint32_t val;
6660
6661         result = -1;
6662         error = sysctl_handle_int(oidp, &result, 0, req);
6663         if (error || (req->newptr == NULL))
6664                 return (error);
6665
6666         if (result >= 0 && result < 0x8000) {
6667                 sc = (struct bge_softc *)arg1;
6668                 val = APE_READ_4(sc, result);
6669                 printf("reg 0x%06X = 0x%08X\n", result, val);
6670         }
6671
6672         return (error);
6673 }
6674
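/*
 * Same again, but reads NIC-local memory indirectly via
 * bge_readmem_ind().
 */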
6675 static int
6676 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
6677 {
6678         struct bge_softc *sc;
6679         int error;
6680         int result;     /* sysctl_handle_int() operates on a full int */
6681         uint32_t val;
6682
6683         result = -1;
6684         error = sysctl_handle_int(oidp, &result, 0, req);
6685         if (error || (req->newptr == NULL))
6686                 return (error);
6687
6688         if (result >= 0 && result < 0x8000) {
6689                 sc = (struct bge_softc *)arg1;
6690                 val = bge_readmem_ind(sc, result);
6691                 printf("mem 0x%06X = 0x%08X\n", result, val);
6692         }
6693
6694         return (error);
6695 }
6696 #endif
6697
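/*
 * First entry in the station-address lookup chain below.  Succeeds
 * (returns 0) only on sparc64, where the address can be taken from
 * Open Firmware, and only when firmware does not manage the address
 * itself (BGE_FLAG_EADDR clear).
 */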
6698 static int
6699 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6700 {
6701
6702         if (sc->bge_flags & BGE_FLAG_EADDR)
6703                 return (1);
6704
6705 #ifdef __sparc64__
6706         OF_getetheraddr(sc->bge_dev, ether_addr);
6707         return (0);
6708 #endif
6709         return (1);
6710 }
6711
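/*
 * Fetch the station address from the chip's SRAM mailbox.  A valid
 * address is preceded by the 0x484b ("HK") signature that firmware
 * stores in the upper half of the high word.
 */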
6712 static int
6713 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6714 {
6715         uint32_t mac_addr;
6716
6717         mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
6718         if ((mac_addr >> 16) == 0x484b) {
6719                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6720                 ether_addr[1] = (uint8_t)mac_addr;
6721                 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6722                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6723                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6724                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6725                 ether_addr[5] = (uint8_t)mac_addr;
6726                 return (0);
6727         }
6728         return (1);
6729 }
6730
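/*
 * Fetch the station address from NVRAM; the BCM5906 keeps it at a
 * different offset.  The address proper begins two bytes into the
 * entry, hence the + 2.
 */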
6731 static int
6732 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6733 {
6734         int mac_offset = BGE_EE_MAC_OFFSET;
6735
6736         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6737                 mac_offset = BGE_EE_MAC_OFFSET_5906;
6738
6739         return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6740             ETHER_ADDR_LEN));
6741 }
6742
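/*
 * Fetch the station address from EEPROM.  The BCM5906 is handled by
 * the NVRAM method instead, so fail immediately there.
 */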
6743 static int
6744 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6745 {
6746
6747         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6748                 return (1);
6749
6750         return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6751             ETHER_ADDR_LEN));
6752 }
6753
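/*
 * Walk the NULL-terminated table of address sources in order of
 * precedence until one reports success (0); return ENXIO if every
 * source fails.
 */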
6754 static int
6755 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6756 {
6757         static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6758                 /* NOTE: Order is critical */
6759                 bge_get_eaddr_fw,
6760                 bge_get_eaddr_mem,
6761                 bge_get_eaddr_nvram,
6762                 bge_get_eaddr_eeprom,
6763                 NULL
6764         };
6765         const bge_eaddr_fcn_t *func;
6766
6767         for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6768                 if ((*func)(sc, eaddr) == 0)
6769                         break;
6770         }
6771         return (*func == NULL ? ENXIO : 0);
6772 }