/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#ifdef __sparc64__
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#include <machine/ver.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP)
#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
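
/*
 * For reference, a kernel configuration that includes this driver
 * carries lines like the following (cf. GENERIC):
 *
 *	device	miibus		# MII bus support
 *	device	bge		# Broadcom BCM570xx Gigabit Ethernet
 */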

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
static const struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5717 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5718 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5719 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5722 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5723 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5725 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5727 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5756 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761E },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761SE },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5762 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5764 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5784 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785G },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57760 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57761 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57762 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57764 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57765 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57766 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57767 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57785 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57790 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57791 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57795 },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE4 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE5 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PP250450 },

	{ 0, 0 }
};

static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },
	{ FJTSU_VENDORID,	"Fujitsu" },

	{ 0, NULL }
};

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3,	"BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0,	"BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0,	"BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0,	"BCM5719 A0" },
	{ BGE_CHIPID_BCM5720_A0,	"BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0,	"BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1,	"BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2,	"BCM5755 A2" },
	{ BGE_CHIPID_BCM5722_A0,	"BCM5722 A0" },
	{ BGE_CHIPID_BCM5761_A0,	"BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1,	"BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0,	"BCM5762 A0" },
	{ BGE_CHIPID_BCM5784_A0,	"BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1,	"BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1,	"BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2,	"BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0,	"BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0,	"BCM57765 B0" },
	{ BGE_CHIPID_BCM57780_A0,	"BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1,	"BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	{ BGE_ASICREV_BCM5761,		"unknown BCM5761" },
	{ BGE_ASICREV_BCM5784,		"unknown BCM5784" },
	{ BGE_ASICREV_BCM5785,		"unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906,		"unknown BCM5906" },
	{ BGE_ASICREV_BCM57765,		"unknown BCM57765" },
	{ BGE_ASICREV_BCM57766,		"unknown BCM57766" },
	{ BGE_ASICREV_BCM57780,		"unknown BCM57780" },
	{ BGE_ASICREV_BCM5717,		"unknown BCM5717" },
	{ BGE_ASICREV_BCM5719,		"unknown BCM5719" },
	{ BGE_ASICREV_BCM5720,		"unknown BCM5720" },
	{ BGE_ASICREV_BCM5762,		"unknown BCM5762" },

	{ 0, NULL }
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)
#define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5717_PLUS)
#define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_57765_PLUS)

static uint32_t bge_chipid(device_t);
static const struct bge_vendor * bge_lookup_vendor(uint16_t);
static const struct bge_revision * bge_lookup_rev(uint32_t);

typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(struct bge_softc *);
static void bge_dma_free(struct bge_softc *);
static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);

static void bge_devinfo(struct bge_softc *);
static int bge_mbox_reorder(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *, uint16_t);
static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
static int bge_rxeof(struct bge_softc *, uint16_t, int);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_clear_regs(struct bge_softc *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static struct mbuf *bge_check_short_dma(struct mbuf *);
static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
    uint16_t *, uint16_t *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static int bge_msi_intr(void *);
static void bge_intr_task(void *, int);
static void bge_start_locked(if_t);
static void bge_start(if_t);
static int bge_ioctl(if_t, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static int bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(if_t);
static int bge_ifmedia_upd(if_t);
static void bge_ifmedia_sts(if_t, struct ifmediareq *);
static uint64_t bge_get_counter(if_t, ift_counter);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);
static void bge_setvlan(struct bge_softc *);

static __inline void bge_rxreuse_std(struct bge_softc *, int);
static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
static int bge_newbuf_std(struct bge_softc *, int);
static int bge_newbuf_jumbo(struct bge_softc *, int);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static uint32_t bge_dma_swap_options(struct bge_softc *);

static int bge_has_eaddr(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static int bge_poll(if_t ifp, enum poll_cmd cmd, int count);
#endif

#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

static void bge_ape_lock_init(struct bge_softc *);
static void bge_ape_read_fw_ver(struct bge_softc *);
static int bge_ape_lock(struct bge_softc *, int);
static void bge_ape_unlock(struct bge_softc *, int);
static void bge_ape_send_event(struct bge_softc *, uint32_t);
static void bge_ape_driver_state_change(struct bge_softc *, int);

/*
 * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
 * leak information to untrusted users.  It is also known to cause alignment
 * traps on certain architectures.
 */
#ifdef BGE_REGISTER_DEBUG
static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
#endif
static void bge_add_sysctls(struct bge_softc *);
static void bge_add_sysctl_stats_regs(struct bge_softc *,
    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static int bge_allow_asf = 1;

static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0,
	"Allow ASF mode if available");
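
/*
 * Note: CTLFLAG_RDTUN makes the sysctl above read-only at run time but
 * settable as a loader(8) tunable, e.g. hw.bge.allow_asf="0" in
 * loader.conf(5).
 */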

#define SPARC64_BLADE_1500_MODEL	"SUNW,Sun-Blade-1500"
#define SPARC64_BLADE_1500_PATH_BGE	"/pci@1f,700000/network@2"
#define SPARC64_BLADE_2500_MODEL	"SUNW,Sun-Blade-2500"
#define SPARC64_BLADE_2500_PATH_BGE	"/pci@1c,600000/network@3"
#define SPARC64_OFW_SUBVENDOR		"subsystem-vendor-id"

static int
bge_has_eaddr(struct bge_softc *sc)
{
#ifdef __sparc64__
	char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
	device_t dev;
	uint32_t subvendor;

	dev = sc->bge_dev;

	/*
	 * The on-board BGEs found in sun4u machines aren't fitted with
	 * an EEPROM which means that we have to obtain the MAC address
	 * via OFW and that some tests will always fail.  We distinguish
	 * such BGEs by the subvendor ID, which also has to be obtained
	 * from OFW instead of the PCI configuration space as the latter
	 * indicates Broadcom as the subvendor of the netboot interface.
	 * For early Blade 1500 and 2500 we even have to check the OFW
	 * device path as the subvendor ID always defaults to Broadcom
	 * there.
	 */
	if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
	    &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
	    (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
		return (0);
	memset(buf, 0, sizeof(buf));
	if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
		if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
			return (0);
		if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
			return (0);
	}
#endif
	return (1);
}

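/*
 * Indirect access to device registers and memory goes through the PCI
 * memory window: program the window base address register, access the
 * window data register, then reset the base so other consumers see a
 * consistent window.
 */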
static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;
	uint32_t val;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return (0);

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
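	/*
	 * On hosts that reorder posted PCI writes, read the mailbox back
	 * to flush the write out to the chip (see bge_mbox_reorder()).
	 */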
	if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
		CSR_READ_4(sc, off);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
static void
bge_ape_lock_init(struct bge_softc *sc)
{
	uint32_t bit, regbase;
	int i;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (sc->bge_func_addr == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << sc->bge_func_addr);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (sc->bge_func_addr) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		device_printf(sc->bge_dev,
		    "PHY lock not supported on this function\n");
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
static void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bge_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

static int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			return (0);
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		device_printf(sc->bge_dev, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return (EBUSY);
	}

	return (0);
}

static void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
			return;
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_GRC:
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_MEM:
		if (sc->bge_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << sc->bge_func_addr);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}

/*
 * Send an event to the APE firmware.
 */
static void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0)
		device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n",
		    event);
}

static void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

	ctx = arg;
	ctx->bge_busaddr = segs->ds_addr;
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(sc->bge_ifp, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

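	/* Reads return whole big-endian words; extract the addressed byte. */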
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion. */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

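	/* Reads return a 32-bit word; select the addressed byte. */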
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
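			/*
			 * Re-read after a short delay; the value is not
			 * stable until the busy bit has cleared.
			 */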
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
		    phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return (0);

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	/* Restore the autopoll bit if necessary. */
	if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == BGE_TIMEOUT)
		device_printf(sc->bge_dev,
		    "PHY write timed out (phy %d, reg %d, val 0x%04x)\n",
		    phy, reg, val);

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	uint32_t mac_mode, rx_mode, tx_mode;

	sc = device_get_softc(dev);
	if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->bge_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bge_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
				sc->bge_link = 1;
			else
				sc->bge_link = 0;
			break;
		default:
			sc->bge_link = 0;
			break;
		}
	} else
		sc->bge_link = 0;
	if (sc->bge_link == 0)
		return;

	/*
	 * APE firmware touches these registers to keep the MAC
	 * connected to the outside world.  Try to keep the
	 * accesses atomic.
	 */

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Set MAC flow control behavior to match link flow control settings. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i)
{
	struct mbuf *m;
	struct bge_rx_bd *r;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
	    (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	} else {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MCLBYTES;
	}
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
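	/*
	 * Swap the just-loaded spare map into this ring slot and recycle
	 * the old map as the new spare; a failed load above thus never
	 * leaves the slot unmapped.
	 */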
1351         map = sc->bge_cdata.bge_rx_std_dmamap[i];
1352         sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
1353         sc->bge_cdata.bge_rx_std_sparemap = map;
1354         sc->bge_cdata.bge_rx_std_chain[i] = m;
1355         sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
1356         r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
1357         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1358         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1359         r->bge_flags = BGE_RXBDFLAG_END;
1360         r->bge_len = segs[0].ds_len;
1361         r->bge_idx = i;
1362
1363         bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1364             sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1365
1366         return (0);
1367 }
1368
/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a 9k jumbo mbuf cluster to back the ring slot.
 */
1373 static int
1374 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1375 {
1376         bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1377         bus_dmamap_t map;
1378         struct bge_extrx_bd *r;
1379         struct mbuf *m;
1380         int error, nsegs;
1381
1382         MGETHDR(m, M_NOWAIT, MT_DATA);
1383         if (m == NULL)
1384                 return (ENOBUFS);
1385
1386         m_cljget(m, M_NOWAIT, MJUM9BYTES);
1387         if (!(m->m_flags & M_EXT)) {
1388                 m_freem(m);
1389                 return (ENOBUFS);
1390         }
1391         m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1392         if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1393                 m_adj(m, ETHER_ALIGN);
1394
1395         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1396             sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1397         if (error != 0) {
1398                 m_freem(m);
1399                 return (error);
1400         }
1401
1402         if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1403                 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1404                     sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1405                 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1406                     sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1407         }
1408         map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1409         sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1410             sc->bge_cdata.bge_rx_jumbo_sparemap;
1411         sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1412         sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1413         sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1414         sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1415         sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1416         sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1417
1418         /*
1419          * Fill in the extended RX buffer descriptor.
1420          */
1421         r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1422         r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1423         r->bge_idx = i;
1424         r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
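        /*
         * Each case below intentionally falls through to fill in the
         * lower-numbered segments as well.
         */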
1425         switch (nsegs) {
1426         case 4:
1427                 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1428                 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1429                 r->bge_len3 = segs[3].ds_len;
1430                 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1431         case 3:
1432                 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1433                 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1434                 r->bge_len2 = segs[2].ds_len;
1435                 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1436         case 2:
1437                 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1438                 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1439                 r->bge_len1 = segs[1].ds_len;
1440                 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1441         case 1:
1442                 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1443                 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1444                 r->bge_len0 = segs[0].ds_len;
1445                 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1446                 break;
1447         default:
1448                 panic("%s: %d segments\n", __func__, nsegs);
1449         }
1450
1451         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1452             sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1453
1454         return (0);
1455 }
1456
1457 static int
1458 bge_init_rx_ring_std(struct bge_softc *sc)
1459 {
1460         int error, i;
1461
1462         bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1463         sc->bge_std = 0;
1464         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1465                 if ((error = bge_newbuf_std(sc, i)) != 0)
1466                         return (error);
1467                 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1468         }
1469
1470         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1471             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1472
1473         sc->bge_std = 0;
1474         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1475
1476         return (0);
1477 }
1478
1479 static void
1480 bge_free_rx_ring_std(struct bge_softc *sc)
1481 {
1482         int i;
1483
1484         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1485                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1486                         bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1487                             sc->bge_cdata.bge_rx_std_dmamap[i],
1488                             BUS_DMASYNC_POSTREAD);
1489                         bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1490                             sc->bge_cdata.bge_rx_std_dmamap[i]);
1491                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1492                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1493                 }
1494                 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1495                     sizeof(struct bge_rx_bd));
1496         }
1497 }
1498
1499 static int
1500 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1501 {
1502         struct bge_rcb *rcb;
1503         int error, i;
1504
1505         bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1506         sc->bge_jumbo = 0;
1507         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1508                 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1509                         return (error);
1510                 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1511         }
1512
1513         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1514             sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1515
1516         sc->bge_jumbo = 0;
1517
1518         /* Enable the jumbo receive producer ring. */
1519         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1520         rcb->bge_maxlen_flags =
1521             BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1522         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1523
1524         bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1525
1526         return (0);
1527 }
1528
1529 static void
1530 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1531 {
1532         int i;
1533
1534         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1535                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1536                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1537                             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1538                             BUS_DMASYNC_POSTREAD);
1539                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1540                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1541                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1542                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1543                 }
1544                 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1545                     sizeof(struct bge_extrx_bd));
1546         }
1547 }
1548
1549 static void
1550 bge_free_tx_ring(struct bge_softc *sc)
1551 {
1552         int i;
1553
1554         if (sc->bge_ldata.bge_tx_ring == NULL)
1555                 return;
1556
1557         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1558                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1559                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1560                             sc->bge_cdata.bge_tx_dmamap[i],
1561                             BUS_DMASYNC_POSTWRITE);
1562                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1563                             sc->bge_cdata.bge_tx_dmamap[i]);
1564                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
1565                         sc->bge_cdata.bge_tx_chain[i] = NULL;
1566                 }
1567                 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1568                     sizeof(struct bge_tx_bd));
1569         }
1570 }
1571
1572 static int
1573 bge_init_tx_ring(struct bge_softc *sc)
1574 {
1575         sc->bge_txcnt = 0;
1576         sc->bge_tx_saved_considx = 0;
1577
1578         bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1579         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1580             sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1581
1582         /* Initialize transmit producer index for host-memory send ring. */
1583         sc->bge_tx_prodidx = 0;
1584         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1585
        /* 5700 B2 errata: repeat the mailbox write. */
1587         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1588                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1589
1590         /* NIC-memory send ring not used; initialize to zero. */
1591         bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
        /* 5700 B2 errata: repeat the mailbox write. */
1593         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1594                 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1595
1596         return (0);
1597 }
1598
1599 static void
1600 bge_setpromisc(struct bge_softc *sc)
1601 {
1602         if_t ifp;
1603
1604         BGE_LOCK_ASSERT(sc);
1605
1606         ifp = sc->bge_ifp;
1607
1608         /* Enable or disable promiscuous mode as needed. */
1609         if (if_getflags(ifp) & IFF_PROMISC)
1610                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1611         else
1612                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1613 }
1614
1615 static void
1616 bge_setmulti(struct bge_softc *sc)
1617 {
1618         if_t ifp;
1619         int mc_count = 0;
1620         uint32_t hashes[4] = { 0, 0, 0, 0 };
1621         int h, i, mcnt;
1622         unsigned char *mta;
1623
1624         BGE_LOCK_ASSERT(sc);
1625
1626         ifp = sc->bge_ifp;
1627
        if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
                for (i = 0; i < 4; i++)
                        CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
                return;
        }

        /*
         * Allocate the temporary multicast list only after the
         * all-multicast cases above, so an early return cannot leak it.
         */
        mc_count = if_multiaddr_count(ifp, -1);
        mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
            mc_count, M_DEVBUF, M_NOWAIT);
        if (mta == NULL) {
                device_printf(sc->bge_dev,
                    "Failed to allocate temporary multicast list\n");
                return;
        }
1643
1644         /* First, zot all the existing filters. */
1645         for (i = 0; i < 4; i++)
1646                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1647
1648         if_multiaddr_array(ifp, mta, &mcnt, mc_count);
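        /*
         * The hardware hashes each multicast address into a 128-bit
         * table spread across the four 32-bit BGE_MAR registers: the
         * low 7 bits of the little-endian CRC32 of the address select
         * a bit, with bits 6-5 picking the register and bits 4-0 the
         * bit within it.  For example, h = 0x4b sets bit 11 of
         * hashes[2].
         */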
        for (i = 0; i < mcnt; i++) {
1650                 h = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
1651                     ETHER_ADDR_LEN) & 0x7F;
1652                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1653         }
1654
1655         for (i = 0; i < 4; i++)
1656                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1657
1658         free(mta, M_DEVBUF);
1659 }
1660
1661 static void
1662 bge_setvlan(struct bge_softc *sc)
1663 {
1664         if_t ifp;
1665
1666         BGE_LOCK_ASSERT(sc);
1667
1668         ifp = sc->bge_ifp;
1669
1670         /* Enable or disable VLAN tag stripping as needed. */
1671         if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
1672                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1673         else
1674                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1675 }
1676
1677 static void
1678 bge_sig_pre_reset(struct bge_softc *sc, int type)
1679 {
1680
        /*
         * Some chips don't like this, so only do it if ASF is enabled.
         */
1684         if (sc->bge_asf_mode)
1685                 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1686
1687         if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1688                 switch (type) {
1689                 case BGE_RESET_START:
1690                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1691                             BGE_FW_DRV_STATE_START);
1692                         break;
1693                 case BGE_RESET_SHUTDOWN:
1694                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1695                             BGE_FW_DRV_STATE_UNLOAD);
1696                         break;
1697                 case BGE_RESET_SUSPEND:
1698                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1699                             BGE_FW_DRV_STATE_SUSPEND);
1700                         break;
1701                 }
1702         }
1703
1704         if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1705                 bge_ape_driver_state_change(sc, type);
1706 }
1707
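/*
 * Tell the ASF/IPMI firmware that the requested reset has completed,
 * and notify the APE on shutdown.
 */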
1708 static void
1709 bge_sig_post_reset(struct bge_softc *sc, int type)
1710 {
1711
1712         if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1713                 switch (type) {
1714                 case BGE_RESET_START:
1715                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1716                             BGE_FW_DRV_STATE_START_DONE);
1717                         /* START DONE */
1718                         break;
1719                 case BGE_RESET_SHUTDOWN:
1720                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1721                             BGE_FW_DRV_STATE_UNLOAD_DONE);
1722                         break;
1723                 }
1724         }
1725         if (type == BGE_RESET_SHUTDOWN)
1726                 bge_ape_driver_state_change(sc, type);
1727 }
1728
1729 static void
1730 bge_sig_legacy(struct bge_softc *sc, int type)
1731 {
1732
1733         if (sc->bge_asf_mode) {
1734                 switch (type) {
1735                 case BGE_RESET_START:
1736                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1737                             BGE_FW_DRV_STATE_START);
1738                         break;
1739                 case BGE_RESET_SHUTDOWN:
1740                         bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1741                             BGE_FW_DRV_STATE_UNLOAD);
1742                         break;
1743                 }
1744         }
1745 }
1746
1747 static void
1748 bge_stop_fw(struct bge_softc *sc)
1749 {
1750         int i;
1751
1752         if (sc->bge_asf_mode) {
1753                 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1754                 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1755                     CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1756
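                /*
                 * Poll for up to 1 ms (100 * 10 us) for the firmware
                 * to acknowledge the pause event.
                 */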
                for (i = 0; i < 100; i++) {
1758                         if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1759                             BGE_RX_CPU_DRV_EVENT))
1760                                 break;
1761                         DELAY(10);
1762                 }
1763         }
1764 }
1765
1766 static uint32_t
1767 bge_dma_swap_options(struct bge_softc *sc)
1768 {
1769         uint32_t dma_options;
1770
1771         dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1772             BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1773 #if BYTE_ORDER == BIG_ENDIAN
1774         dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1775 #endif
1776         return (dma_options);
1777 }
1778
1779 /*
1780  * Do endian, PCI and DMA initialization.
1781  */
1782 static int
1783 bge_chipinit(struct bge_softc *sc)
1784 {
1785         uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1786         uint16_t val;
1787         int i;
1788
1789         /* Set endianness before we access any non-PCI registers. */
1790         misc_ctl = BGE_INIT;
1791         if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1792                 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1793         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1794
1795         /*
1796          * Clear the MAC statistics block in the NIC's
1797          * internal memory.
1798          */
1799         for (i = BGE_STATS_BLOCK;
1800             i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1801                 BGE_MEMWIN_WRITE(sc, i, 0);
1802
1803         for (i = BGE_STATUS_BLOCK;
1804             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1805                 BGE_MEMWIN_WRITE(sc, i, 0);
1806
1807         if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1808                 /*
1809                  *  Fix data corruption caused by non-qword write with WB.
1810                  *  Fix master abort in PCI mode.
1811                  *  Fix PCI latency timer.
1812                  */
1813                 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1814                 val |= (1 << 10) | (1 << 12) | (1 << 13);
1815                 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1816         }
1817
1818         if (sc->bge_asicrev == BGE_ASICREV_BCM57765 ||
1819             sc->bge_asicrev == BGE_ASICREV_BCM57766) {
                /*
                 * For the 57766 and non-Ax versions of the 57765, the
                 * bootcode needs to set up the PCIe Fast Training
                 * Sequence (FTS) value to prevent transmit hangs.
                 */
1825                 if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) {
1826                         CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
1827                             CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
1828                             BGE_CPMU_PADRNG_CTL_RDIV2);
1829                 }
1830         }
1831
1832         /*
1833          * Set up the PCI DMA control register.
1834          */
1835         dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1836             BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1837         if (sc->bge_flags & BGE_FLAG_PCIE) {
1838                 if (sc->bge_mps >= 256)
1839                         dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1840                 else
1841                         dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1842         } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1843                 if (BGE_IS_5714_FAMILY(sc)) {
1844                         /* 256 bytes for read and write. */
1845                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1846                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1847                         dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1848                             BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1849                             BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1850                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1851                         /*
1852                          * In the BCM5703, the DMA read watermark should
1853                          * be set to less than or equal to the maximum
1854                          * memory read byte count of the PCI-X command
1855                          * register.
1856                          */
1857                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1858                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1859                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1860                         /* 1536 bytes for read, 384 bytes for write. */
1861                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1862                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1863                 } else {
1864                         /* 384 bytes for read and write. */
1865                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1866                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1867                             0x0F;
1868                 }
1869                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1870                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1871                         uint32_t tmp;
1872
1873                         /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1874                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1875                         if (tmp == 6 || tmp == 7)
1876                                 dma_rw_ctl |=
1877                                     BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1878
1879                         /* Set PCI-X DMA write workaround. */
1880                         dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1881                 }
1882         } else {
1883                 /* Conventional PCI bus: 256 bytes for read and write. */
1884                 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1885                     BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1886
1887                 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1888                     sc->bge_asicrev != BGE_ASICREV_BCM5750)
1889                         dma_rw_ctl |= 0x0F;
1890         }
1891         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1892             sc->bge_asicrev == BGE_ASICREV_BCM5701)
1893                 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1894                     BGE_PCIDMARWCTL_ASRT_ALL_BE;
1895         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1896             sc->bge_asicrev == BGE_ASICREV_BCM5704)
1897                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1898         if (BGE_IS_5717_PLUS(sc)) {
1899                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1900                 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1901                         dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1902                 /*
1903                  * Enable HW workaround for controllers that misinterpret
1904                  * a status tag update and leave interrupts permanently
1905                  * disabled.
1906                  */
1907                 if (!BGE_IS_57765_PLUS(sc) &&
1908                     sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1909                     sc->bge_asicrev != BGE_ASICREV_BCM5762)
1910                         dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1911         }
1912         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1913
1914         /*
1915          * Set up general mode register.
1916          */
1917         mode_ctl = bge_dma_swap_options(sc);
1918         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
1919             sc->bge_asicrev == BGE_ASICREV_BCM5762) {
1920                 /* Retain Host-2-BMC settings written by APE firmware. */
1921                 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1922                     (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1923                     BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1924                     BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1925         }
1926         mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1927             BGE_MODECTL_TX_NO_PHDR_CSUM;
1928
        /*
         * The BCM5701 B5 has a bug that causes data corruption when
         * 64-bit DMA reads are used: in combination with certain
         * bridges, such reads can be terminated early and then
         * completed later as 32-bit accesses.
         */
1935         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1936             sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1937                 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1938
1939         /*
1940          * Tell the firmware the driver is running
1941          */
1942         if (sc->bge_asf_mode & ASF_STACKUP)
1943                 mode_ctl |= BGE_MODECTL_STACKUP;
1944
1945         CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1946
1947         /*
1948          * Disable memory write invalidate.  Apparently it is not supported
1949          * properly by these devices.  Also ensure that INTx isn't disabled,
1950          * as these chips need it even when using MSI.
1951          */
1952         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1953             PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1954
1955         /* Set the timer prescaler (always 66 MHz). */
1956         CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1957
1958         /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1959         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1960                 DELAY(40);      /* XXX */
1961
1962                 /* Put PHY into ready state */
1963                 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1964                 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1965                 DELAY(40);
1966         }
1967
1968         return (0);
1969 }
1970
1971 static int
1972 bge_blockinit(struct bge_softc *sc)
1973 {
1974         struct bge_rcb *rcb;
1975         bus_size_t vrcb;
1976         bge_hostaddr taddr;
1977         uint32_t dmactl, rdmareg, val;
1978         int i, limit;
1979
1980         /*
1981          * Initialize the memory window pointer register so that
1982          * we can access the first 32K of internal NIC RAM. This will
1983          * allow us to set up the TX send ring RCBs and the RX return
1984          * ring RCBs, plus other things which live in NIC memory.
1985          */
1986         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1987
1988         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1989
1990         if (!(BGE_IS_5705_PLUS(sc))) {
1991                 /* Configure mbuf memory pool */
1992                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1993                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1994                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1995                 else
1996                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1997
1998                 /* Configure DMA resource pool */
1999                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
2000                     BGE_DMA_DESCRIPTORS);
2001                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
2002         }
2003
2004         /* Configure mbuf pool watermarks */
2005         if (BGE_IS_5717_PLUS(sc)) {
2006                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2007                 if (if_getmtu(sc->bge_ifp) > ETHERMTU) {
2008                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
2009                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
2010                 } else {
2011                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
2012                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
2013                 }
2014         } else if (!BGE_IS_5705_PLUS(sc)) {
2015                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
2016                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
2017                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2018         } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2019                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2020                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
2021                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
2022         } else {
2023                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
2024                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
2025                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
2026         }
2027
2028         /* Configure DMA resource watermarks */
2029         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
2030         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
2031
2032         /* Enable buffer manager */
2033         val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
2034         /*
2035          * Change the arbitration algorithm of TXMBUF read request to
         * round-robin instead of priority-based for the BCM5719.  When
         * the TXFIFO is almost empty, RDMA will hold its request until
         * the TXFIFO is no longer almost empty.
2039          */
2040         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2041                 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
2042         CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
2043
2044         /* Poll for buffer manager start indication */
2045         for (i = 0; i < BGE_TIMEOUT; i++) {
2046                 DELAY(10);
2047                 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2048                         break;
2049         }
2050
2051         if (i == BGE_TIMEOUT) {
2052                 device_printf(sc->bge_dev, "buffer manager failed to start\n");
2053                 return (ENXIO);
2054         }
2055
2056         /* Enable flow-through queues */
2057         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2058         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2059
2060         /* Wait until queue initialization is complete */
2061         for (i = 0; i < BGE_TIMEOUT; i++) {
2062                 DELAY(10);
2063                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2064                         break;
2065         }
2066
2067         if (i == BGE_TIMEOUT) {
2068                 device_printf(sc->bge_dev, "flow-through queue init failed\n");
2069                 return (ENXIO);
2070         }
2071
2072         /*
2073          * Summary of rings supported by the controller:
2074          *
2075          * Standard Receive Producer Ring
2076          * - This ring is used to feed receive buffers for "standard"
2077          *   sized frames (typically 1536 bytes) to the controller.
2078          *
2079          * Jumbo Receive Producer Ring
2080          * - This ring is used to feed receive buffers for jumbo sized
2081          *   frames (i.e. anything bigger than the "standard" frames)
2082          *   to the controller.
2083          *
2084          * Mini Receive Producer Ring
2085          * - This ring is used to feed receive buffers for "mini"
2086          *   sized frames to the controller.
2087          * - This feature required external memory for the controller
2088          *   but was never used in a production system.  Should always
2089          *   be disabled.
2090          *
2091          * Receive Return Ring
         * - After the controller has placed an incoming frame into a
         *   receive buffer, that buffer is moved into a receive return
         *   ring.  The driver is then responsible for passing the
         *   buffer up to the stack.  Many versions of the controller
         *   support multiple RR rings.
2097          *
2098          * Send Ring
2099          * - This ring is used for outgoing frames.  Many versions of
2100          *   the controller support multiple send rings.
2101          */
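        /*
         * This driver uses a single send ring and a single receive
         * return ring; the jumbo producer ring is enabled only once
         * the MTU is raised high enough to require it.
         */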
2102
2103         /* Initialize the standard receive producer ring control block. */
2104         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
2105         rcb->bge_hostaddr.bge_addr_lo =
2106             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
2107         rcb->bge_hostaddr.bge_addr_hi =
2108             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
2109         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2110             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
2111         if (BGE_IS_5717_PLUS(sc)) {
2112                 /*
2113                  * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
2114                  * Bits 15-2 : Maximum RX frame size
                 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2116                  * Bit 0     : Reserved
2117                  */
2118                 rcb->bge_maxlen_flags =
2119                     BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
2120         } else if (BGE_IS_5705_PLUS(sc)) {
2121                 /*
2122                  * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2123                  * Bits 15-2 : Reserved (should be 0)
2124                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2125                  * Bit 0     : Reserved
2126                  */
2127                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2128         } else {
2129                 /*
2130                  * Ring size is always XXX entries
2131                  * Bits 31-16: Maximum RX frame size
2132                  * Bits 15-2 : Reserved (should be 0)
2133                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2134                  * Bit 0     : Reserved
2135                  */
2136                 rcb->bge_maxlen_flags =
2137                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
2138         }
2139         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2140             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2141             sc->bge_asicrev == BGE_ASICREV_BCM5720)
2142                 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2143         else
2144                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2145         /* Write the standard receive producer ring control block. */
2146         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2147         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2148         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2149         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2150
2151         /* Reset the standard receive producer ring producer index. */
2152         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2153
2154         /*
2155          * Initialize the jumbo RX producer ring control
2156          * block.  We set the 'ring disabled' bit in the
2157          * flags field until we're actually ready to start
2158          * using this ring (i.e. once we set the MTU
2159          * high enough to require it).
2160          */
2161         if (BGE_IS_JUMBO_CAPABLE(sc)) {
2162                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
2163                 /* Get the jumbo receive producer ring RCB parameters. */
2164                 rcb->bge_hostaddr.bge_addr_lo =
2165                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
2166                 rcb->bge_hostaddr.bge_addr_hi =
2167                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
2168                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2169                     sc->bge_cdata.bge_rx_jumbo_ring_map,
2170                     BUS_DMASYNC_PREREAD);
2171                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2172                     BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2173                 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2174                     sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2175                     sc->bge_asicrev == BGE_ASICREV_BCM5720)
2176                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2177                 else
2178                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2179                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2180                     rcb->bge_hostaddr.bge_addr_hi);
2181                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2182                     rcb->bge_hostaddr.bge_addr_lo);
2183                 /* Program the jumbo receive producer ring RCB parameters. */
2184                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2185                     rcb->bge_maxlen_flags);
2186                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2187                 /* Reset the jumbo receive producer ring producer index. */
2188                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2189         }
2190
2191         /* Disable the mini receive producer ring RCB. */
2192         if (BGE_IS_5700_FAMILY(sc)) {
2193                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
2194                 rcb->bge_maxlen_flags =
2195                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2196                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2197                     rcb->bge_maxlen_flags);
2198                 /* Reset the mini receive producer ring producer index. */
2199                 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2200         }
2201
2202         /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2203         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2204                 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2205                     sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2206                     sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2207                         CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2208                             (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2209         }
2210         /*
2211          * The BD ring replenish thresholds control how often the
2212          * hardware fetches new BD's from the producer rings in host
         * memory.  Setting the value too low on a busy system can
         * starve the hardware and reduce the throughput.
         *
         * Set the BD ring replenish thresholds.  The recommended
2217          * values are 1/8th the number of descriptors allocated to
2218          * each ring.
2219          * XXX The 5754 requires a lower threshold, so it might be a
2220          * requirement of all 575x family chips.  The Linux driver sets
2221          * the lower threshold for all 5705 family chips as well, but there
2222          * are reports that it might not need to be so strict.
2223          *
2224          * XXX Linux does some extra fiddling here for the 5906 parts as
2225          * well.
2226          */
2227         if (BGE_IS_5705_PLUS(sc))
2228                 val = 8;
2229         else
2230                 val = BGE_STD_RX_RING_CNT / 8;
2231         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
2232         if (BGE_IS_JUMBO_CAPABLE(sc))
2233                 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
2234                     BGE_JUMBO_RX_RING_CNT/8);
2235         if (BGE_IS_5717_PLUS(sc)) {
2236                 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
2237                 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
2238         }
2239
2240         /*
2241          * Disable all send rings by setting the 'ring disabled' bit
2242          * in the flags field of all the TX send ring control blocks,
2243          * located in NIC memory.
2244          */
2245         if (!BGE_IS_5705_PLUS(sc))
2246                 /* 5700 to 5704 had 16 send rings. */
2247                 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2248         else if (BGE_IS_57765_PLUS(sc) ||
2249             sc->bge_asicrev == BGE_ASICREV_BCM5762)
2250                 limit = 2;
2251         else if (BGE_IS_5717_PLUS(sc))
2252                 limit = 4;
2253         else
2254                 limit = 1;
2255         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2256         for (i = 0; i < limit; i++) {
2257                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2258                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2259                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2260                 vrcb += sizeof(struct bge_rcb);
2261         }
2262
2263         /* Configure send ring RCB 0 (we use only the first ring) */
2264         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2265         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
2266         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2267         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2268         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2269             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2270             sc->bge_asicrev == BGE_ASICREV_BCM5720)
2271                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
2272         else
2273                 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
2274                     BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2275         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2276             BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2277
2278         /*
2279          * Disable all receive return rings by setting the
         * 'ring disabled' bit in the flags field of all the receive
2281          * return ring control blocks, located in NIC memory.
2282          */
2283         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2284             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2285             sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2286                 /* Should be 17, use 16 until we get an SRAM map. */
2287                 limit = 16;
2288         } else if (!BGE_IS_5705_PLUS(sc))
2289                 limit = BGE_RX_RINGS_MAX;
2290         else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2291             sc->bge_asicrev == BGE_ASICREV_BCM5762 ||
2292             BGE_IS_57765_PLUS(sc))
2293                 limit = 4;
2294         else
2295                 limit = 1;
2296         /* Disable all receive return rings. */
2297         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2298         for (i = 0; i < limit; i++) {
2299                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
2300                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
2301                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2302                     BGE_RCB_FLAG_RING_DISABLED);
2303                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2304                 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2305                     (i * (sizeof(uint64_t))), 0);
2306                 vrcb += sizeof(struct bge_rcb);
2307         }
2308
2309         /*
2310          * Set up receive return ring 0.  Note that the NIC address
2311          * for RX return rings is 0x0.  The return rings live entirely
2312          * within the host, so the nicaddr field in the RCB isn't used.
2313          */
2314         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2315         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
2316         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2317         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2318         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2319         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2320             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2321
2322         /* Set random backoff seed for TX */
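        /*
         * Summing the bytes of the station address gives each
         * interface a distinct seed, so multiple controllers on the
         * same segment are unlikely to choose identical backoff slots.
         */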
2323         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2324             (IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
2325             IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
2326             IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5]) &
2327             BGE_TX_BACKOFF_SEED_MASK);
2328
2329         /* Set inter-packet gap */
2330         val = 0x2620;
2331         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
2332             sc->bge_asicrev == BGE_ASICREV_BCM5762)
2333                 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2334                     (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2335         CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2336
2337         /*
2338          * Specify which ring to use for packets that don't match
2339          * any RX rules.
2340          */
2341         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2342
2343         /*
2344          * Configure number of RX lists. One interrupt distribution
2345          * list, sixteen active lists, one bad frames class.
2346          */
2347         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2348
        /* Initialize RX list placement stats mask. */
2350         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2351         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2352
2353         /* Disable host coalescing until we get it set up */
2354         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2355
2356         /* Poll to make sure it's shut down. */
2357         for (i = 0; i < BGE_TIMEOUT; i++) {
2358                 DELAY(10);
2359                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2360                         break;
2361         }
2362
2363         if (i == BGE_TIMEOUT) {
2364                 device_printf(sc->bge_dev,
2365                     "host coalescing engine failed to idle\n");
2366                 return (ENXIO);
2367         }
2368
2369         /* Set up host coalescing defaults */
2370         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2371         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2372         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2373         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2374         if (!(BGE_IS_5705_PLUS(sc))) {
2375                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2376                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2377         }
2378         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
2379         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
2380
2381         /* Set up address of statistics block */
2382         if (!(BGE_IS_5705_PLUS(sc))) {
2383                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
2384                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
2385                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
2386                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
2387                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2388                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2389                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2390         }
2391
2392         /* Set up address of status block */
2393         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
2394             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
2395         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
2396             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
2397
2398         /* Set up status block size. */
2399         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2400             sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2401                 val = BGE_STATBLKSZ_FULL;
2402                 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2403         } else {
2404                 val = BGE_STATBLKSZ_32BYTE;
2405                 bzero(sc->bge_ldata.bge_status_block, 32);
2406         }
2407         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2408             sc->bge_cdata.bge_status_map,
2409             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2410
2411         /* Turn on host coalescing state machine */
2412         CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2413
2414         /* Turn on RX BD completion state machine and enable attentions */
2415         CSR_WRITE_4(sc, BGE_RBDC_MODE,
2416             BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2417
2418         /* Turn on RX list placement state machine */
2419         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2420
2421         /* Turn on RX list selector state machine. */
2422         if (!(BGE_IS_5705_PLUS(sc)))
2423                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2424
2425         /* Turn on DMA, clear stats. */
2426         val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2427             BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2428             BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2429             BGE_MACMODE_FRMHDR_DMA_ENB;
2430
2431         if (sc->bge_flags & BGE_FLAG_TBI)
2432                 val |= BGE_PORTMODE_TBI;
2433         else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2434                 val |= BGE_PORTMODE_GMII;
2435         else
2436                 val |= BGE_PORTMODE_MII;
2437
2438         /* Allow APE to send/receive frames. */
2439         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2440                 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2441
2442         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2443         DELAY(40);
2444
2445         /* Set misc. local control, enable interrupts on attentions */
2446         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2447
2448 #ifdef notdef
2449         /* Assert GPIO pins for PHY reset */
2450         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2451             BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2452         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2453             BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2454 #endif
2455
2456         /* Turn on DMA completion state machine */
2457         if (!(BGE_IS_5705_PLUS(sc)))
2458                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2459
2460         val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2461
2462         /* Enable host coalescing bug fix. */
2463         if (BGE_IS_5755_PLUS(sc))
2464                 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2465
2466         /* Request larger DMA burst size to get better performance. */
2467         if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2468                 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2469
2470         /* Turn on write DMA state machine */
2471         CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2472         DELAY(40);
2473
2474         /* Turn on read DMA state machine */
2475         val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2476
2477         if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2478                 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2479
2480         if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2481             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2482             sc->bge_asicrev == BGE_ASICREV_BCM57780)
2483                 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2484                     BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2485                     BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2486         if (sc->bge_flags & BGE_FLAG_PCIE)
2487                 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2488         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2489                 val |= BGE_RDMAMODE_TSO4_ENABLE;
2490                 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2491                     sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2492                     sc->bge_asicrev == BGE_ASICREV_BCM57780)
2493                         val |= BGE_RDMAMODE_TSO6_ENABLE;
2494         }
2495
2496         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
2497             sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2498                 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2499                         BGE_RDMAMODE_H2BNC_VLAN_DET;
2500                 /*
2501                  * Allow multiple outstanding read requests from
2502                  * non-LSO read DMA engine.
2503                  */
2504                 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2505         }
2506
2507         if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2508             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2509             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2510             sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2511             BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2512                 if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
2513                         rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2514                 else
2515                         rdmareg = BGE_RDMA_RSRVCTRL;
2516                 dmactl = CSR_READ_4(sc, rdmareg);
2517                 /*
2518                  * Adjust tx margin to prevent TX data corruption and
2519                  * fix internal FIFO overflow.
2520                  */
2521                 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2522                     sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2523                         dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2524                             BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2525                             BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2526                         dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2527                             BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2528                             BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2529                 }
2530                 /*
2531                  * Enable fix for read DMA FIFO overruns.
                 * The fix is to limit the number of RX BDs
                 * the hardware will fetch at a time.
2534                  */
2535                 CSR_WRITE_4(sc, rdmareg, dmactl |
2536                     BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2537         }
2538
2539         if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2540                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2541                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2542                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2543                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2544         } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2545                 /*
2546                  * Allow 4KB burst length reads for non-LSO frames.
2547                  * Enable 512B burst length reads for buffer descriptors.
2548                  */
2549                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2550                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2551                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2552                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2553         } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2554                 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2555                     CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2556                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2557                     BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2558         }
2559
2560         CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2561         DELAY(40);
2562
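        /*
         * Workaround for the read DMA engine bug on 5719/5720 class
         * chips (BGE_FLAG_RDMA_BUG): if any of the first half of the
         * RDMA channel length registers reports a length larger than a
         * normal frame, enable the chip-specific TX length workaround
         * bit.
         */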
2563         if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
2564                 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2565                         val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2566                         if ((val & 0xFFFF) > BGE_FRAMELEN)
2567                                 break;
2568                         if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
2569                                 break;
2570                 }
2571                 if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2572                         val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2573                         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2574                                 val |= BGE_RDMA_TX_LENGTH_WA_5719;
2575                         else
2576                                 val |= BGE_RDMA_TX_LENGTH_WA_5720;
2577                         CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2578                 }
2579         }
2580
2581         /* Turn on RX data completion state machine */
2582         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2583
2584         /* Turn on RX BD initiator state machine */
2585         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2586
2587         /* Turn on RX data and RX BD initiator state machine */
2588         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2589
2590         /* Turn on Mbuf cluster free state machine */
2591         if (!(BGE_IS_5705_PLUS(sc)))
2592                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2593
2594         /* Turn on send BD completion state machine */
2595         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2596
2597         /* Turn on send data completion state machine */
2598         val = BGE_SDCMODE_ENABLE;
2599         if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2600                 val |= BGE_SDCMODE_CDELAY;
2601         CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2602
2603         /* Turn on send data initiator state machine */
2604         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2605                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2606                     BGE_SDIMODE_HW_LSO_PRE_DMA);
2607         else
2608                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2609
2610         /* Turn on send BD initiator state machine */
2611         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2612
2613         /* Turn on send BD selector state machine */
2614         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2615
2616         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2617         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2618             BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2619
2620         /* ack/clear link change events */
2621         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2622             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2623             BGE_MACSTAT_LINK_CHANGED);
2624         CSR_WRITE_4(sc, BGE_MI_STS, 0);
2625
2626         /*
2627          * For devices that use auto polling, enable attention when
2628          * the link changes state.
2629          */
2630         if (sc->bge_flags & BGE_FLAG_TBI) {
2631                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2632         } else {
2633                 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2634                         CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2635                         DELAY(80);
2636                 }
2637                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2638                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2639                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2640                             BGE_EVTENB_MI_INTERRUPT);
2641         }
2642
2643         /*
2644          * Clear any pending link state attention.
2645          * Otherwise some link state change events may be lost until the
2646          * attention is cleared by the bge_intr() -> bge_link_upd() sequence.
2647          * It's not necessary on newer BCM chips; perhaps enabling link
2648          * state change attentions implies clearing pending attention.
2649          */
2650         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2651             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2652             BGE_MACSTAT_LINK_CHANGED);
2653
2654         /* Enable link state change attentions. */
2655         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2656
2657         return (0);
2658 }
2659
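/*
 * Map a chip ID to a human-readable revision name.  Exact chip IDs in
 * bge_revisions are tried first; failing that, we fall back to the
 * bge_majorrevs table keyed on the major ASIC revision,
 * BGE_ASICREV(chipid), so a specific chip-ID entry always wins over the
 * broader per-ASIC entry.
 */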
2660 static const struct bge_revision *
2661 bge_lookup_rev(uint32_t chipid)
2662 {
2663         const struct bge_revision *br;
2664
2665         for (br = bge_revisions; br->br_name != NULL; br++) {
2666                 if (br->br_chipid == chipid)
2667                         return (br);
2668         }
2669
2670         for (br = bge_majorrevs; br->br_name != NULL; br++) {
2671                 if (br->br_chipid == BGE_ASICREV(chipid))
2672                         return (br);
2673         }
2674
2675         return (NULL);
2676 }
2677
2678 static const struct bge_vendor *
2679 bge_lookup_vendor(uint16_t vid)
2680 {
2681         const struct bge_vendor *v;
2682
2683         for (v = bge_vendors; v->v_name != NULL; v++)
2684                 if (v->v_id == vid)
2685                         return (v);
2686
2687         return (NULL);
2688 }
2689
2690 static uint32_t
2691 bge_chipid(device_t dev)
2692 {
2693         uint32_t id;
2694
2695         id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2696             BGE_PCIMISCCTL_ASICREV_SHIFT;
2697         if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2698                 /*
2699                  * Find the ASIC revision.  Different chips use different
2700                  * registers.
2701                  */
2702                 switch (pci_get_device(dev)) {
2703                 case BCOM_DEVICEID_BCM5717:
2704                 case BCOM_DEVICEID_BCM5718:
2705                 case BCOM_DEVICEID_BCM5719:
2706                 case BCOM_DEVICEID_BCM5720:
2707                 case BCOM_DEVICEID_BCM5725:
2708                 case BCOM_DEVICEID_BCM5727:
2709                 case BCOM_DEVICEID_BCM5762:
2710                 case BCOM_DEVICEID_BCM57764:
2711                 case BCOM_DEVICEID_BCM57767:
2712                 case BCOM_DEVICEID_BCM57787:
2713                         id = pci_read_config(dev,
2714                             BGE_PCI_GEN2_PRODID_ASICREV, 4);
2715                         break;
2716                 case BCOM_DEVICEID_BCM57761:
2717                 case BCOM_DEVICEID_BCM57762:
2718                 case BCOM_DEVICEID_BCM57765:
2719                 case BCOM_DEVICEID_BCM57766:
2720                 case BCOM_DEVICEID_BCM57781:
2721                 case BCOM_DEVICEID_BCM57782:
2722                 case BCOM_DEVICEID_BCM57785:
2723                 case BCOM_DEVICEID_BCM57786:
2724                 case BCOM_DEVICEID_BCM57791:
2725                 case BCOM_DEVICEID_BCM57795:
2726                         id = pci_read_config(dev,
2727                             BGE_PCI_GEN15_PRODID_ASICREV, 4);
2728                         break;
2729                 default:
2730                         id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
2731                 }
2732         }
2733         return (id);
2734 }
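/*
 * The value returned by bge_chipid() is the raw chip ID; bge_attach()
 * derives sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid) and
 * sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid) from it.
 */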
2735
2736 /*
2737  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2738  * against our list and return its name if we find a match.
2739  *
2740  * Note that since the Broadcom controller contains VPD support, we
2741  * try to get the device name string from the controller itself instead
2742  * of the compiled-in string. It guarantees we'll always announce the
2743  * right product name. We fall back to the compiled-in string when
2744  * VPD is unavailable or corrupt.
2745  */
2746 static int
2747 bge_probe(device_t dev)
2748 {
2749         char buf[96];
2750         char model[64];
2751         const struct bge_revision *br;
2752         const char *pname;
2753         struct bge_softc *sc;
2754         const struct bge_type *t = bge_devs;
2755         const struct bge_vendor *v;
2756         uint32_t id;
2757         uint16_t did, vid;
2758
2759         sc = device_get_softc(dev);
2760         sc->bge_dev = dev;
2761         vid = pci_get_vendor(dev);
2762         did = pci_get_device(dev);
2763         while (t->bge_vid != 0) {
2764                 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2765                         id = bge_chipid(dev);
2766                         br = bge_lookup_rev(id);
2767                         if (bge_has_eaddr(sc) &&
2768                             pci_get_vpd_ident(dev, &pname) == 0)
2769                                 snprintf(model, sizeof(model), "%s", pname);
2770                         else {
2771                                 v = bge_lookup_vendor(vid);
2772                                 snprintf(model, sizeof(model), "%s %s",
2773                                     v != NULL ? v->v_name : "Unknown",
2774                                     br != NULL ? br->br_name :
2775                                     "NetXtreme/NetLink Ethernet Controller");
2776                         }
2777                         snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x",
2778                             model, br != NULL ? "" : "unknown ", id);
2779                         device_set_desc_copy(dev, buf);
2780                         return (BUS_PROBE_DEFAULT);
2781                 }
2782                 t++;
2783         }
2784
2785         return (ENXIO);
2786 }
2787
2788 static void
2789 bge_dma_free(struct bge_softc *sc)
2790 {
2791         int i;
2792
2793         /* Destroy DMA maps for RX buffers. */
2794         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2795                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2796                         bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2797                             sc->bge_cdata.bge_rx_std_dmamap[i]);
2798         }
2799         if (sc->bge_cdata.bge_rx_std_sparemap)
2800                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2801                     sc->bge_cdata.bge_rx_std_sparemap);
2802
2803         /* Destroy DMA maps for jumbo RX buffers. */
2804         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2805                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2806                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2807                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2808         }
2809         if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2810                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2811                     sc->bge_cdata.bge_rx_jumbo_sparemap);
2812
2813         /* Destroy DMA maps for TX buffers. */
2814         for (i = 0; i < BGE_TX_RING_CNT; i++) {
2815                 if (sc->bge_cdata.bge_tx_dmamap[i])
2816                         bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2817                             sc->bge_cdata.bge_tx_dmamap[i]);
2818         }
2819
2820         if (sc->bge_cdata.bge_rx_mtag)
2821                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2822         if (sc->bge_cdata.bge_mtag_jumbo)
2823                 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2824         if (sc->bge_cdata.bge_tx_mtag)
2825                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2826
2827         /* Destroy standard RX ring. */
2828         if (sc->bge_ldata.bge_rx_std_ring_paddr)
2829                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2830                     sc->bge_cdata.bge_rx_std_ring_map);
2831         if (sc->bge_ldata.bge_rx_std_ring)
2832                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2833                     sc->bge_ldata.bge_rx_std_ring,
2834                     sc->bge_cdata.bge_rx_std_ring_map);
2835
2836         if (sc->bge_cdata.bge_rx_std_ring_tag)
2837                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2838
2839         /* Destroy jumbo RX ring. */
2840         if (sc->bge_ldata.bge_rx_jumbo_ring_paddr)
2841                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2842                     sc->bge_cdata.bge_rx_jumbo_ring_map);
2843
2844         if (sc->bge_ldata.bge_rx_jumbo_ring)
2845                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2846                     sc->bge_ldata.bge_rx_jumbo_ring,
2847                     sc->bge_cdata.bge_rx_jumbo_ring_map);
2848
2849         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2850                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2851
2852         /* Destroy RX return ring. */
2853         if (sc->bge_ldata.bge_rx_return_ring_paddr)
2854                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2855                     sc->bge_cdata.bge_rx_return_ring_map);
2856
2857         if (sc->bge_ldata.bge_rx_return_ring)
2858                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2859                     sc->bge_ldata.bge_rx_return_ring,
2860                     sc->bge_cdata.bge_rx_return_ring_map);
2861
2862         if (sc->bge_cdata.bge_rx_return_ring_tag)
2863                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2864
2865         /* Destroy TX ring. */
2866         if (sc->bge_ldata.bge_tx_ring_paddr)
2867                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2868                     sc->bge_cdata.bge_tx_ring_map);
2869
2870         if (sc->bge_ldata.bge_tx_ring)
2871                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2872                     sc->bge_ldata.bge_tx_ring,
2873                     sc->bge_cdata.bge_tx_ring_map);
2874
2875         if (sc->bge_cdata.bge_tx_ring_tag)
2876                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2877
2878         /* Destroy status block. */
2879         if (sc->bge_ldata.bge_status_block_paddr)
2880                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2881                     sc->bge_cdata.bge_status_map);
2882
2883         if (sc->bge_ldata.bge_status_block)
2884                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2885                     sc->bge_ldata.bge_status_block,
2886                     sc->bge_cdata.bge_status_map);
2887
2888         if (sc->bge_cdata.bge_status_tag)
2889                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2890
2891         /* Destroy statistics block. */
2892         if (sc->bge_ldata.bge_stats_paddr)
2893                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2894                     sc->bge_cdata.bge_stats_map);
2895
2896         if (sc->bge_ldata.bge_stats)
2897                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2898                     sc->bge_ldata.bge_stats,
2899                     sc->bge_cdata.bge_stats_map);
2900
2901         if (sc->bge_cdata.bge_stats_tag)
2902                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2903
2904         if (sc->bge_cdata.bge_buffer_tag)
2905                 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2906
2907         /* Destroy the parent tag. */
2908         if (sc->bge_cdata.bge_parent_tag)
2909                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2910 }
2911
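/*
 * Helper implementing the canonical busdma(9) sequence for a descriptor
 * ring: bus_dma_tag_create() for a single physically contiguous segment,
 * bus_dmamem_alloc() of zeroed, coherent memory, then bus_dmamap_load(),
 * whose bge_dma_map_addr() callback stores the ring's bus address in
 * ctx.bge_busaddr for the caller.
 */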
2912 static int
2913 bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2914     bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2915     bus_addr_t *paddr, const char *msg)
2916 {
2917         struct bge_dmamap_arg ctx;
2918         int error;
2919
2920         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2921             alignment, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2922             NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2923         if (error != 0) {
2924                 device_printf(sc->bge_dev,
2925                     "could not create %s dma tag\n", msg);
2926                 return (ENOMEM);
2927         }
2928         /* Allocate DMA'able memory for ring. */
2929         error = bus_dmamem_alloc(*tag, (void **)ring,
2930             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2931         if (error != 0) {
2932                 device_printf(sc->bge_dev,
2933                     "could not allocate DMA'able memory for %s\n", msg);
2934                 return (ENOMEM);
2935         }
2936         /* Load the address of the ring. */
2937         ctx.bge_busaddr = 0;
2938         error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2939             &ctx, BUS_DMA_NOWAIT);
2940         if (error != 0) {
2941                 device_printf(sc->bge_dev,
2942                     "could not load DMA'able memory for %s\n", msg);
2943                 return (ENOMEM);
2944         }
2945         *paddr = ctx.bge_busaddr;
2946         return (0);
2947 }
2948
2949 static int
2950 bge_dma_alloc(struct bge_softc *sc)
2951 {
2952         bus_addr_t lowaddr;
2953         bus_size_t rxmaxsegsz, sbsz, txsegsz, txmaxsegsz;
2954         int i, error;
2955
2956         lowaddr = BUS_SPACE_MAXADDR;
2957         if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2958                 lowaddr = BGE_DMA_MAXADDR;
2959         /*
2960          * Allocate the parent bus DMA tag appropriate for PCI.
2961          */
2962         error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2963             1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2964             NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2965             0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2966         if (error != 0) {
2967                 device_printf(sc->bge_dev,
2968                     "could not allocate parent dma tag\n");
2969                 return (ENOMEM);
2970         }
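        /*
         * For reference, the bus_dma_tag_create() arguments above are,
         * in order: parent tag, alignment, boundary, lowaddr, highaddr,
         * filter, filterarg, maxsize, nsegments, maxsegsz, flags,
         * lockfunc, lockarg, and the tag to fill in.
         */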
2971
2972         /* Create tag for standard RX ring. */
2973         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2974             &sc->bge_cdata.bge_rx_std_ring_tag,
2975             (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2976             &sc->bge_cdata.bge_rx_std_ring_map,
2977             &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2978         if (error)
2979                 return (error);
2980
2981         /* Create tag for RX return ring. */
2982         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2983             &sc->bge_cdata.bge_rx_return_ring_tag,
2984             (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2985             &sc->bge_cdata.bge_rx_return_ring_map,
2986             &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2987         if (error)
2988                 return (error);
2989
2990         /* Create tag for TX ring. */
2991         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2992             &sc->bge_cdata.bge_tx_ring_tag,
2993             (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2994             &sc->bge_cdata.bge_tx_ring_map,
2995             &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2996         if (error)
2997                 return (error);
2998
2999         /*
3000          * Create tag for status block.
3001          * Because we use only a single TX/RX/RX return ring, use the
3002          * minimum status block size, except on BCM5700 AX/BX, which
3003          * seem to want the full status block size regardless of the
3004          * configured number of rings.
3005          */
3006         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3007             sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
3008                 sbsz = BGE_STATUS_BLK_SZ;
3009         else
3010                 sbsz = 32;
3011         error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
3012             &sc->bge_cdata.bge_status_tag,
3013             (uint8_t **)&sc->bge_ldata.bge_status_block,
3014             &sc->bge_cdata.bge_status_map,
3015             &sc->bge_ldata.bge_status_block_paddr, "status block");
3016         if (error)
3017                 return (error);
3018
3019         /* Create tag for statistics block. */
3020         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
3021             &sc->bge_cdata.bge_stats_tag,
3022             (uint8_t **)&sc->bge_ldata.bge_stats,
3023             &sc->bge_cdata.bge_stats_map,
3024             &sc->bge_ldata.bge_stats_paddr, "statistics block");
3025         if (error)
3026                 return (error);
3027
3028         /* Create tag for jumbo RX ring. */
3029         if (BGE_IS_JUMBO_CAPABLE(sc)) {
3030                 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
3031                     &sc->bge_cdata.bge_rx_jumbo_ring_tag,
3032                     (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
3033                     &sc->bge_cdata.bge_rx_jumbo_ring_map,
3034                     &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
3035                 if (error)
3036                         return (error);
3037         }
3038
3039         /* Create parent tag for buffers. */
3040         if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
3041                 /*
3042                  * XXX
3043                  * A watchdog timeout issue was observed on BCM5704 devices
3044                  * that live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X
3045                  * bridge).  Both limiting the DMA address space to 32 bits
3046                  * and flushing mailbox writes seem to address the issue.
3047                  */
3048                 if (sc->bge_pcixcap != 0)
3049                         lowaddr = BUS_SPACE_MAXADDR_32BIT;
3050         }
3051         error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr,
3052             BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
3053             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
3054             &sc->bge_cdata.bge_buffer_tag);
3055         if (error != 0) {
3056                 device_printf(sc->bge_dev,
3057                     "could not allocate buffer dma tag\n");
3058                 return (ENOMEM);
3059         }
3060         /* Create tag for Tx mbufs. */
3061         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
3062                 txsegsz = BGE_TSOSEG_SZ;
3063                 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
3064         } else {
3065                 txsegsz = MCLBYTES;
3066                 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
3067         }
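        /*
         * Note: 65535 is the largest payload a 16-bit IP total-length
         * field can describe, so the TSO tag allows one maximum-sized
         * TSO burst plus room for the Ethernet/VLAN header in front
         * of it.
         */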
3068         error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
3069             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
3070             txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
3071             &sc->bge_cdata.bge_tx_mtag);
3072
3073         if (error) {
3074                 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
3075                 return (ENOMEM);
3076         }
3077
3078         /* Create tag for Rx mbufs. */
3079         if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
3080                 rxmaxsegsz = MJUM9BYTES;
3081         else
3082                 rxmaxsegsz = MCLBYTES;
3083         error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
3084             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
3085             rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
3086
3087         if (error) {
3088                 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
3089                 return (ENOMEM);
3090         }
3091
3092         /* Create DMA maps for RX buffers. */
3093         error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
3094             &sc->bge_cdata.bge_rx_std_sparemap);
3095         if (error) {
3096                 device_printf(sc->bge_dev,
3097                     "can't create spare DMA map for RX\n");
3098                 return (ENOMEM);
3099         }
3100         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3101                 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
3102                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
3103                 if (error) {
3104                         device_printf(sc->bge_dev,
3105                             "can't create DMA map for RX\n");
3106                         return (ENOMEM);
3107                 }
3108         }
3109
3110         /* Create DMA maps for TX buffers. */
3111         for (i = 0; i < BGE_TX_RING_CNT; i++) {
3112                 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
3113                             &sc->bge_cdata.bge_tx_dmamap[i]);
3114                 if (error) {
3115                         device_printf(sc->bge_dev,
3116                             "can't create DMA map for TX\n");
3117                         return (ENOMEM);
3118                 }
3119         }
3120
3121         /* Create tags for jumbo RX buffers. */
3122         if (BGE_IS_JUMBO_CAPABLE(sc)) {
3123                 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
3124                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
3125                     NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
3126                     0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
3127                 if (error) {
3128                         device_printf(sc->bge_dev,
3129                             "could not allocate jumbo dma tag\n");
3130                         return (ENOMEM);
3131                 }
3132                 /* Create DMA maps for jumbo RX buffers. */
3133                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
3134                     0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
3135                 if (error) {
3136                         device_printf(sc->bge_dev,
3137                             "can't create spare DMA map for jumbo RX\n");
3138                         return (ENOMEM);
3139                 }
3140                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
3141                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
3142                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
3143                         if (error) {
3144                                 device_printf(sc->bge_dev,
3145                                     "can't create DMA map for jumbo RX\n");
3146                                 return (ENOMEM);
3147                         }
3148                 }
3149         }
3150
3151         return (0);
3152 }
3153
3154 /*
3155  * Return true if this device has more than one port.
3156  */
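/*
 * For example, a dual-MAC part such as the BCM5704 (see the "F1 Cu"
 * column in the PHY table in bge_attach()) exposes its second port as
 * another PCI function at the same domain/bus/slot, which
 * pci_find_dbsf() will locate.
 */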
3157 static int
3158 bge_has_multiple_ports(struct bge_softc *sc)
3159 {
3160         device_t dev = sc->bge_dev;
3161         u_int b, d, f, fscan, s;
3162
3163         d = pci_get_domain(dev);
3164         b = pci_get_bus(dev);
3165         s = pci_get_slot(dev);
3166         f = pci_get_function(dev);
3167         for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
3168                 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
3169                         return (1);
3170         return (0);
3171 }
3172
3173 /*
3174  * Return true if MSI can be used with this device.
3175  */
3176 static int
3177 bge_can_use_msi(struct bge_softc *sc)
3178 {
3179         int can_use_msi = 0;
3180
3181         if (sc->bge_msi == 0)
3182                 return (0);
3183
3184         /* Disable MSI for polling(4). */
3185 #ifdef DEVICE_POLLING
3186         return (0);
3187 #endif
3188         switch (sc->bge_asicrev) {
3189         case BGE_ASICREV_BCM5714_A0:
3190         case BGE_ASICREV_BCM5714:
3191                 /*
3192                  * Apparently, MSI doesn't work when these chips are
3193                  * configured in single-port mode.
3194                  */
3195                 if (bge_has_multiple_ports(sc))
3196                         can_use_msi = 1;
3197                 break;
3198         case BGE_ASICREV_BCM5750:
3199                 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
3200                     sc->bge_chiprev != BGE_CHIPREV_5750_BX)
3201                         can_use_msi = 1;
3202                 break;
3203         default:
3204                 if (BGE_IS_575X_PLUS(sc))
3205                         can_use_msi = 1;
3206         }
3207         return (can_use_msi);
3208 }
3209
3210 static int
3211 bge_mbox_reorder(struct bge_softc *sc)
3212 {
3213         /* List of PCI bridges that are known to reorder mailbox writes. */
3214         static const struct mbox_reorder {
3215                 const uint16_t vendor;
3216                 const uint16_t device;
3217                 const char *desc;
3218         } mbox_reorder_lists[] = {
3219                 { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
3220         };
3221         devclass_t pci, pcib;
3222         device_t bus, dev;
3223         int i;
3224
3225         pci = devclass_find("pci");
3226         pcib = devclass_find("pcib");
3227         dev = sc->bge_dev;
3228         bus = device_get_parent(dev);
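        /*
         * Walk up the newbus tree: each iteration steps from the current
         * bus to the bridge device above it and then to that bridge's
         * parent bus, checking every pcib-class device along the way
         * against the reorder list, until something that is not a PCI
         * bridge or PCI bus is reached.
         */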
3229         for (;;) {
3230                 dev = device_get_parent(bus);
3231                 bus = device_get_parent(dev);
3232                 if (device_get_devclass(dev) != pcib)
3233                         break;
3234                 for (i = 0; i < nitems(mbox_reorder_lists); i++) {
3235                         if (pci_get_vendor(dev) ==
3236                             mbox_reorder_lists[i].vendor &&
3237                             pci_get_device(dev) ==
3238                             mbox_reorder_lists[i].device) {
3239                                 device_printf(sc->bge_dev,
3240                                     "enabling MBOX workaround for %s\n",
3241                                     mbox_reorder_lists[i].desc);
3242                                 return (1);
3243                         }
3244                 }
3245                 if (device_get_devclass(bus) != pci)
3246                         break;
3247         }
3248         return (0);
3249 }
3250
3251 static void
3252 bge_devinfo(struct bge_softc *sc)
3253 {
3254         uint32_t cfg, clk;
3255
3256         device_printf(sc->bge_dev,
3257             "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
3258             sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
3259         if (sc->bge_flags & BGE_FLAG_PCIE)
3260                 printf("PCI-E\n");
3261         else if (sc->bge_flags & BGE_FLAG_PCIX) {
3262                 printf("PCI-X ");
3263                 cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3264                 if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
3265                         clk = 133;
3266                 else {
3267                         clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
3268                         switch (clk) {
3269                         case 0:
3270                                 clk = 33;
3271                                 break;
3272                         case 2:
3273                                 clk = 50;
3274                                 break;
3275                         case 4:
3276                                 clk = 66;
3277                                 break;
3278                         case 6:
3279                                 clk = 100;
3280                                 break;
3281                         case 7:
3282                                 clk = 133;
3283                                 break;
3284                         }
3285                 }
3286                 printf("%u MHz\n", clk);
3287         } else {
3288                 if (sc->bge_pcixcap != 0)
3289                         printf("PCI on PCI-X ");
3290                 else
3291                         printf("PCI ");
3292                 cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
3293                 if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
3294                         clk = 66;
3295                 else
3296                         clk = 33;
3297                 if (cfg & BGE_PCISTATE_32BIT_BUS)
3298                         printf("%u MHz; 32bit\n", clk);
3299                 else
3300                         printf("%u MHz; 64bit\n", clk);
3301         }
3302 }
3303
3304 static int
3305 bge_attach(device_t dev)
3306 {
3307         if_t ifp;
3308         struct bge_softc *sc;
3309         uint32_t hwcfg = 0, misccfg, pcistate;
3310         u_char eaddr[ETHER_ADDR_LEN];
3311         int capmask, error, reg, rid, trys;
3312
3313         sc = device_get_softc(dev);
3314         sc->bge_dev = dev;
3315
3316         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3317         TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
3318         callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3319
3320         pci_enable_busmaster(dev);
3321
3322         /*
3323          * Allocate control/status registers.
3324          */
3325         rid = PCIR_BAR(0);
3326         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3327             RF_ACTIVE);
3328
3329         if (sc->bge_res == NULL) {
3330                 device_printf(sc->bge_dev, "couldn't map BAR0 memory\n");
3331                 error = ENXIO;
3332                 goto fail;
3333         }
3334
3335         /* Save various chip information. */
3336         sc->bge_func_addr = pci_get_function(dev);
3337         sc->bge_chipid = bge_chipid(dev);
3338         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
3339         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
3340
3341         /* Set default PHY address. */
3342         sc->bge_phy_addr = 1;
3343          /*
3344           * PHY address mapping for various devices.
3345           *
3346           *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
3347           * ---------+-------+-------+-------+-------+
3348           * BCM57XX  |   1   |   X   |   X   |   X   |
3349           * BCM5704  |   1   |   X   |   1   |   X   |
3350           * BCM5717  |   1   |   8   |   2   |   9   |
3351           * BCM5719  |   1   |   8   |   2   |   9   |
3352           * BCM5720  |   1   |   8   |   2   |   9   |
3353           *
3354           *          | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
3355           * ---------+-------+-------+-------+-------+
3356           * BCM57XX  |   X   |   X   |   X   |   X   |
3357           * BCM5704  |   X   |   X   |   X   |   X   |
3358           * BCM5717  |   X   |   X   |   X   |   X   |
3359           * BCM5719  |   3   |   10  |   4   |   11  |
3360           * BCM5720  |   X   |   X   |   X   |   X   |
3361           *
3362           * Other addresses may respond but they are not
3363           * IEEE compliant PHYs and should be ignored.
3364           */
3365         if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
3366             sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3367             sc->bge_asicrev == BGE_ASICREV_BCM5720) {
3368                 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
3369                         if (CSR_READ_4(sc, BGE_SGDIG_STS) &
3370                             BGE_SGDIGSTS_IS_SERDES)
3371                                 sc->bge_phy_addr = sc->bge_func_addr + 8;
3372                         else
3373                                 sc->bge_phy_addr = sc->bge_func_addr + 1;
3374                 } else {
3375                         if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
3376                             BGE_CPMU_PHY_STRAP_IS_SERDES)
3377                                 sc->bge_phy_addr = sc->bge_func_addr + 8;
3378                         else
3379                                 sc->bge_phy_addr = sc->bge_func_addr + 1;
3380                 }
3381         }
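        /*
         * Worked example: on a BCM5719 in SerDes mode, PCI function 1
         * gets sc->bge_phy_addr = 1 + 8 = 9, matching the "F1 Sr" column
         * of the table above; the same function with a copper PHY gets
         * 1 + 1 = 2 ("F1 Cu").
         */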
3382
3383         if (bge_has_eaddr(sc))
3384                 sc->bge_flags |= BGE_FLAG_EADDR;
3385
3386         /* Save chipset family. */
3387         switch (sc->bge_asicrev) {
3388         case BGE_ASICREV_BCM5762:
3389         case BGE_ASICREV_BCM57765:
3390         case BGE_ASICREV_BCM57766:
3391                 sc->bge_flags |= BGE_FLAG_57765_PLUS;
3392                 /* FALLTHROUGH */
3393         case BGE_ASICREV_BCM5717:
3394         case BGE_ASICREV_BCM5719:
3395         case BGE_ASICREV_BCM5720:
3396                 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
3397                     BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
3398                     BGE_FLAG_JUMBO_FRAME;
3399                 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3400                     sc->bge_asicrev == BGE_ASICREV_BCM5720) {
3401                         /*
3402                          * Enable the workaround for the DMA engine's
3403                          * miscalculation of available TXMBUF space.
3404                          */
3405                         sc->bge_flags |= BGE_FLAG_RDMA_BUG;
3406                         if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3407                             sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3408                                 /* Jumbo frame on BCM5719 A0 does not work. */
3409                                 sc->bge_flags &= ~BGE_FLAG_JUMBO;
3410                         }
3411                 }
3412                 break;
3413         case BGE_ASICREV_BCM5755:
3414         case BGE_ASICREV_BCM5761:
3415         case BGE_ASICREV_BCM5784:
3416         case BGE_ASICREV_BCM5785:
3417         case BGE_ASICREV_BCM5787:
3418         case BGE_ASICREV_BCM57780:
3419                 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
3420                     BGE_FLAG_5705_PLUS;
3421                 break;
3422         case BGE_ASICREV_BCM5700:
3423         case BGE_ASICREV_BCM5701:
3424         case BGE_ASICREV_BCM5703:
3425         case BGE_ASICREV_BCM5704:
3426                 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
3427                 break;
3428         case BGE_ASICREV_BCM5714_A0:
3429         case BGE_ASICREV_BCM5780:
3430         case BGE_ASICREV_BCM5714:
3431                 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
3432                 /* FALLTHROUGH */
3433         case BGE_ASICREV_BCM5750:
3434         case BGE_ASICREV_BCM5752:
3435         case BGE_ASICREV_BCM5906:
3436                 sc->bge_flags |= BGE_FLAG_575X_PLUS;
3437                 /* FALLTHROUGH */
3438         case BGE_ASICREV_BCM5705:
3439                 sc->bge_flags |= BGE_FLAG_5705_PLUS;
3440                 break;
3441         }
3442
3443         /* Identify chips with APE processor. */
3444         switch (sc->bge_asicrev) {
3445         case BGE_ASICREV_BCM5717:
3446         case BGE_ASICREV_BCM5719:
3447         case BGE_ASICREV_BCM5720:
3448         case BGE_ASICREV_BCM5761:
3449         case BGE_ASICREV_BCM5762:
3450                 sc->bge_flags |= BGE_FLAG_APE;
3451                 break;
3452         }
3453
3454         /* Chips with APE need BAR2 access for APE registers/memory. */
3455         if ((sc->bge_flags & BGE_FLAG_APE) != 0) {
3456                 rid = PCIR_BAR(2);
3457                 sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3458                     RF_ACTIVE);
3459                 if (sc->bge_res2 == NULL) {
3460                         device_printf(sc->bge_dev,
3461                             "couldn't map BAR2 memory\n");
3462                         error = ENXIO;
3463                         goto fail;
3464                 }
3465
3466                 /* Enable APE register/memory access by host driver. */
3467                 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3468                 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3469                     BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3470                     BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3471                 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);
3472
3473                 bge_ape_lock_init(sc);
3474                 bge_ape_read_fw_ver(sc);
3475         }
3476
3477         /* Add SYSCTLs; this requires the chipset family to be set. */
3478         bge_add_sysctls(sc);
3479
3480         /* Identify the chips that use a CPMU. */
3481         if (BGE_IS_5717_PLUS(sc) ||
3482             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3483             sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3484             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
3485             sc->bge_asicrev == BGE_ASICREV_BCM57780)
3486                 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
3487         if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
3488                 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
3489         else
3490                 sc->bge_mi_mode = BGE_MIMODE_BASE;
3491         /* Enable auto polling for BCM570[0-5]. */
3492         if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
3493                 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
3494
3495         /*
3496          * All Broadcom controllers have the 4GB boundary DMA bug.
3497          * Whenever a DMA transfer crosses a multiple of the 4GB boundary
3498          * (4GB, 8GB, 12GB, etc.), making the transition from
3499          * 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3500          * state machine will lock up and cause the device to hang.
3501          */
3502         sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
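        /*
         * For example, a 2KB transfer starting at bus address
         * 0xFFFFFC00 would cross the first 4GB boundary at
         * 0x1_0000_0000 and trigger the lockup; bge_dma_alloc()
         * mitigates this on PCI-X devices by restricting buffer DMA to
         * 32-bit addresses (see the BGE_FLAG_4G_BNDRY_BUG handling
         * there).
         */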
3503
3504         /* BCM5755 or higher and BCM5906 have the short DMA bug. */
3505         if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3506                 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
3507
3508         /*
3509          * The BCM5719 cannot handle DMA requests for DMA segments
3510          * larger than 4KB.  However, the maximum DMA segment size
3511          * created in the DMA tag is 4KB for TSO, so we shouldn't
3512          * encounter the issue here.
3513          */
3514         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3515                 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3516
3517         misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3518         if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3519                 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3520                     misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3521                         sc->bge_flags |= BGE_FLAG_5788;
3522         }
3523
3524         capmask = BMSR_DEFCAPMASK;
3525         if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3526             (misccfg == 0x4000 || misccfg == 0x8000)) ||
3527             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3528             pci_get_vendor(dev) == BCOM_VENDORID &&
3529             (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3530             pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3531             pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3532             (pci_get_vendor(dev) == BCOM_VENDORID &&
3533             (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3534             pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3535             pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3536             pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3537             pci_get_device(dev) == BCOM_DEVICEID_BCM57791 ||
3538             pci_get_device(dev) == BCOM_DEVICEID_BCM57795 ||
3539             sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3540                 /* These chips are 10/100 only. */
3541                 capmask &= ~BMSR_EXTSTAT;
3542                 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3543         }
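        /*
         * Clearing BMSR_EXTSTAT from capmask hides the PHY's extended
         * status register from mii_attach() below, so the MII layer
         * never tries the 1000Mbps modes these 10/100-only parts lack.
         */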
3544
3545         /*
3546          * Some controllers seem to require special firmware to use
3547          * TSO.  But that firmware is not available to FreeBSD, and
3548          * Linux claims that TSO performed by the firmware is slower
3549          * than hardware-based TSO.  Moreover, the firmware-based TSO
3550          * has one known bug: it cannot handle TSO when the Ethernet
3551          * header plus IP/TCP header exceeds 80 bytes.  A workaround
3552          * for the TSO bug exists, but it appears more expensive than
3553          * not using TSO at all.  Some hardware revisions also have
3554          * the TSO bug, so limit TSO to the controllers that are not
3555          * affected by TSO issues (e.g. 5755 or higher).
3556          */
3557         if (BGE_IS_5717_PLUS(sc)) {
3558                 /* BCM5717 requires different TSO configuration. */
3559                 sc->bge_flags |= BGE_FLAG_TSO3;
3560                 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3561                     sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3562                         /* TSO on BCM5719 A0 does not work. */
3563                         sc->bge_flags &= ~BGE_FLAG_TSO3;
3564                 }
3565         } else if (BGE_IS_5755_PLUS(sc)) {
3566                 /*
3567                  * BCM5754 and BCM5787 share the same ASIC ID, so an
3568                  * explicit device ID check is required.
3569                  * For unknown reasons, TSO does not work on the BCM5755M.
3570                  */
3571                 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3572                     pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3573                     pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3574                         sc->bge_flags |= BGE_FLAG_TSO;
3575         }
3576
3577         /*
3578          * Check if this is a PCI-X or PCI Express device.
3579          */
3580         if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3581                 /*
3582                  * Found a PCI Express capabilities register, this
3583                  * must be a PCI Express device.
3584                  */
3585                 sc->bge_flags |= BGE_FLAG_PCIE;
3586                 sc->bge_expcap = reg;
3587                 /* Extract supported maximum payload size. */
3588                 sc->bge_mps = pci_read_config(dev, sc->bge_expcap +
3589                     PCIER_DEVICE_CAP, 2);
3590                 sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD);
3591                 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3592                     sc->bge_asicrev == BGE_ASICREV_BCM5720)
3593                         sc->bge_expmrq = 2048;
3594                 else
3595                         sc->bge_expmrq = 4096;
3596                 pci_set_max_read_req(dev, sc->bge_expmrq);
3597         } else {
3598                 /*
3599                  * Check if the device is in PCI-X Mode.
3600                  * (This bit is not valid on PCI Express controllers.)
3601                  */
3602                 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3603                         sc->bge_pcixcap = reg;
3604                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3605                     BGE_PCISTATE_PCI_BUSMODE) == 0)
3606                         sc->bge_flags |= BGE_FLAG_PCIX;
3607         }
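        /*
         * Worked example for the PCIe branch above: PCIEM_CAP_MAX_PAYLOAD
         * is an exponent field, so a raw value of 0 yields a maximum
         * payload of 128 << 0 = 128 bytes and 2 yields 128 << 2 = 512
         * bytes.  The maximum read request size is then capped at 2048
         * bytes on BCM5719/BCM5720 and 4096 bytes on other PCIe chips.
         */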
3608
3609         /*
3610          * The 40bit DMA bug applies to the 5714/5715 controllers and is
3611          * not actually a MAC controller bug but an issue with the embedded
3612          * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3613          */
3614         if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3615                 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3616         /*
3617          * Some PCI-X bridges are known to trigger write reordering to
3618          * the mailbox registers.  The typical symptom is watchdog
3619          * timeouts caused by out-of-order TX completions.  Enable the
3620          * workaround for PCI-X devices that live behind these bridges.
3621          * Note, PCI-X controllers can run in PCI mode, so we can't use
3622          * the BGE_FLAG_PCIX flag to detect PCI-X controllers.
3623          */
3624         if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
3625                 sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
3626         /*
3627          * Allocate the interrupt, using MSI if possible.  These devices
3628          * support 8 MSI messages, but only the first one is used in
3629          * normal operation.
3630          */
3631         rid = 0;
3632         if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3633                 sc->bge_msicap = reg;
3634                 reg = 1;
3635                 if (bge_can_use_msi(sc) && pci_alloc_msi(dev, &reg) == 0) {
3636                         rid = 1;
3637                         sc->bge_flags |= BGE_FLAG_MSI;
3638                 }
3639         }
3640
3641         /*
3642          * All controllers except the BCM5700 support tagged status, but
3643          * we use tagged status only for the MSI case on BCM5717.
3644          * Otherwise, MSI on BCM5717 does not work.
3645          */
3646 #ifndef DEVICE_POLLING
3647         if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3648                 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3649 #endif
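        /*
         * With tagged status the controller stamps each status block
         * update with a tag, and the interrupt handler writes that tag
         * back to the interrupt mailbox when acknowledging, letting the
         * chip tell whether the host has seen its latest update rather
         * than relying on a plain mailbox clear.
         */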
3650
3651         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3652             RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
3653
3654         if (sc->bge_irq == NULL) {
3655                 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3656                 error = ENXIO;
3657                 goto fail;
3658         }
3659
3660         bge_devinfo(sc);
3661
3662         sc->bge_asf_mode = 0;
3663         /* No ASF if APE present. */
3664         if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
3665                 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3666                     BGE_SRAM_DATA_SIG_MAGIC)) {
3667                         if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
3668                             BGE_HWCFG_ASF) {
3669                                 sc->bge_asf_mode |= ASF_ENABLE;
3670                                 sc->bge_asf_mode |= ASF_STACKUP;
3671                                 if (BGE_IS_575X_PLUS(sc))
3672                                         sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3673                         }
3674                 }
3675         }
3676
3677         bge_stop_fw(sc);
3678         bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
3679         if (bge_reset(sc)) {
3680                 device_printf(sc->bge_dev, "chip reset failed\n");
3681                 error = ENXIO;
3682                 goto fail;
3683         }
3684
3685         bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
3686         bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
3687
3688         if (bge_chipinit(sc)) {
3689                 device_printf(sc->bge_dev, "chip initialization failed\n");
3690                 error = ENXIO;
3691                 goto fail;
3692         }
3693
3694         error = bge_get_eaddr(sc, eaddr);
3695         if (error) {
3696                 device_printf(sc->bge_dev,
3697                     "failed to read station address\n");
3698                 error = ENXIO;
3699                 goto fail;
3700         }
3701
3702         /* 5705 limits RX return ring to 512 entries. */
3703         if (BGE_IS_5717_PLUS(sc))
3704                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3705         else if (BGE_IS_5705_PLUS(sc))
3706                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3707         else
3708                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3709
3710         if (bge_dma_alloc(sc)) {
3711                 device_printf(sc->bge_dev,
3712                     "failed to allocate DMA resources\n");
3713                 error = ENXIO;
3714                 goto fail;
3715         }
3716
3717         /* Set default tuneable values. */
3718         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3719         sc->bge_rx_coal_ticks = 150;
3720         sc->bge_tx_coal_ticks = 150;
3721         sc->bge_rx_max_coal_bds = 10;
3722         sc->bge_tx_max_coal_bds = 10;
3723
3724         /* Initialize checksum features to use. */
3725         sc->bge_csum_features = BGE_CSUM_FEATURES;
3726         if (sc->bge_forced_udpcsum != 0)
3727                 sc->bge_csum_features |= CSUM_UDP;
3728
3729         /* Set up ifnet structure */
3730         ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3731         if (ifp == NULL) {
3732                 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3733                 error = ENXIO;
3734                 goto fail;
3735         }
3736         if_setsoftc(ifp, sc);
3737         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3738         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
3739         if_setioctlfn(ifp, bge_ioctl);
3740         if_setstartfn(ifp, bge_start);
3741         if_setinitfn(ifp, bge_init);
3742         if_setgetcounterfn(ifp, bge_get_counter);
3743         if_setsendqlen(ifp, BGE_TX_RING_CNT - 1);
3744         if_setsendqready(ifp);
3745         if_sethwassist(ifp, sc->bge_csum_features);
3746         if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3747             IFCAP_VLAN_MTU);
3748         if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3749                 if_sethwassistbits(ifp, CSUM_TSO, 0);
3750                 if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0);
3751         }
3752 #ifdef IFCAP_VLAN_HWCSUM
3753         if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
3754 #endif
3755         if_setcapenable(ifp, if_getcapabilities(ifp));
3756 #ifdef DEVICE_POLLING
3757         if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
3758 #endif
3759
3760         /*
3761          * 5700 B0 chips do not support checksumming correctly due
3762          * to hardware bugs.
3763          */
3764         if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3765                 if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM);
3766                 if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
3767                 if_sethwassist(ifp, 0);
3768         }
3769
3770         /*
3771          * Figure out what sort of media we have by checking the
3772          * hardware config word in the first 32k of NIC internal memory,
3773          * or fall back to examining the EEPROM if necessary.
3774          * Note: on some BCM5700 cards, this value appears to be unset.
3775          * If that's the case, we have to rely on identifying the NIC
3776          * by its PCI subsystem ID, as we do below for the SysKonnect
3777          * SK-9D41.
3778          */
3779         if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3780                 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3781         else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3782             (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3783                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3784                     sizeof(hwcfg))) {
3785                         device_printf(sc->bge_dev, "failed to read EEPROM\n");
3786                         error = ENXIO;
3787                         goto fail;
3788                 }
3789                 hwcfg = ntohl(hwcfg);
3790         }
3791
3792         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3793         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3794             SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3795                 if (BGE_IS_5705_PLUS(sc)) {
3796                         sc->bge_flags |= BGE_FLAG_MII_SERDES;
3797                         sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3798                 } else
3799                         sc->bge_flags |= BGE_FLAG_TBI;
3800         }
3801
3802         /* Set various PHY bug flags. */
3803         if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3804             sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3805                 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
3806         if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
3807             sc->bge_chiprev == BGE_CHIPREV_5704_AX)
3808                 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
3809         if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3810                 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
3811         if (pci_get_subvendor(dev) == DELL_VENDORID)
3812                 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
3813         if ((BGE_IS_5705_PLUS(sc)) &&
3814             sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
3815             sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
3816             sc->bge_asicrev != BGE_ASICREV_BCM57780 &&
3817             !BGE_IS_5717_PLUS(sc)) {
3818                 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
3819                     sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3820                     sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3821                     sc->bge_asicrev == BGE_ASICREV_BCM5787) {
3822                         if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
3823                             pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
3824                                 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
3825                         if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
3826                                 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
3827                 } else
3828                         sc->bge_phy_flags |= BGE_PHY_BER_BUG;
3829         }
3830
3831         /*
3832          * Don't enable Ethernet@WireSpeed for the 5700 or the
3833          * 5705 A0 and A1 chips.
3834          */
3835         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3836             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3837             (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
3838             sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
3839                 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3840
3841         if (sc->bge_flags & BGE_FLAG_TBI) {
3842                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3843                     bge_ifmedia_sts);
3844                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3845                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3846                     0, NULL);
3847                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3848                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3849                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3850         } else {
3851                 /*
3852                  * Do transceiver setup and tell the firmware the
3853                  * driver is down so we can try to access the PHY during
3854                  * the probe if ASF is running.  Retry a couple of times
3855                  * if we get a conflict with the ASF firmware accessing
3856                  * the PHY.
3857                  */
3858                 trys = 0;
3859                 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3860 again:
3861                 bge_asf_driver_up(sc);
3862
3863                 error = mii_attach(dev, &sc->bge_miibus, ifp,
3864                     (ifm_change_cb_t)bge_ifmedia_upd,
3865                     (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr,
3866                     MII_OFFSET_ANY, MIIF_DOPAUSE);
3867                 if (error != 0) {
3868                         if (trys++ < 4) {
3869                                 device_printf(sc->bge_dev, "Try again\n");
3870                                 bge_miibus_writereg(sc->bge_dev,
3871                                     sc->bge_phy_addr, MII_BMCR, BMCR_RESET);
3872                                 goto again;
3873                         }
3874                         device_printf(sc->bge_dev, "attaching PHYs failed\n");
3875                         goto fail;
3876                 }
3877
3878                 /*
3879                  * Now tell the firmware we are going up after probing the PHY.
3880                  */
3881                 if (sc->bge_asf_mode & ASF_STACKUP)
3882                         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3883         }
3884
3885         /*
3886          * When using the BCM5701 in PCI-X mode, data corruption has
3887          * been observed in the first few bytes of some received packets.
3888          * Aligning the packet buffer in memory eliminates the corruption.
3889          * Unfortunately, this misaligns the packet payloads.  On platforms
3890          * which do not support unaligned accesses, we will realign the
3891          * payloads by copying the received packets.
3892          */
3893         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3894             sc->bge_flags & BGE_FLAG_PCIX)
3895                 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3896
3897         /*
3898          * Call MI attach routine.
3899          */
3900         ether_ifattach(ifp, eaddr);
3901
3902         /* Tell upper layer we support long frames. */
3903         if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
3904
3905         /*
3906          * Hookup IRQ last.
3907          */
3908         if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3909                 /* Take advantage of single-shot MSI. */
3910                 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3911                     ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3912                 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3913                     taskqueue_thread_enqueue, &sc->bge_tq);
3914                 if (sc->bge_tq == NULL) {
3915                         device_printf(dev, "could not create taskqueue.\n");
3916                         ether_ifdetach(ifp);
3917                         error = ENOMEM;
3918                         goto fail;
3919                 }
3920                 error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
3921                     "%s taskq", device_get_nameunit(sc->bge_dev));
3922                 if (error != 0) {
3923                         device_printf(dev, "could not start threads.\n");
3924                         ether_ifdetach(ifp);
3925                         goto fail;
3926                 }
3927                 error = bus_setup_intr(dev, sc->bge_irq,
3928                     INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3929                     &sc->bge_intrhand);
3930         } else
3931                 error = bus_setup_intr(dev, sc->bge_irq,
3932                     INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3933                     &sc->bge_intrhand);
3934
3935         if (error) {
3936                 ether_ifdetach(ifp);
3937                 device_printf(sc->bge_dev, "couldn't set up irq\n");
3938         }
3939
3940 fail:
3941         if (error)
3942                 bge_detach(dev);
3943         return (error);
3944 }
3945
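     /*
      * Undo bge_attach(): stop the hardware, detach from the network
      * stack and the MII layer, then release all bus resources.  This
      * also serves as the error-unwind path when attach fails partway.
      */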
3946 static int
3947 bge_detach(device_t dev)
3948 {
3949         struct bge_softc *sc;
3950         if_t ifp;
3951
3952         sc = device_get_softc(dev);
3953         ifp = sc->bge_ifp;
3954
3955 #ifdef DEVICE_POLLING
3956         if (if_getcapenable(ifp) & IFCAP_POLLING)
3957                 ether_poll_deregister(ifp);
3958 #endif
3959
3960         if (device_is_attached(dev)) {
3961                 ether_ifdetach(ifp);
3962                 BGE_LOCK(sc);
3963                 bge_stop(sc);
3964                 BGE_UNLOCK(sc);
3965                 callout_drain(&sc->bge_stat_ch);
3966         }
3967
3968         if (sc->bge_tq)
3969                 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3970
3971         if (sc->bge_flags & BGE_FLAG_TBI)
3972                 ifmedia_removeall(&sc->bge_ifmedia);
3973         else if (sc->bge_miibus != NULL) {
3974                 bus_generic_detach(dev);
3975                 device_delete_child(dev, sc->bge_miibus);
3976         }
3977
3978         bge_release_resources(sc);
3979
3980         return (0);
3981 }
3982
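     /*
      * Release everything bge_attach() allocated.  Each resource is
      * checked against NULL first, so this is safe to call on a
      * partially initialized softc.
      */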
3983 static void
3984 bge_release_resources(struct bge_softc *sc)
3985 {
3986         device_t dev;
3987
3988         dev = sc->bge_dev;
3989
3990         if (sc->bge_tq != NULL)
3991                 taskqueue_free(sc->bge_tq);
3992
3993         if (sc->bge_intrhand != NULL)
3994                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3995
3996         if (sc->bge_irq != NULL) {
3997                 bus_release_resource(dev, SYS_RES_IRQ,
3998                     rman_get_rid(sc->bge_irq), sc->bge_irq);
3999                 pci_release_msi(dev);
4000         }
4001
4002         if (sc->bge_res != NULL)
4003                 bus_release_resource(dev, SYS_RES_MEMORY,
4004                     rman_get_rid(sc->bge_res), sc->bge_res);
4005
4006         if (sc->bge_res2 != NULL)
4007                 bus_release_resource(dev, SYS_RES_MEMORY,
4008                     rman_get_rid(sc->bge_res2), sc->bge_res2);
4009
4010         if (sc->bge_ifp != NULL)
4011                 if_free(sc->bge_ifp);
4012
4013         bge_dma_free(sc);
4014
4015         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
4016                 BGE_LOCK_DESTROY(sc);
4017 }
4018
4019 static int
4020 bge_reset(struct bge_softc *sc)
4021 {
4022         device_t dev;
4023         uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val;
4024         void (*write_op)(struct bge_softc *, int, int);
4025         uint16_t devctl;
4026         int i;
4027
4028         dev = sc->bge_dev;
4029
4030         mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
4031         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4032                 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
4033         mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
4034
4035         if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
4036             (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
4037                 if (sc->bge_flags & BGE_FLAG_PCIE)
4038                         write_op = bge_writemem_direct;
4039                 else
4040                         write_op = bge_writemem_ind;
4041         } else
4042                 write_op = bge_writereg_ind;
4043
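             /*
              * Grab the NVRAM software arbitration semaphore (not present
              * on the 5700/5701) so the reset doesn't clash with an
              * in-progress firmware NVRAM access.
              */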
4044         if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
4045             sc->bge_asicrev != BGE_ASICREV_BCM5701) {
4046                 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
4047                 for (i = 0; i < 8000; i++) {
4048                         if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
4049                             BGE_NVRAMSWARB_GNT1)
4050                                 break;
4051                         DELAY(20);
4052                 }
4053                 if (i == 8000) {
4054                         if (bootverbose)
4055                                 device_printf(dev, "NVRAM lock timed out!\n");
4056                 }
4057         }
4058         /* Take APE lock when performing reset. */
4059         bge_ape_lock(sc, BGE_APE_LOCK_GRC);
4060
4061         /* Save some important PCI state. */
4062         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
4063         command = pci_read_config(dev, BGE_PCI_CMD, 4);
4064
4065         pci_write_config(dev, BGE_PCI_MISC_CTL,
4066             BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4067             BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
4068
4069         /* Disable fastboot on controllers that support it. */
4070         if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
4071             BGE_IS_5755_PLUS(sc)) {
4072                 if (bootverbose)
4073                         device_printf(dev, "Disabling fastboot\n");
4074                 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
4075         }
4076
4077         /*
4078          * Write the magic number to SRAM at offset 0xB50.
4079          * When firmware finishes its initialization it will
4080          * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
4081          */
4082         bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
4083
4084         reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
4085
4086         /* XXX: Broadcom Linux driver. */
4087         if (sc->bge_flags & BGE_FLAG_PCIE) {
4088                 if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
4089                     (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
4090                         if (CSR_READ_4(sc, 0x7E2C) == 0x60)     /* PCIE 1.0 */
4091                                 CSR_WRITE_4(sc, 0x7E2C, 0x20);
4092                 }
4093                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
4094                         /* Prevent PCIE link training during global reset */
4095                         CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
4096                         reset |= 1 << 29;
4097                 }
4098         }
4099
4100         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
4101                 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
4102                 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
4103                     val | BGE_VCPU_STATUS_DRV_RESET);
4104                 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
4105                 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
4106                     val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
4107         }
4108
4109         /*
4110          * Set GPHY Power Down Override to leave GPHY
4111          * powered up in D0 uninitialized.
4112          */
4113         if (BGE_IS_5705_PLUS(sc) &&
4114             (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
4115                 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
4116
4117         /* Issue global reset */
4118         write_op(sc, BGE_MISC_CFG, reset);
4119
4120         if (sc->bge_flags & BGE_FLAG_PCIE)
4121                 DELAY(100 * 1000);
4122         else
4123                 DELAY(1000);
4124
4125         /* XXX: Broadcom Linux driver. */
4126         if (sc->bge_flags & BGE_FLAG_PCIE) {
4127                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
4128                         DELAY(500000); /* wait for link training to complete */
4129                         val = pci_read_config(dev, 0xC4, 4);
4130                         pci_write_config(dev, 0xC4, val | (1 << 15), 4);
4131                 }
4132                 devctl = pci_read_config(dev,
4133                     sc->bge_expcap + PCIER_DEVICE_CTL, 2);
4134                 /* Clear enable no snoop and disable relaxed ordering. */
4135                 devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
4136                     PCIEM_CTL_NOSNOOP_ENABLE);
4137                 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
4138                     devctl, 2);
4139                 pci_set_max_read_req(dev, sc->bge_expmrq);
4140                 /* Clear error status. */
4141                 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
4142                     PCIEM_STA_CORRECTABLE_ERROR |
4143                     PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
4144                     PCIEM_STA_UNSUPPORTED_REQ, 2);
4145         }
4146
4147         /* Reset some of the PCI state that got zapped by reset. */
4148         pci_write_config(dev, BGE_PCI_MISC_CTL,
4149             BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4150             BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
4151         val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
4152         if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
4153             (sc->bge_flags & BGE_FLAG_PCIX) != 0)
4154                 val |= BGE_PCISTATE_RETRY_SAME_DMA;
4155         if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4156                 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
4157                     BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
4158                     BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
4159         pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
4160         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
4161         pci_write_config(dev, BGE_PCI_CMD, command, 4);
4162         /*
4163          * Disable PCI-X relaxed ordering to ensure the status block
4164          * update arrives before the packet buffer DMA. Otherwise the
4165          * driver may read a stale status block.
4166          */
4167         if (sc->bge_flags & BGE_FLAG_PCIX) {
4168                 devctl = pci_read_config(dev,
4169                     sc->bge_pcixcap + PCIXR_COMMAND, 2);
4170                 devctl &= ~PCIXM_COMMAND_ERO;
4171                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
4172                         devctl &= ~PCIXM_COMMAND_MAX_READ;
4173                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
4174                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4175                         devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
4176                             PCIXM_COMMAND_MAX_READ);
4177                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
4178                 }
4179                 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
4180                     devctl, 2);
4181         }
4182         /* Re-enable MSI, if necessary, and enable the memory arbiter. */
4183         if (BGE_IS_5714_FAMILY(sc)) {
4184                 /* This chip disables MSI on reset. */
4185                 if (sc->bge_flags & BGE_FLAG_MSI) {
4186                         val = pci_read_config(dev,
4187                             sc->bge_msicap + PCIR_MSI_CTRL, 2);
4188                         pci_write_config(dev,
4189                             sc->bge_msicap + PCIR_MSI_CTRL,
4190                             val | PCIM_MSICTRL_MSI_ENABLE, 2);
4191                         val = CSR_READ_4(sc, BGE_MSI_MODE);
4192                         CSR_WRITE_4(sc, BGE_MSI_MODE,
4193                             val | BGE_MSIMODE_ENABLE);
4194                 }
4195                 val = CSR_READ_4(sc, BGE_MARB_MODE);
4196                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
4197         } else
4198                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4199
4200         /* Fix up byte swapping. */
4201         CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
4202
4203         val = CSR_READ_4(sc, BGE_MAC_MODE);
4204         val = (val & ~mac_mode_mask) | mac_mode;
4205         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
4206         DELAY(40);
4207
4208         bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
4209
4210         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
4211                 for (i = 0; i < BGE_TIMEOUT; i++) {
4212                         val = CSR_READ_4(sc, BGE_VCPU_STATUS);
4213                         if (val & BGE_VCPU_STATUS_INIT_DONE)
4214                                 break;
4215                         DELAY(100);
4216                 }
4217                 if (i == BGE_TIMEOUT) {
4218                         device_printf(dev, "reset timed out\n");
4219                         return (1);
4220                 }
4221         } else {
4222                 /*
4223                  * Poll until we see the 1's complement of the magic number.
4224                  * This indicates that the firmware initialization is complete.
4225                  * We expect this to fail if no chip containing the Ethernet
4226                  * address is fitted, though.
4227                  */
4228                 for (i = 0; i < BGE_TIMEOUT; i++) {
4229                         DELAY(10);
4230                         val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
4231                         if (val == ~BGE_SRAM_FW_MB_MAGIC)
4232                                 break;
4233                 }
4234
4235                 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
4236                         device_printf(dev,
4237                             "firmware handshake timed out, found 0x%08x\n",
4238                             val);
4239                 /* BCM57765 A0 needs additional time before accessing. */
4240                 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
4241                         DELAY(10 * 1000);       /* XXX */
4242         }
4243
4244         /*
4245          * The 5704 in TBI mode apparently needs some special
4246          * adjustment to ensure the SERDES drive level is set
4247          * to 1.2V.
4248          */
4249         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
4250             sc->bge_flags & BGE_FLAG_TBI) {
4251                 val = CSR_READ_4(sc, BGE_SERDES_CFG);
4252                 val = (val & ~0xFFF) | 0x880;
4253                 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
4254         }
4255
4256         /* XXX: Broadcom Linux driver. */
4257         if (sc->bge_flags & BGE_FLAG_PCIE &&
4258             !BGE_IS_5717_PLUS(sc) &&
4259             sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
4260             sc->bge_asicrev != BGE_ASICREV_BCM5785) {
4261                 /* Enable Data FIFO protection. */
4262                 val = CSR_READ_4(sc, 0x7C00);
4263                 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
4264         }
4265
4266         if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
4267                 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
4268                     CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
4269
4270         return (0);
4271 }
4272
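     /*
      * Re-post an already-loaded RX buffer to the standard ring, e.g.
      * when the frame had an error or a replacement mbuf could not be
      * allocated, so the ring slot is never left empty.
      */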
4273 static __inline void
4274 bge_rxreuse_std(struct bge_softc *sc, int i)
4275 {
4276         struct bge_rx_bd *r;
4277
4278         r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
4279         r->bge_flags = BGE_RXBDFLAG_END;
4280         r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
4281         r->bge_idx = i;
4282         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4283 }
4284
4285 static __inline void
4286 bge_rxreuse_jumbo(struct bge_softc *sc, int i)
4287 {
4288         struct bge_extrx_bd *r;
4289
4290         r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
4291         r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
4292         r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
4293         r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
4294         r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
4295         r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
4296         r->bge_idx = i;
4297         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4298 }
4299
4300 /*
4301  * Frame reception handling. This is called if there's a frame
4302  * on the receive return list.
4303  *
4304  * Note: we have to be able to handle two possibilities here:
4305  * 1) the frame is from the jumbo receive ring
4306  * 2) the frame is from the standard receive ring
4307  */
4308
4309 static int
4310 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
4311 {
4312         if_t ifp;
4313         int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
4314         uint16_t rx_cons;
4315
4316         rx_cons = sc->bge_rx_saved_considx;
4317
4318         /* Nothing to do. */
4319         if (rx_cons == rx_prod)
4320                 return (rx_npkts);
4321
4322         ifp = sc->bge_ifp;
4323
4324         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
4325             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
4326         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
4327             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
4328         if (BGE_IS_JUMBO_CAPABLE(sc) &&
4329             if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
4330             ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
4331                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
4332                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
4333
4334         while (rx_cons != rx_prod) {
4335                 struct bge_rx_bd        *cur_rx;
4336                 uint32_t                rxidx;
4337                 struct mbuf             *m = NULL;
4338                 uint16_t                vlan_tag = 0;
4339                 int                     have_tag = 0;
4340
4341 #ifdef DEVICE_POLLING
4342                 if (if_getcapenable(ifp) & IFCAP_POLLING) {
4343                         if (sc->rxcycles <= 0)
4344                                 break;
4345                         sc->rxcycles--;
4346                 }
4347 #endif
4348
4349                 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
4350
4351                 rxidx = cur_rx->bge_idx;
4352                 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
4353
4354                 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
4355                     cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
4356                         have_tag = 1;
4357                         vlan_tag = cur_rx->bge_vlan_tag;
4358                 }
4359
4360                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
4361                         jumbocnt++;
4362                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
4363                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4364                                 bge_rxreuse_jumbo(sc, rxidx);
4365                                 continue;
4366                         }
4367                         if (bge_newbuf_jumbo(sc, rxidx) != 0) {
4368                                 bge_rxreuse_jumbo(sc, rxidx);
4369                                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
4370                                 continue;
4371                         }
4372                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4373                 } else {
4374                         stdcnt++;
4375                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
4376                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4377                                 bge_rxreuse_std(sc, rxidx);
4378                                 continue;
4379                         }
4380                         if (bge_newbuf_std(sc, rxidx) != 0) {
4381                                 bge_rxreuse_std(sc, rxidx);
4382                                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
4383                                 continue;
4384                         }
4385                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4386                 }
4387
4388                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
4389 #ifndef __NO_STRICT_ALIGNMENT
4390                 /*
4391                  * For architectures with strict alignment we must make sure
4392                  * the payload is aligned.
4393                  */
4394                 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
4395                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
4396                             cur_rx->bge_len);
4397                         m->m_data += ETHER_ALIGN;
4398                 }
4399 #endif
4400                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
4401                 m->m_pkthdr.rcvif = ifp;
4402
4403                 if (if_getcapenable(ifp) & IFCAP_RXCSUM)
4404                         bge_rxcsum(sc, cur_rx, m);
4405
4406                 /*
4407                  * If we received a packet with a vlan tag,
4408                  * attach that information to the packet.
4409                  */
4410                 if (have_tag) {
4411                         m->m_pkthdr.ether_vtag = vlan_tag;
4412                         m->m_flags |= M_VLANTAG;
4413                 }
4414
4415                 if (holdlck != 0) {
4416                         BGE_UNLOCK(sc);
4417                         if_input(ifp, m);
4418                         BGE_LOCK(sc);
4419                 } else
4420                         if_input(ifp, m);
4421                 rx_npkts++;
4422
4423                 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4424                         return (rx_npkts);
4425         }
4426
4427         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
4428             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
4429         if (stdcnt > 0)
4430                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
4431                     sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
4432
4433         if (jumbocnt > 0)
4434                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
4435                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
4436
4437         sc->bge_rx_saved_considx = rx_cons;
4438         bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
4439         if (stdcnt)
4440                 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
4441                     BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
4442         if (jumbocnt)
4443                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
4444                     BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
4445 #ifdef notyet
4446         /*
4447          * This register wraps very quickly under heavy packet drops.
4448          * If you need correct statistics, you can enable this check.
4449          */
4450         if (BGE_IS_5705_PLUS(sc))
4451                 if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
4452 #endif
4453         return (rx_npkts);
4454 }
4455
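     /*
      * Translate the controller's receive checksum flags into mbuf
      * csum_flags for the stack; 5717-class chips report IPv6 frames
      * and checksum errors differently from older parts.
      */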
4456 static void
4457 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
4458 {
4459
4460         if (BGE_IS_5717_PLUS(sc)) {
4461                 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
4462                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4463                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4464                                 if ((cur_rx->bge_error_flag &
4465                                     BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
4466                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4467                         }
4468                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
4469                                 m->m_pkthdr.csum_data =
4470                                     cur_rx->bge_tcp_udp_csum;
4471                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4472                                     CSUM_PSEUDO_HDR;
4473                         }
4474                 }
4475         } else {
4476                 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4477                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4478                         if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
4479                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4480                 }
4481                 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4482                     m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
4483                         m->m_pkthdr.csum_data =
4484                             cur_rx->bge_tcp_udp_csum;
4485                         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4486                             CSUM_PSEUDO_HDR;
4487                 }
4488         }
4489 }
4490
4491 static void
4492 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
4493 {
4494         struct bge_tx_bd *cur_tx;
4495         if_t ifp;
4496
4497         BGE_LOCK_ASSERT(sc);
4498
4499         /* Nothing to do. */
4500         if (sc->bge_tx_saved_considx == tx_cons)
4501                 return;
4502
4503         ifp = sc->bge_ifp;
4504
4505         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4506             sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
4507         /*
4508          * Go through our tx ring and free mbufs for those
4509          * frames that have been sent.
4510          */
4511         while (sc->bge_tx_saved_considx != tx_cons) {
4512                 uint32_t                idx;
4513
4514                 idx = sc->bge_tx_saved_considx;
4515                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
4516                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4517                         if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
4518                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
4519                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
4520                             sc->bge_cdata.bge_tx_dmamap[idx],
4521                             BUS_DMASYNC_POSTWRITE);
4522                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
4523                             sc->bge_cdata.bge_tx_dmamap[idx]);
4524                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
4525                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
4526                 }
4527                 sc->bge_txcnt--;
4528                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4529         }
4530
4531         if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
4532         if (sc->bge_txcnt == 0)
4533                 sc->bge_timer = 0;
4534 }
4535
4536 #ifdef DEVICE_POLLING
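     /*
      * DEVICE_POLLING entry point: pull the producer/consumer indices
      * from the status block and process RX/TX; link state is only
      * re-examined on POLL_AND_CHECK_STATUS cycles.
      */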
4537 static int
4538 bge_poll(if_t ifp, enum poll_cmd cmd, int count)
4539 {
4540         struct bge_softc *sc = if_getsoftc(ifp);
4541         uint16_t rx_prod, tx_cons;
4542         uint32_t statusword;
4543         int rx_npkts = 0;
4544
4545         BGE_LOCK(sc);
4546         if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
4547                 BGE_UNLOCK(sc);
4548                 return (rx_npkts);
4549         }
4550
4551         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4552             sc->bge_cdata.bge_status_map,
4553             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4554         /* Fetch updates from the status block. */
4555         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4556         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4557
4558         statusword = sc->bge_ldata.bge_status_block->bge_status;
4559         /* Clear the status so the next pass only sees the changes. */
4560         sc->bge_ldata.bge_status_block->bge_status = 0;
4561
4562         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4563             sc->bge_cdata.bge_status_map,
4564             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4565
4566         /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
4567         if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
4568                 sc->bge_link_evt++;
4569
4570         if (cmd == POLL_AND_CHECK_STATUS)
4571                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4572                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4573                     sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
4574                         bge_link_upd(sc);
4575
4576         sc->rxcycles = count;
4577         rx_npkts = bge_rxeof(sc, rx_prod, 1);
4578         if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
4579                 BGE_UNLOCK(sc);
4580                 return (rx_npkts);
4581         }
4582         bge_txeof(sc, tx_cons);
4583         if (!if_sendq_empty(ifp))
4584                 bge_start_locked(ifp);
4585
4586         BGE_UNLOCK(sc);
4587         return (rx_npkts);
4588 }
4589 #endif /* DEVICE_POLLING */
4590
4591 static int
4592 bge_msi_intr(void *arg)
4593 {
4594         struct bge_softc *sc;
4595
4596         sc = (struct bge_softc *)arg;
4597         /*
4598          * This interrupt is not shared and the controller has
4599          * already disabled further interrupts.
4600          */
4601         taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4602         return (FILTER_HANDLED);
4603 }
4604
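     /*
      * Taskqueue handler backing the MSI filter: reads the tagged
      * status block, acks the interrupt through BGE_MBX_IRQ0_LO and
      * completes any pending RX/TX work.
      */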
4605 static void
4606 bge_intr_task(void *arg, int pending)
4607 {
4608         struct bge_softc *sc;
4609         if_t ifp;
4610         uint32_t status, status_tag;
4611         uint16_t rx_prod, tx_cons;
4612
4613         sc = (struct bge_softc *)arg;
4614         ifp = sc->bge_ifp;
4615
4616         BGE_LOCK(sc);
4617         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4618                 BGE_UNLOCK(sc);
4619                 return;
4620         }
4621
4622         /* Get updated status block. */
4623         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4624             sc->bge_cdata.bge_status_map,
4625             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4626
4627         /* Save producer/consumer indices. */
4628         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4629         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4630         status = sc->bge_ldata.bge_status_block->bge_status;
4631         status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4632         /* Dirty the status flag. */
4633         sc->bge_ldata.bge_status_block->bge_status = 0;
4634         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4635             sc->bge_cdata.bge_status_map,
4636             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4637         if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4638                 status_tag = 0;
4639
4640         if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4641                 bge_link_upd(sc);
4642
4643         /* Let controller work. */
4644         bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4645
4646         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
4647             sc->bge_rx_saved_considx != rx_prod) {
4648                 /* Check RX return ring producer/consumer. */
4649                 BGE_UNLOCK(sc);
4650                 bge_rxeof(sc, rx_prod, 0);
4651                 BGE_LOCK(sc);
4652         }
4653         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4654                 /* Check TX ring producer/consumer. */
4655                 bge_txeof(sc, tx_cons);
4656                 if (!if_sendq_empty(ifp))
4657                         bge_start_locked(ifp);
4658         }
4659         BGE_UNLOCK(sc);
4660 }
4661
4662 static void
4663 bge_intr(void *xsc)
4664 {
4665         struct bge_softc *sc;
4666         if_t ifp;
4667         uint32_t statusword;
4668         uint16_t rx_prod, tx_cons;
4669
4670         sc = xsc;
4671
4672         BGE_LOCK(sc);
4673
4674         ifp = sc->bge_ifp;
4675
4676 #ifdef DEVICE_POLLING
4677         if (if_getcapenable(ifp) & IFCAP_POLLING) {
4678                 BGE_UNLOCK(sc);
4679                 return;
4680         }
4681 #endif
4682
4683         /*
4684          * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
4685          * disable interrupts by writing nonzero like we used to, since with
4686          * our current organization this just gives complications and
4687          * pessimizations for re-enabling interrupts.  We used to have races
4688          * instead of the necessary complications.  Disabling interrupts
4689          * would just reduce the chance of a status update while we are
4690          * running (by switching to the interrupt-mode coalescence
4691          * parameters), but this chance is already very low so it is more
4692          * efficient to get another interrupt than prevent it.
4693          *
4694          * We do the ack first to ensure another interrupt if there is a
4695          * status update after the ack.  We don't check for the status
4696          * changing later because it is more efficient to get another
4697          * interrupt than prevent it, not quite as above (not checking is
4698          * a smaller optimization than not toggling the interrupt enable,
4699          * since checking doesn't involve PCI accesses and toggling requires
4700          * the status check).  So toggling would probably be a pessimization
4701          * even with MSI.  It would only be needed for using a task queue.
4702          */
4703         bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4704
4705         /*
4706          * Do the mandatory PCI flush as well as get the link status.
4707          */
4708         statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4709
4710         /* Make sure the descriptor ring indexes are coherent. */
4711         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4712             sc->bge_cdata.bge_status_map,
4713             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4714         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4715         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4716         sc->bge_ldata.bge_status_block->bge_status = 0;
4717         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4718             sc->bge_cdata.bge_status_map,
4719             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4720
4721         if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4722             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4723             statusword || sc->bge_link_evt)
4724                 bge_link_upd(sc);
4725
4726         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4727                 /* Check RX return ring producer/consumer. */
4728                 bge_rxeof(sc, rx_prod, 1);
4729         }
4730
4731         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4732                 /* Check TX ring producer/consumer. */
4733                 bge_txeof(sc, tx_cons);
4734         }
4735
4736         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
4737             !if_sendq_empty(ifp))
4738                 bge_start_locked(ifp);
4739
4740         BGE_UNLOCK(sc);
4741 }
4742
4743 static void
4744 bge_asf_driver_up(struct bge_softc *sc)
4745 {
4746         if (sc->bge_asf_mode & ASF_STACKUP) {
4747                 /* Send ASF heartbeat approx. every 2s. */
4748                 if (sc->bge_asf_count)
4749                         sc->bge_asf_count--;
4750                 else {
4751                         sc->bge_asf_count = 2;
4752                         bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4753                             BGE_FW_CMD_DRV_ALIVE);
4754                         bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4755                         bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4756                             BGE_FW_HB_TIMEOUT_SEC);
4757                         CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4758                             CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4759                             BGE_RX_CPU_DRV_EVENT);
4760                 }
4761         }
4762 }
4763
4764 static void
4765 bge_tick(void *xsc)
4766 {
4767         struct bge_softc *sc = xsc;
4768         struct mii_data *mii = NULL;
4769
4770         BGE_LOCK_ASSERT(sc);
4771
4772         /* Synchronize with possible callout reset/stop. */
4773         if (callout_pending(&sc->bge_stat_ch) ||
4774             !callout_active(&sc->bge_stat_ch))
4775                 return;
4776
4777         if (BGE_IS_5705_PLUS(sc))
4778                 bge_stats_update_regs(sc);
4779         else
4780                 bge_stats_update(sc);
4781
4782         /* XXX Add APE heartbeat check here? */
4783
4784         if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4785                 mii = device_get_softc(sc->bge_miibus);
4786                 /*
4787                  * Do not touch PHY if we have link up. This could break
4788                  * IPMI/ASF mode or produce extra input errors
4789                  * (extra errors were reported for bcm5701 & bcm5704).
4790                  */
4791                 if (!sc->bge_link)
4792                         mii_tick(mii);
4793         } else {
4794                 /*
4795                  * Since in TBI mode auto-polling can't be used we should
4796                  * poll link status manually. Here we register a pending
4797                  * link event and trigger an interrupt.
4798                  */
4799 #ifdef DEVICE_POLLING
4800                 /* In polling mode we poll link state in bge_poll(). */
4801                 if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING))
4802 #endif
4803                 {
4804                 sc->bge_link_evt++;
4805                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4806                     sc->bge_flags & BGE_FLAG_5788)
4807                         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4808                 else
4809                         BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4810                 }
4811         }
4812
4813         bge_asf_driver_up(sc);
4814         bge_watchdog(sc);
4815
4816         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4817 }
4818
4819 static void
4820 bge_stats_update_regs(struct bge_softc *sc)
4821 {
4822         if_t ifp;
4823         struct bge_mac_stats *stats;
4824         uint32_t val;
4825
4826         ifp = sc->bge_ifp;
4827         stats = &sc->bge_mac_stats;
4828
4829         stats->ifHCOutOctets +=
4830             CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4831         stats->etherStatsCollisions +=
4832             CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4833         stats->outXonSent +=
4834             CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4835         stats->outXoffSent +=
4836             CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4837         stats->dot3StatsInternalMacTransmitErrors +=
4838             CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4839         stats->dot3StatsSingleCollisionFrames +=
4840             CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4841         stats->dot3StatsMultipleCollisionFrames +=
4842             CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4843         stats->dot3StatsDeferredTransmissions +=
4844             CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4845         stats->dot3StatsExcessiveCollisions +=
4846             CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4847         stats->dot3StatsLateCollisions +=
4848             CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4849         stats->ifHCOutUcastPkts +=
4850             CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4851         stats->ifHCOutMulticastPkts +=
4852             CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4853         stats->ifHCOutBroadcastPkts +=
4854             CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4855
4856         stats->ifHCInOctets +=
4857             CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4858         stats->etherStatsFragments +=
4859             CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4860         stats->ifHCInUcastPkts +=
4861             CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4862         stats->ifHCInMulticastPkts +=
4863             CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4864         stats->ifHCInBroadcastPkts +=
4865             CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4866         stats->dot3StatsFCSErrors +=
4867             CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4868         stats->dot3StatsAlignmentErrors +=
4869             CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4870         stats->xonPauseFramesReceived +=
4871             CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4872         stats->xoffPauseFramesReceived +=
4873             CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4874         stats->macControlFramesReceived +=
4875             CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4876         stats->xoffStateEntered +=
4877             CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4878         stats->dot3StatsFramesTooLong +=
4879             CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4880         stats->etherStatsJabbers +=
4881             CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4882         stats->etherStatsUndersizePkts +=
4883             CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4884
4885         stats->FramesDroppedDueToFilters +=
4886             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4887         stats->DmaWriteQueueFull +=
4888             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4889         stats->DmaWriteHighPriQueueFull +=
4890             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4891         stats->NoMoreRxBDs +=
4892             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4893         /*
4894          * XXX
4895          * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
4896          * counter of BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4897          * includes the number of unwanted multicast frames.  This comes
4898          * from a silicon bug, and the known workaround to get a rough
4899          * (not exact) counter is to enable interrupt on MBUF low water
4900          * attention.  This can be accomplished by setting the
4901          * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
4902          * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
4903          * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4904          * However, that change would generate more interrupts and
4905          * there is still a possibility of losing multiple frames
4906          * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4907          * Given that the workaround would still not yield a correct
4908          * counter, I don't think it's worth implementing.  So
4909          * ignore reading the counter on controllers that have the
4910          * silicon bug.
4911          */
4912         if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4913             sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4914             sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4915                 stats->InputDiscards +=
4916                     CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4917         stats->InputErrors +=
4918             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4919         stats->RecvThresholdHit +=
4920             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4921
4922         if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
4923                 /*
4924                  * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
4925                  * frames, it's safe to disable workaround for DMA engine's
4926                  * miscalculation of TXMBUF space.
4927                  */
4928                 if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
4929                     stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
4930                         val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
4931                         if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
4932                                 val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
4933                         else
4934                                 val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
4935                         CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
4936                         sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
4937                 }
4938         }
4939 }
4940
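     /*
      * The MAC statistics registers are clear-on-read, so reading each
      * one once is sufficient to zero the counters.
      */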
4941 static void
4942 bge_stats_clear_regs(struct bge_softc *sc)
4943 {
4944
4945         CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4946         CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4947         CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4948         CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4949         CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4950         CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4951         CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4952         CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4953         CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4954         CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4955         CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4956         CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4957         CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4958
4959         CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4960         CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4961         CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4962         CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4963         CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4964         CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4965         CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4966         CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4967         CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4968         CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4969         CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4970         CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4971         CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4972         CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4973
4974         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4975         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4976         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4977         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4978         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4979         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4980         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4981 }
4982
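     /*
      * Legacy statistics path for pre-5705 chips: counters live in the
      * NIC's statistics block, reached through the memory window, and
      * are folded into the ifnet counters as deltas from the last read.
      */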
4983 static void
4984 bge_stats_update(struct bge_softc *sc)
4985 {
4986         if_t ifp;
4987         bus_size_t stats;
4988         uint32_t cnt;   /* current register value */
4989
4990         ifp = sc->bge_ifp;
4991
4992         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4993
4994 #define READ_STAT(sc, stats, stat) \
4995         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4996
4997         cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4998         if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions);
4999         sc->bge_tx_collisions = cnt;
5000
5001         cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
5002         if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds);
5003         sc->bge_rx_nobds = cnt;
5004         cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
5005         if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs);
5006         sc->bge_rx_inerrs = cnt;
5007         cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
5008         if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards);
5009         sc->bge_rx_discards = cnt;
5010
5011         cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
5012         if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards);
5013         sc->bge_tx_discards = cnt;
5014
5015 #undef  READ_STAT
5016 }
5017
5018 /*
5019  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
5020  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
5021  * but when such padded frames employ the bge IP/TCP checksum offload,
5022  * the hardware checksum assist gives incorrect results (possibly
5023  * from incorporating its own padding into the UDP/TCP checksum; who knows).
5024  * If we pad such runts with zeros, the onboard checksum comes out correct.
5025  */
5026 static __inline int
5027 bge_cksum_pad(struct mbuf *m)
5028 {
5029         int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
5030         struct mbuf *last;
5031
5032         /* If there's only the packet-header and we can pad there, use it. */
5033         if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
5034             M_TRAILINGSPACE(m) >= padlen) {
5035                 last = m;
5036         } else {
5037                 /*
5038                  * Walk packet chain to find last mbuf. We will either
5039                  * pad there, or append a new mbuf and pad it.
5040                  */
5041                 for (last = m; last->m_next != NULL; last = last->m_next);
5042                 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
5043                         /* Allocate new empty mbuf, pad it. Compact later. */
5044                         struct mbuf *n;
5045
5046                         MGET(n, M_NOWAIT, MT_DATA);
5047                         if (n == NULL)
5048                                 return (ENOBUFS);
5049                         n->m_len = 0;
5050                         last->m_next = n;
5051                         last = n;
5052                 }
5053         }
5054
5055         /* Now zero the pad area, to avoid the bge cksum-assist bug. */
5056         memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
5057         last->m_len += padlen;
5058         m->m_pkthdr.len += padlen;
5059
5060         return (0);
5061 }
5062
5063 static struct mbuf *
5064 bge_check_short_dma(struct mbuf *m)
5065 {
5066         struct mbuf *n;
5067         int found;
5068
5069         /*
5070          * If the device receives two back-to-back send BDs with less
5071          * than or equal to 8 total bytes then the device may hang.  The
5072          * two back-to-back send BDs must be in the same frame for this
5073          * failure to occur.  Scan the mbuf chain for two back-to-back
5074          * send BDs; if this is the case, allocate a new mbuf and copy
5075          * the frame to work around the silicon bug.
5076          */
5077         for (n = m, found = 0; n != NULL; n = n->m_next) {
5078                 if (n->m_len < 8) {
5079                         found++;
5080                         if (found > 1)
5081                                 break;
5082                         continue;
5083                 }
5084                 found = 0;
5085         }
5086
5087         if (found > 1) {
5088                 n = m_defrag(m, M_NOWAIT);
5089                 if (n == NULL)
5090                         m_freem(m);
5091         } else
5092                 n = m;
5093         return (n);
5094 }
5095
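     /*
      * Prepare an outbound frame for hardware TSO: pull up and fix the
      * IP/TCP headers, clear the checksums the hardware recomputes and
      * encode the MSS and header length into the send BD's
      * bge_mss/bge_flags fields.
      */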
5096 static struct mbuf *
5097 bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
5098     uint16_t *flags)
5099 {
5100         struct ip *ip;
5101         struct tcphdr *tcp;
5102         struct mbuf *n;
5103         uint16_t hlen;
5104         uint32_t poff;
5105
5106         if (M_WRITABLE(m) == 0) {
5107                 /* Get a writable copy. */
5108                 n = m_dup(m, M_NOWAIT);
5109                 m_freem(m);
5110                 if (n == NULL)
5111                         return (NULL);
5112                 m = n;
5113         }
5114         m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
5115         if (m == NULL)
5116                 return (NULL);
5117         ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
5118         poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
5119         m = m_pullup(m, poff + sizeof(struct tcphdr));
5120         if (m == NULL)
5121                 return (NULL);
5122         tcp = (struct tcphdr *)(mtod(m, char *) + poff);
5123         m = m_pullup(m, poff + (tcp->th_off << 2));
5124         if (m == NULL)
5125                 return (NULL);
5126         /*
5127          * It seems the controller doesn't modify the IP length and TCP
5128          * pseudo checksum; these, as computed by the upper stack, should be 0.
5129          */
5130         *mss = m->m_pkthdr.tso_segsz;
5131         ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
5132         ip->ip_sum = 0;
5133         ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
5134         /* Clear pseudo checksum computed by TCP stack. */
5135         tcp = (struct tcphdr *)(mtod(m, char *) + poff);
5136         tcp->th_sum = 0;
5137         /*
5138          * Broadcom controllers use a different descriptor format for
5139          * TSO depending on ASIC revision. Due to TSO-capable firmware
5140          * license issues and the lower performance of firmware based
5141          * TSO, we only support hardware based TSO.
5142          */
5143         /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
5144         hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
5145         if (sc->bge_flags & BGE_FLAG_TSO3) {
5146                 /*
5147                  * For BCM5717 and newer controllers, hardware based TSO
5148                  * uses the 14 lower bits of the bge_mss field to store the
5149                  * MSS and the upper 2 bits to store the lowest 2 bits of
5150                  * the IP/TCP header length.  The upper 6 bits of the header
5151                  * length are stored in the bge_flags[14:10,4] field.  Jumbo
5152                  * frames are supported.
5153                  */
5154                 *mss |= ((hlen & 0x3) << 14);
5155                 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
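                     /*
                      * For example, standard 20-byte IP and TCP headers
                      * give hlen = 40 >> 2 = 10 words: bits 1:0 land in
                      * bge_mss[15:14], bit 2 in bge_flags[4] and bits 7:3
                      * in bge_flags[14:10].
                      */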
5156         } else {
5157                 /*
5158                  * For BCM5755 and newer controllers, hardware based TSO uses
5159                  * the lower 11 bits to store the MSS and the upper 5 bits to
5160                  * store the IP/TCP header length. Jumbo frames are not
5161                  * supported.
5162                  */
5163                 *mss |= (hlen << 11);
5164         }
5165         return (m);
5166 }
5167
5168 /*
5169  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
5170  * pointers to descriptors.
5171  */
5172 static int
5173 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
5174 {
5175         bus_dma_segment_t       segs[BGE_NSEG_NEW];
5176         bus_dmamap_t            map;
5177         struct bge_tx_bd        *d;
5178         struct mbuf             *m = *m_head;
5179         uint32_t                idx = *txidx;
5180         uint16_t                csum_flags, mss, vlan_tag;
5181         int                     nsegs, i, error;
5182
5183         csum_flags = 0;
5184         mss = 0;
5185         vlan_tag = 0;
5186         if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
5187             m->m_next != NULL) {
5188                 *m_head = bge_check_short_dma(m);
5189                 if (*m_head == NULL)
5190                         return (ENOBUFS);
5191                 m = *m_head;
5192         }
5193         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
5194                 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
5195                 if (*m_head == NULL)
5196                         return (ENOBUFS);
5197                 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
5198                     BGE_TXBDFLAG_CPU_POST_DMA;
5199         } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
5200                 if (m->m_pkthdr.csum_flags & CSUM_IP)
5201                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
5202                 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
5203                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
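                        /*
                         * Short frames are padded by hand below; the
                         * assumption (not stated here) is that the
                         * checksum engine miscomputes TCP/UDP checksums
                         * on frames the MAC pads out to the 60-byte
                         * minimum, so bge_cksum_pad() zero-pads them in
                         * software first.
                         */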
5204                         if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
5205                             (error = bge_cksum_pad(m)) != 0) {
5206                                 m_freem(m);
5207                                 *m_head = NULL;
5208                                 return (error);
5209                         }
5210                 }
5211         }
5212
5213         if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
5214                 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
5215                     m->m_pkthdr.len > ETHER_MAX_LEN)
5216                         csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
5217                 if (sc->bge_forced_collapse > 0 &&
5218                     (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
5219                         /*
5220                          * Forcibly collapse mbuf chains to work around a
5221                          * hardware limitation: only a single outstanding
5222                          * DMA read operation is supported.
5223                          */
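                        /*
                         * Tradeoff sketch (assumed relative costs):
                         * m_defrag() copies the whole chain into as few
                         * mbufs as possible, while m_collapse() stops as
                         * soon as the chain fits in bge_forced_collapse
                         * segments, which is cheaper per packet.
                         */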
5224                         if (sc->bge_forced_collapse == 1)
5225                                 m = m_defrag(m, M_NOWAIT);
5226                         else
5227                                 m = m_collapse(m, M_NOWAIT,
5228                                     sc->bge_forced_collapse);
5229                         if (m == NULL)
5230                                 m = *m_head;
5231                         *m_head = m;
5232                 }
5233         }
5234
5235         map = sc->bge_cdata.bge_tx_dmamap[idx];
5236         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
5237             &nsegs, BUS_DMA_NOWAIT);
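        /*
         * EFBIG from the load means the chain needs more than
         * BGE_NSEG_NEW segments: collapse it once and retry.  Any other
         * error is handed back with the mbuf left intact so the caller
         * can requeue it.
         */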
5238         if (error == EFBIG) {
5239                 m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
5240                 if (m == NULL) {
5241                         m_freem(*m_head);
5242                         *m_head = NULL;
5243                         return (ENOBUFS);
5244                 }
5245                 *m_head = m;
5246                 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
5247                     m, segs, &nsegs, BUS_DMA_NOWAIT);
5248                 if (error) {
5249                         m_freem(m);
5250                         *m_head = NULL;
5251                         return (error);
5252                 }
5253         } else if (error != 0)
5254                 return (error);
5255
5256         /* Check if we have enough free send BDs. */
5257         if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
5258                 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
5259                 return (ENOBUFS);
5260         }
5261
5262         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
5263
5264         if (m->m_flags & M_VLANTAG) {
5265                 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
5266                 vlan_tag = m->m_pkthdr.ether_vtag;
5267         }
5268
5269         if (sc->bge_asicrev == BGE_ASICREV_BCM5762 &&
5270             (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
5271                 /*
5272                  * 5725 family of devices corrupts TSO packets when TSO DMA
5273                  * buffers cross into regions which are within MSS bytes of
5274                  * a 4GB boundary.  If we encounter the condition, drop the
5275                  * packet.
5276                  */
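                /*
                 * Intent of the test below, as a sketch: summing the low
                 * 32 address bits, the segment length, and the MSS in
                 * 32-bit arithmetic wraps (sum < lo) exactly when the
                 * buffer ends within MSS bytes of a 4GB boundary, e.g.
                 * lo = 0xFFFFF000, len = 0x1000, mss = 0x5B4 gives
                 * 0x5B4 < 0xFFFFF000.
                 */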
5277                 for (i = 0; ; i++) {
5278                         d = &sc->bge_ldata.bge_tx_ring[idx];
5279                         d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
5280                         d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
5281                         d->bge_len = segs[i].ds_len;
5282                         if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss <
5283                             d->bge_addr.bge_addr_lo)
5284                                 break;
5285                         d->bge_flags = csum_flags;
5286                         d->bge_vlan_tag = vlan_tag;
5287                         d->bge_mss = mss;
5288                         if (i == nsegs - 1)
5289                                 break;
5290                         BGE_INC(idx, BGE_TX_RING_CNT);
5291                 }
5292                 if (i != nsegs - 1) {
5293                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map,
5294                             BUS_DMASYNC_POSTWRITE);
5295                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
5296                         m_freem(*m_head);
5297                         *m_head = NULL;
5298                         return (EIO);
5299                 }
5300         } else {
5301                 for (i = 0; ; i++) {
5302                         d = &sc->bge_ldata.bge_tx_ring[idx];
5303                         d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
5304                         d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
5305                         d->bge_len = segs[i].ds_len;
5306                         d->bge_flags = csum_flags;
5307                         d->bge_vlan_tag = vlan_tag;
5308                         d->bge_mss = mss;
5309                         if (i == nsegs - 1)
5310                                 break;
5311                         BGE_INC(idx, BGE_TX_RING_CNT);
5312                 }
5313         }
5314
5315         /* Mark the last segment as end of packet... */
5316         d->bge_flags |= BGE_TXBDFLAG_END;
5317
5318         /*
5319          * Ensure that the map for this transmission
5320          * is placed at the array index of the last descriptor
5321          * in this chain.
5322          */
5323         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
5324         sc->bge_cdata.bge_tx_dmamap[idx] = map;
5325         sc->bge_cdata.bge_tx_chain[idx] = m;
5326         sc->bge_txcnt += nsegs;
5327
5328         BGE_INC(idx, BGE_TX_RING_CNT);
5329         *txidx = idx;
5330
5331         return (0);
5332 }
5333
5334 /*
5335  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
5336  * to the mbuf data regions directly in the transmit descriptors.
5337  */
5338 static void
5339 bge_start_locked(if_t ifp)
5340 {
5341         struct bge_softc *sc;
5342         struct mbuf *m_head;
5343         uint32_t prodidx;
5344         int count;
5345
5346         sc = if_getsoftc(ifp);
5347         BGE_LOCK_ASSERT(sc);
5348
5349         if (!sc->bge_link ||
5350             (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
5351             IFF_DRV_RUNNING)
5352                 return;
5353
5354         prodidx = sc->bge_tx_prodidx;
5355
5356         for (count = 0; !if_sendq_empty(ifp);) {
5357                 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
5358                         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5359                         break;
5360                 }
5361                 m_head = if_dequeue(ifp);
5362                 if (m_head == NULL)
5363                         break;
5364
5365                 /*
5366                  * Pack the data into the transmit ring. If we
5367                  * don't have room, set the OACTIVE flag and wait
5368                  * for the NIC to drain the ring.
5369                  */
5370                 if (bge_encap(sc, &m_head, &prodidx)) {
5371                         if (m_head == NULL)
5372                                 break;
5373                         if_sendq_prepend(ifp, m_head);
5374                         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5375                         break;
5376                 }
5377                 ++count;
5378
5379                 /*
5380                  * If there's a BPF listener, bounce a copy of this frame
5381                  * to him.
5382                  */
5383                 if_bpfmtap(ifp, m_head);
5384         }
5385
5386         if (count > 0) {
5387                 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
5388                     sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
5389                 /* Transmit. */
5390                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5391                 /* 5700 b2 errata */
5392                 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
5393                         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5394
5395                 sc->bge_tx_prodidx = prodidx;
5396
5397                 /*
5398                  * Set a timeout in case the chip goes out to lunch.
5399                  */
5400                 sc->bge_timer = BGE_TX_TIMEOUT;
5401         }
5402 }
5403
5404 /*
5405  * Main transmit entry point: acquire the driver lock and dispatch
5406  * to bge_start_locked().
5407  */
5408 static void
5409 bge_start(if_t ifp)
5410 {
5411         struct bge_softc *sc;
5412
5413         sc = if_getsoftc(ifp);
5414         BGE_LOCK(sc);
5415         bge_start_locked(ifp);
5416         BGE_UNLOCK(sc);
5417 }
5418
5419 static void
5420 bge_init_locked(struct bge_softc *sc)
5421 {
5422         if_t ifp;
5423         uint16_t *m;
5424         uint32_t mode;
5425
5426         BGE_LOCK_ASSERT(sc);
5427
5428         ifp = sc->bge_ifp;
5429
5430         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
5431                 return;
5432
5433         /* Cancel pending I/O and flush buffers. */
5434         bge_stop(sc);
5435
5436         bge_stop_fw(sc);
5437         bge_sig_pre_reset(sc, BGE_RESET_START);
5438         bge_reset(sc);
5439         bge_sig_legacy(sc, BGE_RESET_START);
5440         bge_sig_post_reset(sc, BGE_RESET_START);
5441
5442         bge_chipinit(sc);
5443
5444         /*
5445          * Init the various state machines, ring
5446          * control blocks and firmware.
5447          */
5448         if (bge_blockinit(sc)) {
5449                 device_printf(sc->bge_dev, "initialization failure\n");
5450                 return;
5451         }
5452
5453         ifp = sc->bge_ifp;
5454
5455         /* Specify MTU. */
5456         CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) +
5457             ETHER_HDR_LEN + ETHER_CRC_LEN +
5458             (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
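        /*
         * For example, with the default 1500-byte MTU and VLAN_MTU
         * enabled this programs 1500 + 14 + 4 + 4 = 1522 bytes, the
         * classic maximum VLAN-tagged frame size.
         */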
5459
5460         /* Load our MAC address. */
5461         m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
5462         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
5463         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
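        /*
         * The station address is handed over as three big-endian 16-bit
         * words: the first word goes into ADDR1_LO and the remaining
         * two are packed into ADDR1_HI, hence the htons() on each word.
         */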
5464
5465         /* Program promiscuous mode. */
5466         bge_setpromisc(sc);
5467
5468         /* Program multicast filter. */
5469         bge_setmulti(sc);
5470
5471         /* Program VLAN tag stripping. */
5472         bge_setvlan(sc);
5473
5474         /* Override UDP checksum offloading. */
5475         if (sc->bge_forced_udpcsum == 0)
5476                 sc->bge_csum_features &= ~CSUM_UDP;
5477         else
5478                 sc->bge_csum_features |= CSUM_UDP;
5479         if (if_getcapabilities(ifp) & IFCAP_TXCSUM &&
5480             if_getcapenable(ifp) & IFCAP_TXCSUM) {
5481                 if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP));
5482                 if_sethwassistbits(ifp, sc->bge_csum_features, 0);
5483         }
5484
5485         /* Init RX ring. */
5486         if (bge_init_rx_ring_std(sc) != 0) {
5487                 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
5488                 bge_stop(sc);
5489                 return;
5490         }
5491
5492         /*
5493          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
5494          * memory to ensure that the chip has in fact read the first
5495          * entry of the ring.
5496          */
5497         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
5498                 uint32_t                v, i;
5499                 for (i = 0; i < 10; i++) {
5500                         DELAY(20);
5501                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
5502                         if (v == (MCLBYTES - ETHER_ALIGN))
5503                                 break;
5504                 }
5505                 if (i == 10)
5506                         device_printf(sc->bge_dev,
5507                             "5705 A0 chip failed to load RX ring\n");
5508         }
5509
5510         /* Init jumbo RX ring. */
5511         if (BGE_IS_JUMBO_CAPABLE(sc) &&
5512             if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + 
5513             ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
5514                 if (bge_init_rx_ring_jumbo(sc) != 0) {
5515                         device_printf(sc->bge_dev,
5516                             "no memory for jumbo Rx buffers.\n");
5517                         bge_stop(sc);
5518                         return;
5519                 }
5520         }
5521
5522         /* Init our RX return ring index. */
5523         sc->bge_rx_saved_considx = 0;
5524
5525         /* Init our RX/TX stat counters. */
5526         sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
5527
5528         /* Init TX ring. */
5529         bge_init_tx_ring(sc);
5530
5531         /* Enable TX MAC state machine lockup fix. */
5532         mode = CSR_READ_4(sc, BGE_TX_MODE);
5533         if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
5534                 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5535         if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
5536             sc->bge_asicrev == BGE_ASICREV_BCM5762) {
5537                 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5538                 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5539                     (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5540         }
5541         /* Turn on transmitter. */
5542         CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5543         DELAY(100);
5544
5545         /* Turn on receiver. */
5546         mode = CSR_READ_4(sc, BGE_RX_MODE);
5547         if (BGE_IS_5755_PLUS(sc))
5548                 mode |= BGE_RXMODE_IPV6_ENABLE;
5549         if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
5550                 mode |= BGE_RXMODE_IPV4_FRAG_FIX;
5551         CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
5552         DELAY(10);
5553
5554         /*
5555          * Set the number of good frames to receive after RX MBUF
5556          * Low Watermark has been reached. After the RX MAC receives
5557          * this number of frames, it will drop subsequent incoming
5558          * frames until the MBUF High Watermark is reached.
5559          */
5560         if (BGE_IS_57765_PLUS(sc))
5561                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
5562         else
5563                 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
5564
5565         /* Clear MAC statistics. */
5566         if (BGE_IS_5705_PLUS(sc))
5567                 bge_stats_clear_regs(sc);
5568
5569         /* Tell firmware we're alive. */
5570         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5571
5572 #ifdef DEVICE_POLLING
5573         /* Disable interrupts if we are polling. */
5574         if (if_getcapenable(ifp) & IFCAP_POLLING) {
5575                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5576                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5577                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5578         } else
5579 #endif
5580
5581         /* Enable host interrupts. */
5582         {
5583         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5584         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5585         bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5586         }
5587
5588         if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
5589         if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
5590
5591         bge_ifmedia_upd_locked(ifp);
5592
5593         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
5594 }
5595
5596 static void
5597 bge_init(void *xsc)
5598 {
5599         struct bge_softc *sc = xsc;
5600
5601         BGE_LOCK(sc);
5602         bge_init_locked(sc);
5603         BGE_UNLOCK(sc);
5604 }
5605
5606 /*
5607  * Set media options.
5608  */
5609 static int
5610 bge_ifmedia_upd(if_t ifp)
5611 {
5612         struct bge_softc *sc = if_getsoftc(ifp);
5613         int res;
5614
5615         BGE_LOCK(sc);
5616         res = bge_ifmedia_upd_locked(ifp);
5617         BGE_UNLOCK(sc);
5618
5619         return (res);
5620 }
5621
5622 static int
5623 bge_ifmedia_upd_locked(if_t ifp)
5624 {
5625         struct bge_softc *sc = if_getsoftc(ifp);
5626         struct mii_data *mii;
5627         struct mii_softc *miisc;
5628         struct ifmedia *ifm;
5629
5630         BGE_LOCK_ASSERT(sc);
5631
5632         ifm = &sc->bge_ifmedia;
5633
5634         /* If this is a 1000baseX NIC, enable the TBI port. */
5635         if (sc->bge_flags & BGE_FLAG_TBI) {
5636                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5637                         return (EINVAL);
5638                 switch (IFM_SUBTYPE(ifm->ifm_media)) {
5639                 case IFM_AUTO:
5640                         /*
5641                          * The BCM5704 ASIC appears to have a special
5642                          * mechanism for programming the autoneg
5643                          * advertisement registers in TBI mode.
5644                          */
5645                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5646                                 uint32_t sgdig;
5647                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5648                                 if (sgdig & BGE_SGDIGSTS_DONE) {
5649                                         CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5650                                         sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5651                                         sgdig |= BGE_SGDIGCFG_AUTO |
5652                                             BGE_SGDIGCFG_PAUSE_CAP |
5653                                             BGE_SGDIGCFG_ASYM_PAUSE;
5654                                         CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5655                                             sgdig | BGE_SGDIGCFG_SEND);
5656                                         DELAY(5);
5657                                         CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5658                                 }
5659                         }
5660                         break;
5661                 case IFM_1000_SX:
5662                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5663                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
5664                                     BGE_MACMODE_HALF_DUPLEX);
5665                         } else {
5666                                 BGE_SETBIT(sc, BGE_MAC_MODE,
5667                                     BGE_MACMODE_HALF_DUPLEX);
5668                         }
5669                         DELAY(40);
5670                         break;
5671                 default:
5672                         return (EINVAL);
5673                 }
5674                 return (0);
5675         }
5676
5677         sc->bge_link_evt++;
5678         mii = device_get_softc(sc->bge_miibus);
5679         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5680                 PHY_RESET(miisc);
5681         mii_mediachg(mii);
5682
5683         /*
5684          * Force an interrupt so that bge_link_upd is called if
5685          * needed and any pending link state attention is cleared.
5686          * Without this we get no further interrupts for link state
5687          * changes, the link never comes UP, and nothing can be sent
5688          * from bge_start_locked; the only other way to get things
5689          * working again is to receive a packet and take an RX
5690          * interrupt.
5691          * bge_tick should cover fiber cards, so this may be
5692          * unnecessary when BGE_FLAG_TBI is set, but since fiber is
5693          * polled anyway it does no harm.
5694          */
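        /*
         * Two ways to trigger that interrupt, per the chip family: on
         * BCM5700 and 5788 parts a local-control interrupt is asserted
         * directly, while other chips get a forced host-coalescing
         * event; either path ends up running the ISR.
         */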
5695         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5696             sc->bge_flags & BGE_FLAG_5788)
5697                 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5698         else
5699                 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5700
5701         return (0);
5702 }
5703
5704 /*
5705  * Report current media status.
5706  */
5707 static void
5708 bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
5709 {
5710         struct bge_softc *sc = if_getsoftc(ifp);
5711         struct mii_data *mii;
5712
5713         BGE_LOCK(sc);
5714
5715         if ((if_getflags(ifp) & IFF_UP) == 0) {
5716                 BGE_UNLOCK(sc);
5717                 return;
5718         }
5719         if (sc->bge_flags & BGE_FLAG_TBI) {
5720                 ifmr->ifm_status = IFM_AVALID;
5721                 ifmr->ifm_active = IFM_ETHER;
5722                 if (CSR_READ_4(sc, BGE_MAC_STS) &
5723                     BGE_MACSTAT_TBI_PCS_SYNCHED)
5724                         ifmr->ifm_status |= IFM_ACTIVE;
5725                 else {
5726                         ifmr->ifm_active |= IFM_NONE;
5727                         BGE_UNLOCK(sc);
5728                         return;
5729                 }
5730                 ifmr->ifm_active |= IFM_1000_SX;
5731                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5732                         ifmr->ifm_active |= IFM_HDX;
5733                 else
5734                         ifmr->ifm_active |= IFM_FDX;
5735                 BGE_UNLOCK(sc);
5736                 return;
5737         }
5738
5739         mii = device_get_softc(sc->bge_miibus);
5740         mii_pollstat(mii);
5741         ifmr->ifm_active = mii->mii_media_active;
5742         ifmr->ifm_status = mii->mii_media_status;
5743
5744         BGE_UNLOCK(sc);
5745 }
5746
5747 static int
5748 bge_ioctl(if_t ifp, u_long command, caddr_t data)
5749 {
5750         struct bge_softc *sc = if_getsoftc(ifp);
5751         struct ifreq *ifr = (struct ifreq *) data;
5752         struct mii_data *mii;
5753         int flags, mask, error = 0;
5754
5755         switch (command) {
5756         case SIOCSIFMTU:
5757                 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5758                     (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5759                         if (ifr->ifr_mtu < ETHERMIN ||
5760                             ifr->ifr_mtu > BGE_JUMBO_MTU) {
5761                                 error = EINVAL;
5762                                 break;
5763                         }
5764                 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5765                         error = EINVAL;
5766                         break;
5767                 }
5768                 BGE_LOCK(sc);
5769                 if (if_getmtu(ifp) != ifr->ifr_mtu) {
5770                         if_setmtu(ifp, ifr->ifr_mtu);
5771                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5772                                 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
5773                                 bge_init_locked(sc);
5774                         }
5775                 }
5776                 BGE_UNLOCK(sc);
5777                 break;
5778         case SIOCSIFFLAGS:
5779                 BGE_LOCK(sc);
5780                 if (if_getflags(ifp) & IFF_UP) {
5781                         /*
5782                          * If only the state of the PROMISC flag changed,
5783                          * then just use the 'set promisc mode' command
5784                          * instead of reinitializing the entire NIC. Doing
5785                          * a full re-init means reloading the firmware and
5786                          * waiting for it to start up, which may take a
5787                          * second or two.  Similarly for ALLMULTI.
5788                          */
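                        /*
                         * if_getflags(ifp) ^ sc->bge_if_flags yields
                         * exactly the flags that changed since the last
                         * SIOCSIFFLAGS, so only the affected filters are
                         * reprogrammed.
                         */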
5789                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5790                                 flags = if_getflags(ifp) ^ sc->bge_if_flags;
5791                                 if (flags & IFF_PROMISC)
5792                                         bge_setpromisc(sc);
5793                                 if (flags & IFF_ALLMULTI)
5794                                         bge_setmulti(sc);
5795                         } else
5796                                 bge_init_locked(sc);
5797                 } else {
5798                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5799                                 bge_stop(sc);
5800                         }
5801                 }
5802                 sc->bge_if_flags = if_getflags(ifp);
5803                 BGE_UNLOCK(sc);
5804                 error = 0;
5805                 break;
5806         case SIOCADDMULTI:
5807         case SIOCDELMULTI:
5808                 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5809                         BGE_LOCK(sc);
5810                         bge_setmulti(sc);
5811                         BGE_UNLOCK(sc);
5812                         error = 0;
5813                 }
5814                 break;
5815         case SIOCSIFMEDIA:
5816         case SIOCGIFMEDIA:
5817                 if (sc->bge_flags & BGE_FLAG_TBI) {
5818                         error = ifmedia_ioctl(ifp, ifr,
5819                             &sc->bge_ifmedia, command);
5820                 } else {
5821                         mii = device_get_softc(sc->bge_miibus);
5822                         error = ifmedia_ioctl(ifp, ifr,
5823                             &mii->mii_media, command);
5824                 }
5825                 break;
5826         case SIOCSIFCAP:
5827                 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
5828 #ifdef DEVICE_POLLING
5829                 if (mask & IFCAP_POLLING) {
5830                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
5831                                 error = ether_poll_register(bge_poll, ifp);
5832                                 if (error)
5833                                         return (error);
5834                                 BGE_LOCK(sc);
5835                                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5836                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5837                                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5838                                 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
5839                                 BGE_UNLOCK(sc);
5840                         } else {
5841                                 error = ether_poll_deregister(ifp);
5842                                 /* Enable interrupt even in error case */
5843                                 BGE_LOCK(sc);
5844                                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5845                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5846                                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5847                                 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
5848                                 BGE_UNLOCK(sc);
5849                         }
5850                 }
5851 #endif
5852                 if ((mask & IFCAP_TXCSUM) != 0 &&
5853                     (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
5854                         if_togglecapenable(ifp, IFCAP_TXCSUM);
5855                         if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
5856                                 if_sethwassistbits(ifp,
5857                                     sc->bge_csum_features, 0);
5858                         else
5859                                 if_sethwassistbits(ifp, 0,
5860                                     sc->bge_csum_features);
5861                 }
5862
5863                 if ((mask & IFCAP_RXCSUM) != 0 &&
5864                     (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
5865                         if_togglecapenable(ifp, IFCAP_RXCSUM);
5866
5867                 if ((mask & IFCAP_TSO4) != 0 &&
5868                     (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
5869                         if_togglecapenable(ifp, IFCAP_TSO4);
5870                         if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
5871                                 if_sethwassistbits(ifp, CSUM_TSO, 0);
5872                         else
5873                                 if_sethwassistbits(ifp, 0, CSUM_TSO);
5874                 }
5875
5876                 if (mask & IFCAP_VLAN_MTU) {
5877                         if_togglecapenable(ifp, IFCAP_VLAN_MTU);
5878                         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
5879                         bge_init(sc);
5880                 }
5881
5882                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5883                     (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
5884                         if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
5885                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5886                     (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
5887                         if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
5888                         if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
5889                                 if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
5890                         BGE_LOCK(sc);
5891                         bge_setvlan(sc);
5892                         BGE_UNLOCK(sc);
5893                 }
5894 #ifdef VLAN_CAPABILITIES
5895                 if_vlancap(ifp);
5896 #endif
5897                 break;
5898         default:
5899                 error = ether_ioctl(ifp, command, data);
5900                 break;
5901         }
5902
5903         return (error);
5904 }
5905
5906 static void
5907 bge_watchdog(struct bge_softc *sc)
5908 {
5909         if_t ifp;
5910         uint32_t status;
5911
5912         BGE_LOCK_ASSERT(sc);
5913
5914         if (sc->bge_timer == 0 || --sc->bge_timer)
5915                 return;
5916
5917         /* If pause frames are active then don't reset the hardware. */
5918         if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
5919                 status = CSR_READ_4(sc, BGE_RX_STS);
5920                 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
5921                         /*
5922                          * If link partner has us in XOFF state then wait for
5923                          * the condition to clear.
5924                          */
5925                         CSR_WRITE_4(sc, BGE_RX_STS, status);
5926                         sc->bge_timer = BGE_TX_TIMEOUT;
5927                         return;
5928                 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
5929                     (status & BGE_RXSTAT_RCVD_XON) != 0) {
5930                         /*
5931                          * XOFF and XON have both been seen; flow control is
5932                          * active, so give the transmitter more time.
5933                          */
5934                         CSR_WRITE_4(sc, BGE_RX_STS, status);
5935                         sc->bge_timer = BGE_TX_TIMEOUT;
5936                         return;
5937                 }
5938                 /*
5939                  * Any other condition is unexpected and the controller
5940                  * should be reset.
5941                  */
5942         }
5943
5944         ifp = sc->bge_ifp;
5945
5946         if_printf(ifp, "watchdog timeout -- resetting\n");
5947
5948         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
5949         bge_init_locked(sc);
5950
5951         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
5952 }
5953
5954 static void
5955 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5956 {
5957         int i;
5958
5959         BGE_CLRBIT(sc, reg, bit);
5960
5961         for (i = 0; i < BGE_TIMEOUT; i++) {
5962                 if ((CSR_READ_4(sc, reg) & bit) == 0)
5963                         return;
5964                 DELAY(100);
5965         }
5966 }
5967
5968 /*
5969  * Stop the adapter and free any mbufs allocated to the
5970  * RX and TX lists.
5971  */
5972 static void
5973 bge_stop(struct bge_softc *sc)
5974 {
5975         if_t ifp;
5976
5977         BGE_LOCK_ASSERT(sc);
5978
5979         ifp = sc->bge_ifp;
5980
5981         callout_stop(&sc->bge_stat_ch);
5982
5983         /* Disable host interrupts. */
5984         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5985         bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5986
5987         /*
5988          * Tell firmware we're shutting down.
5989          */
5990         bge_stop_fw(sc);
5991         bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
5992
5993         /*
5994          * Disable all of the receiver blocks.
5995          */
5996         bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5997         bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5998         bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5999         if (BGE_IS_5700_FAMILY(sc))
6000                 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
6001         bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
6002         bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
6003         bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
6004
6005         /*
6006          * Disable all of the transmit blocks.
6007          */
6008         bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
6009         bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
6010         bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
6011         bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
6012         bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
6013         if (BGE_IS_5700_FAMILY(sc))
6014                 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
6015         bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
6016
6017         /*
6018          * Shut down all of the memory managers and related
6019          * state machines.
6020          */
6021         bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
6022         bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
6023         if (BGE_IS_5700_FAMILY(sc))
6024                 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
6025
6026         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
6027         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
6028         if (!(BGE_IS_5705_PLUS(sc))) {
6029                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
6030                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
6031         }
6032         /* Update MAC statistics. */
6033         if (BGE_IS_5705_PLUS(sc))
6034                 bge_stats_update_regs(sc);
6035
6036         bge_reset(sc);
6037         bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
6038         bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
6039
6040         /*
6041          * Keep the ASF firmware running if up.
6042          */
6043         if (sc->bge_asf_mode & ASF_STACKUP)
6044                 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6045         else
6046                 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6047
6048         /* Free the RX lists. */
6049         bge_free_rx_ring_std(sc);
6050
6051         /* Free jumbo RX list. */
6052         if (BGE_IS_JUMBO_CAPABLE(sc))
6053                 bge_free_rx_ring_jumbo(sc);
6054
6055         /* Free TX buffers. */
6056         bge_free_tx_ring(sc);
6057
6058         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
6059
6060         /* Clear MAC's link state (PHY may still have link UP). */
6061         if (bootverbose && sc->bge_link)
6062                 if_printf(sc->bge_ifp, "link DOWN\n");
6063         sc->bge_link = 0;
6064
6065         if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
6066 }
6067
6068 /*
6069  * Stop all chip I/O so that the kernel's probe routines don't
6070  * get confused by errant DMAs when rebooting.
6071  */
6072 static int
6073 bge_shutdown(device_t dev)
6074 {
6075         struct bge_softc *sc;
6076
6077         sc = device_get_softc(dev);
6078         BGE_LOCK(sc);
6079         bge_stop(sc);
6080         BGE_UNLOCK(sc);
6081
6082         return (0);
6083 }
6084
6085 static int
6086 bge_suspend(device_t dev)
6087 {
6088         struct bge_softc *sc;
6089
6090         sc = device_get_softc(dev);
6091         BGE_LOCK(sc);
6092         bge_stop(sc);
6093         BGE_UNLOCK(sc);
6094
6095         return (0);
6096 }
6097
6098 static int
6099 bge_resume(device_t dev)
6100 {
6101         struct bge_softc *sc;
6102         if_t ifp;
6103
6104         sc = device_get_softc(dev);
6105         BGE_LOCK(sc);
6106         ifp = sc->bge_ifp;
6107         if (if_getflags(ifp) & IFF_UP) {
6108                 bge_init_locked(sc);
6109                 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
6110                         bge_start_locked(ifp);
6111         }
6112         BGE_UNLOCK(sc);
6113
6114         return (0);
6115 }
6116
6117 static void
6118 bge_link_upd(struct bge_softc *sc)
6119 {
6120         struct mii_data *mii;
6121         uint32_t link, status;
6122
6123         BGE_LOCK_ASSERT(sc);
6124
6125         /* Clear 'pending link event' flag. */
6126         sc->bge_link_evt = 0;
6127
6128         /*
6129          * Process link state changes.
6130          * Grrr. The link status word in the status block does
6131          * not work correctly on the BCM5700 rev AX and BX chips,
6132          * according to all available information. Hence, we have
6133          * to enable MII interrupts in order to properly obtain
6134          * async link changes. Unfortunately, this also means that
6135          * we have to read the MAC status register to detect link
6136          * changes, thereby adding an additional register access to
6137          * the interrupt handler.
6138          *
6139          * XXX: perhaps link state detection procedure used for
6140          * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
6141          */
6142
6143         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6144             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
6145                 status = CSR_READ_4(sc, BGE_MAC_STS);
6146                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
6147                         mii = device_get_softc(sc->bge_miibus);
6148                         mii_pollstat(mii);
6149                         if (!sc->bge_link &&
6150                             mii->mii_media_status & IFM_ACTIVE &&
6151                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6152                                 sc->bge_link++;
6153                                 if (bootverbose)
6154                                         if_printf(sc->bge_ifp, "link UP\n");
6155                         } else if (sc->bge_link &&
6156                             (!(mii->mii_media_status & IFM_ACTIVE) ||
6157                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
6158                                 sc->bge_link = 0;
6159                                 if (bootverbose)
6160                                         if_printf(sc->bge_ifp, "link DOWN\n");
6161                         }
6162
6163                         /* Clear the interrupt. */
6164                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
6165                             BGE_EVTENB_MI_INTERRUPT);
6166                         bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
6167                             BRGPHY_MII_ISR);
6168                         bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
6169                             BRGPHY_MII_IMR, BRGPHY_INTRS);
6170                 }
6171                 return;
6172         }
6173
6174         if (sc->bge_flags & BGE_FLAG_TBI) {
6175                 status = CSR_READ_4(sc, BGE_MAC_STS);
6176                 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
6177                         if (!sc->bge_link) {
6178                                 sc->bge_link++;
6179                                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
6180                                         BGE_CLRBIT(sc, BGE_MAC_MODE,
6181                                             BGE_MACMODE_TBI_SEND_CFGS);
6182                                         DELAY(40);
6183                                 }
6184                                 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
6185                                 if (bootverbose)
6186                                         if_printf(sc->bge_ifp, "link UP\n");
6187                                 if_link_state_change(sc->bge_ifp,
6188                                     LINK_STATE_UP);
6189                         }
6190                 } else if (sc->bge_link) {
6191                         sc->bge_link = 0;
6192                         if (bootverbose)
6193                                 if_printf(sc->bge_ifp, "link DOWN\n");
6194                         if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
6195                 }
6196         } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
6197                 /*
6198                  * Some broken BCM chips always set the
6199                  * BGE_STATFLAG_LINKSTATE_CHANGED bit in the status word.
6200                  * Work around this bug by reading the PHY link status directly.
6201                  */
6202                 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
6203
6204                 if (link != sc->bge_link ||
6205                     sc->bge_asicrev == BGE_ASICREV_BCM5700) {
6206                         mii = device_get_softc(sc->bge_miibus);
6207                         mii_pollstat(mii);
6208                         if (!sc->bge_link &&
6209                             mii->mii_media_status & IFM_ACTIVE &&
6210                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6211                                 sc->bge_link++;
6212                                 if (bootverbose)
6213                                         if_printf(sc->bge_ifp, "link UP\n");
6214                         } else if (sc->bge_link &&
6215                             (!(mii->mii_media_status & IFM_ACTIVE) ||
6216                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
6217                                 sc->bge_link = 0;
6218                                 if (bootverbose)
6219                                         if_printf(sc->bge_ifp, "link DOWN\n");
6220                         }
6221                 }
6222         } else {
6223                 /*
6224                  * For controllers that call mii_tick, we have to poll
6225                  * link status.
6226                  */
6227                 mii = device_get_softc(sc->bge_miibus);
6228                 mii_pollstat(mii);
6229                 bge_miibus_statchg(sc->bge_dev);
6230         }
6231
6232         /* Disable MAC attention when link is up. */
6233         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
6234             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
6235             BGE_MACSTAT_LINK_CHANGED);
6236 }
6237
6238 static void
6239 bge_add_sysctls(struct bge_softc *sc)
6240 {
6241         struct sysctl_ctx_list *ctx;
6242         struct sysctl_oid_list *children;
6243         int unit;
6244
6245         ctx = device_get_sysctl_ctx(sc->bge_dev);
6246         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
6247
6248 #ifdef BGE_REGISTER_DEBUG
6249         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
6250             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
6251             "Debug Information");
6252
6253         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
6254             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
6255             "MAC Register Read");
6256
6257         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read",
6258             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_ape_read, "I",
6259             "APE Register Read");
6260
6261         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
6262             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
6263             "Memory Read");
6264
6265 #endif
6266
6267         unit = device_get_unit(sc->bge_dev);
6268         /*
6269          * A common design characteristic for many Broadcom client controllers
6270          * is that they only support a single outstanding DMA read operation
6271          * on the PCIe bus. This means that it will take twice as long to fetch
6272          * a TX frame that is split into header and payload buffers as it does
6273          * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
6274          * these controllers, coalescing buffers to reduce the number of memory
6275          * reads is an effective way to get maximum performance (about 940Mbps).
6276          * Without collapsing TX buffers the maximum TCP bulk transfer
6277          * performance is about 850Mbps. However, forcibly coalescing mbufs
6278          * consumes a lot of CPU cycles, so leave it off by default.
6279          */
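        /*
         * Usage sketch: "sysctl dev.bge.0.forced_collapse=1" defragments
         * every multi-mbuf frame into a single buffer, while values
         * greater than 1 cap each frame at that many DMA segments (see
         * the handling in bge_encap() above).
         */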
6280         sc->bge_forced_collapse = 0;
6281         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
6282             CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0,
6283             "Number of fragmented TX buffers of a frame allowed before "
6284             "forced collapsing");
6285
6286         sc->bge_msi = 1;
6287         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
6288             CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI");
6289
6290         /*
6291          * It seems all Broadcom controllers have a bug that can generate UDP
6292          * datagrams with checksum value 0 when TX UDP checksum offloading is
6293          * enabled.  Generating a UDP checksum value of 0 violates RFC 768.
6294          * Even though the probability of generating such UDP datagrams is
6295          * low, FreeBSD boxes should not inject them into the network, so
6296          * UDP checksum offloading is disabled by default.  Users can still
6297          * override this behavior by setting the sysctl variable,
6298          * dev.bge.0.forced_udpcsum.
6299          */
6300         sc->bge_forced_udpcsum = 0;
6301         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
6302             CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0,
6303             "Enable UDP checksum offloading even if controller can "
6304             "generate UDP checksum value 0");
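        /*
         * Since both knobs above are CTLFLAG_RWTUN, they can also be set
         * as loader tunables, e.g. dev.bge.0.forced_udpcsum="1" in
         * /boot/loader.conf.
         */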
6305
6306         if (BGE_IS_5705_PLUS(sc))
6307                 bge_add_sysctl_stats_regs(sc, ctx, children);
6308         else
6309                 bge_add_sysctl_stats(sc, ctx, children);
6310 }
6311
6312 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
6313         SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
6314             sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
6315             desc)
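/*
 * Each statistic is exported as a read-only sysctl whose arg2 is the
 * field's offset within struct bge_stats; the shared handler
 * bge_sysctl_stats() uses that offset to fetch the live counter.
 */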
6316
6317 static void
6318 bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
6319     struct sysctl_oid_list *parent)
6320 {
6321         struct sysctl_oid *tree;
6322         struct sysctl_oid_list *children, *schildren;
6323
6324         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
6325             NULL, "BGE Statistics");
6326         schildren = children = SYSCTL_CHILDREN(tree);
6327         BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
6328             children, COSFramesDroppedDueToFilters,
6329             "FramesDroppedDueToFilters");
6330         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
6331             children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
6332         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
6333             children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
6334         BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
6335             children, nicNoMoreRxBDs, "NoMoreRxBDs");
6336         BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
6337             children, ifInDiscards, "InputDiscards");
6338         BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
6339             children, ifInErrors, "InputErrors");
6340         BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
6341             children, nicRecvThresholdHit, "RecvThresholdHit");
6342         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
6343             children, nicDmaReadQueueFull, "DmaReadQueueFull");
6344         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
6345             children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
6346         BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
6347             children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
6348         BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
6349             children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
6350         BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
6351             children, nicRingStatusUpdate, "RingStatusUpdate");
6352         BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
6353             children, nicInterrupts, "Interrupts");
6354         BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
6355             children, nicAvoidedInterrupts, "AvoidedInterrupts");
6356         BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
6357             children, nicSendThresholdHit, "SendThresholdHit");
6358
6359         tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
6360             NULL, "BGE RX Statistics");
6361         children = SYSCTL_CHILDREN(tree);
6362         BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
6363             children, rxstats.ifHCInOctets, "ifHCInOctets");
6364         BGE_SYSCTL_STAT(sc, ctx, "Fragments",
6365             children, rxstats.etherStatsFragments, "Fragments");
6366         BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
6367             children, rxstats.ifHCInUcastPkts, "UnicastPkts");
6368         BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
6369             children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
6370         BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
6371             children, rxstats.dot3StatsFCSErrors, "FCSErrors");
6372         BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
6373             children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
6374         BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
6375             children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
6376         BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
6377             children, rxstats.xoffPauseFramesReceived,
6378             "xoffPauseFramesReceived");
6379         BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
6380             children, rxstats.macControlFramesReceived,
6381             "ControlFramesReceived");
6382         BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
6383             children, rxstats.xoffStateEntered, "xoffStateEntered");
6384         BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
6385             children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
6386         BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
6387             children, rxstats.etherStatsJabbers, "Jabbers");
6388         BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
6389             children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
6390         BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
6391             children, rxstats.inRangeLengthError, "inRangeLengthError");
6392         BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
6393             children, rxstats.outRangeLengthError, "outRangeLengthError");
6394
6395         tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
6396             NULL, "BGE TX Statistics");
6397         children = SYSCTL_CHILDREN(tree);
6398         BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
6399             children, txstats.ifHCOutOctets, "ifHCOutOctets");
6400         BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
6401             children, txstats.etherStatsCollisions, "Collisions");
6402         BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
6403             children, txstats.outXonSent, "XonSent");
6404         BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
6405             children, txstats.outXoffSent, "XoffSent");
6406         BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
6407             children, txstats.flowControlDone, "flowControlDone");
6408         BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
6409             children, txstats.dot3StatsInternalMacTransmitErrors,
6410             "InternalMacTransmitErrors");
6411         BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
6412             children, txstats.dot3StatsSingleCollisionFrames,
6413             "SingleCollisionFrames");
6414         BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
6415             children, txstats.dot3StatsMultipleCollisionFrames,
6416             "MultipleCollisionFrames");
6417         BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
6418             children, txstats.dot3StatsDeferredTransmissions,
6419             "DeferredTransmissions");
6420         BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
6421             children, txstats.dot3StatsExcessiveCollisions,
6422             "ExcessiveCollisions");
6423         BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
6424             children, txstats.dot3StatsLateCollisions,
6425             "LateCollisions");
6426         BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
6427             children, txstats.ifHCOutUcastPkts, "UnicastPkts");
6428         BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
6429             children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
6430         BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
6431             children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
6432         BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
6433             children, txstats.dot3StatsCarrierSenseErrors,
6434             "CarrierSenseErrors");
6435         BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
6436             children, txstats.ifOutDiscards, "Discards");
6437         BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
6438             children, txstats.ifOutErrors, "Errors");
6439 }
6440
6441 #undef BGE_SYSCTL_STAT
6442
6443 #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d)    \
6444             SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
6445
6446 static void
6447 bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
6448     struct sysctl_oid_list *parent)
6449 {
6450         struct sysctl_oid *tree;
6451         struct sysctl_oid_list *child, *schild;
6452         struct bge_mac_stats *stats;
6453
6454         stats = &sc->bge_mac_stats;
6455         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
6456             NULL, "BGE Statistics");
6457         schild = child = SYSCTL_CHILDREN(tree);
6458         BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
6459             &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
6460         BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
6461             &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
6462         BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
6463             &stats->DmaWriteHighPriQueueFull,
6464             "NIC DMA Write High Priority Queue Full");
6465         BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
6466             &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
6467         BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
6468             &stats->InputDiscards, "Discarded Input Frames");
6469         BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
6470             &stats->InputErrors, "Input Errors");
6471         BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
6472             &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
6473
        tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
            NULL, "BGE RX Statistics");
        child = SYSCTL_CHILDREN(tree);
        BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
            &stats->ifHCInOctets, "Inbound Octets");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
            &stats->etherStatsFragments, "Fragments");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
            &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
            &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
            &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
            &stats->dot3StatsFCSErrors, "FCS Errors");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
            &stats->dot3StatsAlignmentErrors, "Alignment Errors");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
            &stats->xonPauseFramesReceived, "XON Pause Frames Received");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
            &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
            &stats->macControlFramesReceived, "MAC Control Frames Received");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
            &stats->xoffStateEntered, "XOFF State Entered");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
            &stats->dot3StatsFramesTooLong, "Frames Too Long");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
            &stats->etherStatsJabbers, "Jabbers");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
            &stats->etherStatsUndersizePkts, "Undersized Packets");

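        /* Transmit-side MAC counters. */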
        tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
            NULL, "BGE TX Statistics");
        child = SYSCTL_CHILDREN(tree);
        BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
            &stats->ifHCOutOctets, "Outbound Octets");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
            &stats->etherStatsCollisions, "TX Collisions");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
            &stats->outXonSent, "XON Sent");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
            &stats->outXoffSent, "XOFF Sent");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
            &stats->dot3StatsInternalMacTransmitErrors,
            "Internal MAC TX Errors");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
            &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
            &stats->dot3StatsMultipleCollisionFrames,
            "Multiple Collision Frames");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
            &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
            &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
            &stats->dot3StatsLateCollisions, "Late Collisions");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
            &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
            &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
        BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
            &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
}

#undef  BGE_SYSCTL_STAT_ADD64

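/*
 * Handler for the statistics sysctls on chips whose statistics block
 * lives in NIC memory.  Each counter is a 64-bit value laid out as a
 * bge_hostaddr; the handler reads the low 32 bits of the counter at
 * "offset" through the memory window and reports them.
 */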
static int
bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
        struct bge_softc *sc;
        uint32_t result;
        int offset;

        sc = (struct bge_softc *)arg1;
        offset = arg2;
        result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
            offsetof(bge_hostaddr, bge_addr_lo));
        return (sysctl_handle_int(oidp, &result, 0, req));
}

#ifdef BGE_REGISTER_DEBUG
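/*
 * Writing 1 to this debug sysctl dumps the status block, the register
 * window at 0x800-0x9ff and the detected hardware flags to the
 * console.
 */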
static int
bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
        struct bge_softc *sc;
        uint16_t *sbdata;
        int error, result, sbsz;
        int i, j;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);
        if (error || (req->newptr == NULL))
                return (error);

        if (result == 1) {
                sc = (struct bge_softc *)arg1;

                if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
                    sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
                        sbsz = BGE_STATUS_BLK_SZ;
                else
                        sbsz = 32;
                sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
                printf("Status Block:\n");
                BGE_LOCK(sc);
                bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
                    sc->bge_cdata.bge_status_map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
                        printf("%06x:", i);
                        for (j = 0; j < 8; j++)
                                printf(" %04x", sbdata[i++]);
                        printf("\n");
                }

                printf("Registers:\n");
                for (i = 0x800; i < 0xA00; ) {
                        printf("%06x:", i);
                        for (j = 0; j < 8; j++) {
                                printf(" %08x", CSR_READ_4(sc, i));
                                i += 4;
                        }
                        printf("\n");
                }
                BGE_UNLOCK(sc);

                printf("Hardware Flags:\n");
                if (BGE_IS_5717_PLUS(sc))
                        printf(" - 5717 Plus\n");
                if (BGE_IS_5755_PLUS(sc))
                        printf(" - 5755 Plus\n");
                if (BGE_IS_575X_PLUS(sc))
                        printf(" - 575X Plus\n");
                if (BGE_IS_5705_PLUS(sc))
                        printf(" - 5705 Plus\n");
                if (BGE_IS_5714_FAMILY(sc))
                        printf(" - 5714 Family\n");
                if (BGE_IS_5700_FAMILY(sc))
                        printf(" - 5700 Family\n");
                if (sc->bge_flags & BGE_FLAG_JUMBO)
                        printf(" - Supports Jumbo Frames\n");
                if (sc->bge_flags & BGE_FLAG_PCIX)
                        printf(" - PCI-X Bus\n");
                if (sc->bge_flags & BGE_FLAG_PCIE)
                        printf(" - PCI Express Bus\n");
                if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
                        printf(" - No 3 LEDs\n");
                if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
                        printf(" - RX Alignment Bug\n");
        }

        return (error);
}

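/*
 * Writing a register offset below 0x8000 to this debug sysctl reads
 * the register and prints its contents on the console.
 */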
static int
bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
        struct bge_softc *sc;
        int error, result;
        uint32_t val;

        /*
         * sysctl_handle_int() stores sizeof(int) bytes, so "result"
         * must be a full int; a narrower type would be overwritten
         * past its end.
         */
        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);
        if (error || (req->newptr == NULL))
                return (error);

        if (result >= 0 && result < 0x8000) {
                sc = (struct bge_softc *)arg1;
                val = CSR_READ_4(sc, result);
                printf("reg 0x%06X = 0x%08X\n", result, val);
        }

        return (error);
}

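/*
 * Same as bge_sysctl_reg_read(), but the read goes through the APE
 * register space.
 */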
static int
bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS)
{
        struct bge_softc *sc;
        int error, result;
        uint32_t val;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);
        if (error || (req->newptr == NULL))
                return (error);

        if (result >= 0 && result < 0x8000) {
                sc = (struct bge_softc *)arg1;
                val = APE_READ_4(sc, result);
                printf("reg 0x%06X = 0x%08X\n", result, val);
        }

        return (error);
}

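/*
 * Writing an address below 0x8000 to this debug sysctl performs an
 * indirect read of NIC memory and prints the word on the console.
 */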
static int
bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
{
        struct bge_softc *sc;
        int error, result;
        uint32_t val;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);
        if (error || (req->newptr == NULL))
                return (error);

        if (result >= 0 && result < 0x8000) {
                sc = (struct bge_softc *)arg1;
                val = bge_readmem_ind(sc, result);
                printf("mem 0x%06X = 0x%08X\n", result, val);
        }

        return (error);
}
#endif

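/*
 * Station address lookup methods, tried in order by bge_get_eaddr()
 * below.  Each returns 0 on success and nonzero if the address cannot
 * be obtained from that source.
 */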
static int
bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
{

        if (sc->bge_flags & BGE_FLAG_EADDR)
                return (1);

#ifdef __sparc64__
        OF_getetheraddr(sc->bge_dev, ether_addr);
        return (0);
#endif
        return (1);
}

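/*
 * Look for the station address in the chip's SRAM mailbox.  A valid
 * entry carries the signature 0x484b ("HK" in ASCII) in the upper half
 * of the first word, the first two address bytes in its lower half and
 * the remaining four bytes in the second word.
 */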
static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
        uint32_t mac_addr;

        mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
        if ((mac_addr >> 16) == 0x484b) {
                ether_addr[0] = (uint8_t)(mac_addr >> 8);
                ether_addr[1] = (uint8_t)mac_addr;
                mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
                ether_addr[2] = (uint8_t)(mac_addr >> 24);
                ether_addr[3] = (uint8_t)(mac_addr >> 16);
                ether_addr[4] = (uint8_t)(mac_addr >> 8);
                ether_addr[5] = (uint8_t)mac_addr;
                return (0);
        }
        return (1);
}

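/*
 * Read the station address from NVRAM; the BCM5906 stores it at a
 * different offset than the other chips.
 */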
static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
        int mac_offset = BGE_EE_MAC_OFFSET;

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
                mac_offset = BGE_EE_MAC_OFFSET_5906;

        return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
            ETHER_ADDR_LEN));
}

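/*
 * Read the station address from the EEPROM.  The BCM5906 is skipped
 * here since its address is kept in NVRAM instead.
 */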
static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{

        if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
                return (1);

        return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
            ETHER_ADDR_LEN));
}

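/*
 * Walk the table of lookup methods until one of them produces a
 * station address; the order of the table is significant.
 */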
static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
        static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
                /* NOTE: Order is critical */
                bge_get_eaddr_fw,
                bge_get_eaddr_mem,
                bge_get_eaddr_nvram,
                bge_get_eaddr_eeprom,
                NULL
        };
        const bge_eaddr_fcn_t *func;

        for (func = bge_eaddr_funcs; *func != NULL; ++func) {
                if ((*func)(sc, eaddr) == 0)
                        break;
        }
        return (*func == NULL ? ENXIO : 0);
}

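/*
 * if_get_counter method.  On 5705 and newer chips the input error and
 * collision counts come from the driver-maintained MAC statistics;
 * everything else, and all counters on older chips, falls back to the
 * generic ifnet counters.
 */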
static uint64_t
bge_get_counter(if_t ifp, ift_counter cnt)
{
        struct bge_softc *sc;
        struct bge_mac_stats *stats;

        sc = if_getsoftc(ifp);
        if (!BGE_IS_5705_PLUS(sc))
                return (if_get_counter_default(ifp, cnt));
        stats = &sc->bge_mac_stats;

        switch (cnt) {
        case IFCOUNTER_IERRORS:
                return (stats->NoMoreRxBDs + stats->InputDiscards +
                    stats->InputErrors);
        case IFCOUNTER_COLLISIONS:
                return (stats->etherStatsCollisions);
        default:
                return (if_get_counter_default(ifp, cnt));
        }
}