1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
84
85 #include <net/if.h>
86 #include <net/if_arp.h>
87 #include <net/ethernet.h>
88 #include <net/if_dl.h>
89 #include <net/if_media.h>
90
91 #include <net/bpf.h>
92
93 #include <net/if_types.h>
94 #include <net/if_vlan_var.h>
95
96 #include <netinet/in_systm.h>
97 #include <netinet/in.h>
98 #include <netinet/ip.h>
99 #include <netinet/tcp.h>
100
101 #include <machine/bus.h>
102 #include <machine/resource.h>
103 #include <sys/bus.h>
104 #include <sys/rman.h>
105
106 #include <dev/mii/mii.h>
107 #include <dev/mii/miivar.h>
108 #include "miidevs.h"
109 #include <dev/mii/brgphyreg.h>
110
111 #ifdef __sparc64__
112 #include <dev/ofw/ofw_bus.h>
113 #include <dev/ofw/openfirm.h>
114 #include <machine/ofw_machdep.h>
115 #include <machine/ver.h>
116 #endif
117
118 #include <dev/pci/pcireg.h>
119 #include <dev/pci/pcivar.h>
120
121 #include <dev/bge/if_bgereg.h>
122
123 #define BGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP)
124 #define ETHER_MIN_NOPAD         (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126 MODULE_DEPEND(bge, pci, 1, 1, 1);
127 MODULE_DEPEND(bge, ether, 1, 1, 1);
128 MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130 /* "device miibus" required.  See GENERIC if you get errors here. */
131 #include "miibus_if.h"
132
133 /*
134  * Various supported device vendors/types and their names. Note: the
135  * spec seems to indicate that the hardware still has Alteon's vendor
136  * ID burned into it, though it will always be overridden by the vendor
137  * ID in the EEPROM. Just to be safe, we cover all possibilities.
138  */
139 static const struct bge_type {
140         uint16_t        bge_vid;
141         uint16_t        bge_did;
142 } bge_devs[] = {
143         { ALTEON_VENDORID,      ALTEON_DEVICEID_BCM5700 },
144         { ALTEON_VENDORID,      ALTEON_DEVICEID_BCM5701 },
145
146         { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC1000 },
147         { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC1002 },
148         { ALTIMA_VENDORID,      ALTIMA_DEVICE_AC9100 },
149
150         { APPLE_VENDORID,       APPLE_DEVICE_BCM5701 },
151
152         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5700 },
153         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5701 },
154         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702 },
155         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702_ALT },
156         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5702X },
157         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703 },
158         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703_ALT },
159         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5703X },
160         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704C },
161         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704S },
162         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5704S_ALT },
163         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705 },
164         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705F },
165         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705K },
166         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705M },
167         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5705M_ALT },
168         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5714C },
169         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5714S },
170         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5715 },
171         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5715S },
172         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5717 },
173         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5718 },
174         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5720 },
175         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5721 },
176         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5722 },
177         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5723 },
178         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5750 },
179         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5750M },
180         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751 },
181         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751F },
182         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5751M },
183         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5752 },
184         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5752M },
185         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753 },
186         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753F },
187         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5753M },
188         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5754 },
189         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5754M },
190         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5755 },
191         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5755M },
192         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5756 },
193         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761 },
194         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761E },
195         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761S },
196         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5761SE },
197         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5764 },
198         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5780 },
199         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5780S },
200         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5781 },
201         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5782 },
202         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5784 },
203         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5785F },
204         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5785G },
205         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5786 },
206         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787 },
207         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787F },
208         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5787M },
209         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5788 },
210         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5789 },
211         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5901 },
212         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5901A2 },
213         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5903M },
214         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5906 },
215         { BCOM_VENDORID,        BCOM_DEVICEID_BCM5906M },
216         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57760 },
217         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57780 },
218         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57788 },
219         { BCOM_VENDORID,        BCOM_DEVICEID_BCM57790 },
220
221         { SK_VENDORID,          SK_DEVICEID_ALTIMA },
222
223         { TC_VENDORID,          TC_DEVICEID_3C996 },
224
225         { FJTSU_VENDORID,       FJTSU_DEVICEID_PW008GE4 },
226         { FJTSU_VENDORID,       FJTSU_DEVICEID_PW008GE5 },
227         { FJTSU_VENDORID,       FJTSU_DEVICEID_PP250450 },
228
229         { 0, 0 }
230 };
231
232 static const struct bge_vendor {
233         uint16_t        v_id;
234         const char      *v_name;
235 } bge_vendors[] = {
236         { ALTEON_VENDORID,      "Alteon" },
237         { ALTIMA_VENDORID,      "Altima" },
238         { APPLE_VENDORID,       "Apple" },
239         { BCOM_VENDORID,        "Broadcom" },
240         { SK_VENDORID,          "SysKonnect" },
241         { TC_VENDORID,          "3Com" },
242         { FJTSU_VENDORID,       "Fujitsu" },
243
244         { 0, NULL }
245 };
246
247 static const struct bge_revision {
248         uint32_t        br_chipid;
249         const char      *br_name;
250 } bge_revisions[] = {
251         { BGE_CHIPID_BCM5700_A0,        "BCM5700 A0" },
252         { BGE_CHIPID_BCM5700_A1,        "BCM5700 A1" },
253         { BGE_CHIPID_BCM5700_B0,        "BCM5700 B0" },
254         { BGE_CHIPID_BCM5700_B1,        "BCM5700 B1" },
255         { BGE_CHIPID_BCM5700_B2,        "BCM5700 B2" },
256         { BGE_CHIPID_BCM5700_B3,        "BCM5700 B3" },
257         { BGE_CHIPID_BCM5700_ALTIMA,    "BCM5700 Altima" },
258         { BGE_CHIPID_BCM5700_C0,        "BCM5700 C0" },
259         { BGE_CHIPID_BCM5701_A0,        "BCM5701 A0" },
260         { BGE_CHIPID_BCM5701_B0,        "BCM5701 B0" },
261         { BGE_CHIPID_BCM5701_B2,        "BCM5701 B2" },
262         { BGE_CHIPID_BCM5701_B5,        "BCM5701 B5" },
263         { BGE_CHIPID_BCM5703_A0,        "BCM5703 A0" },
264         { BGE_CHIPID_BCM5703_A1,        "BCM5703 A1" },
265         { BGE_CHIPID_BCM5703_A2,        "BCM5703 A2" },
266         { BGE_CHIPID_BCM5703_A3,        "BCM5703 A3" },
267         { BGE_CHIPID_BCM5703_B0,        "BCM5703 B0" },
268         { BGE_CHIPID_BCM5704_A0,        "BCM5704 A0" },
269         { BGE_CHIPID_BCM5704_A1,        "BCM5704 A1" },
270         { BGE_CHIPID_BCM5704_A2,        "BCM5704 A2" },
271         { BGE_CHIPID_BCM5704_A3,        "BCM5704 A3" },
272         { BGE_CHIPID_BCM5704_B0,        "BCM5704 B0" },
273         { BGE_CHIPID_BCM5705_A0,        "BCM5705 A0" },
274         { BGE_CHIPID_BCM5705_A1,        "BCM5705 A1" },
275         { BGE_CHIPID_BCM5705_A2,        "BCM5705 A2" },
276         { BGE_CHIPID_BCM5705_A3,        "BCM5705 A3" },
277         { BGE_CHIPID_BCM5750_A0,        "BCM5750 A0" },
278         { BGE_CHIPID_BCM5750_A1,        "BCM5750 A1" },
279         { BGE_CHIPID_BCM5750_A3,        "BCM5750 A3" },
280         { BGE_CHIPID_BCM5750_B0,        "BCM5750 B0" },
281         { BGE_CHIPID_BCM5750_B1,        "BCM5750 B1" },
282         { BGE_CHIPID_BCM5750_C0,        "BCM5750 C0" },
283         { BGE_CHIPID_BCM5750_C1,        "BCM5750 C1" },
284         { BGE_CHIPID_BCM5750_C2,        "BCM5750 C2" },
285         { BGE_CHIPID_BCM5714_A0,        "BCM5714 A0" },
286         { BGE_CHIPID_BCM5752_A0,        "BCM5752 A0" },
287         { BGE_CHIPID_BCM5752_A1,        "BCM5752 A1" },
288         { BGE_CHIPID_BCM5752_A2,        "BCM5752 A2" },
289         { BGE_CHIPID_BCM5714_B0,        "BCM5714 B0" },
290         { BGE_CHIPID_BCM5714_B3,        "BCM5714 B3" },
291         { BGE_CHIPID_BCM5715_A0,        "BCM5715 A0" },
292         { BGE_CHIPID_BCM5715_A1,        "BCM5715 A1" },
293         { BGE_CHIPID_BCM5715_A3,        "BCM5715 A3" },
294         { BGE_CHIPID_BCM5717_A0,        "BCM5717 A0" },
295         { BGE_CHIPID_BCM5717_B0,        "BCM5717 B0" },
296         { BGE_CHIPID_BCM5755_A0,        "BCM5755 A0" },
297         { BGE_CHIPID_BCM5755_A1,        "BCM5755 A1" },
298         { BGE_CHIPID_BCM5755_A2,        "BCM5755 A2" },
299         { BGE_CHIPID_BCM5722_A0,        "BCM5722 A0" },
300         { BGE_CHIPID_BCM5761_A0,        "BCM5761 A0" },
301         { BGE_CHIPID_BCM5761_A1,        "BCM5761 A1" },
302         { BGE_CHIPID_BCM5784_A0,        "BCM5784 A0" },
303         { BGE_CHIPID_BCM5784_A1,        "BCM5784 A1" },
304         /* 5754 and 5787 share the same ASIC ID */
305         { BGE_CHIPID_BCM5787_A0,        "BCM5754/5787 A0" },
306         { BGE_CHIPID_BCM5787_A1,        "BCM5754/5787 A1" },
307         { BGE_CHIPID_BCM5787_A2,        "BCM5754/5787 A2" },
308         { BGE_CHIPID_BCM5906_A1,        "BCM5906 A1" },
309         { BGE_CHIPID_BCM5906_A2,        "BCM5906 A2" },
310         { BGE_CHIPID_BCM57780_A0,       "BCM57780 A0" },
311         { BGE_CHIPID_BCM57780_A1,       "BCM57780 A1" },
312
313         { 0, NULL }
314 };
315
316 /*
317  * Some defaults for major revisions, so that newer steppings
318  * that we don't know about have a shot at working.
319  */
320 static const struct bge_revision bge_majorrevs[] = {
321         { BGE_ASICREV_BCM5700,          "unknown BCM5700" },
322         { BGE_ASICREV_BCM5701,          "unknown BCM5701" },
323         { BGE_ASICREV_BCM5703,          "unknown BCM5703" },
324         { BGE_ASICREV_BCM5704,          "unknown BCM5704" },
325         { BGE_ASICREV_BCM5705,          "unknown BCM5705" },
326         { BGE_ASICREV_BCM5750,          "unknown BCM5750" },
327         { BGE_ASICREV_BCM5714_A0,       "unknown BCM5714" },
328         { BGE_ASICREV_BCM5752,          "unknown BCM5752" },
329         { BGE_ASICREV_BCM5780,          "unknown BCM5780" },
330         { BGE_ASICREV_BCM5714,          "unknown BCM5714" },
331         { BGE_ASICREV_BCM5755,          "unknown BCM5755" },
332         { BGE_ASICREV_BCM5761,          "unknown BCM5761" },
333         { BGE_ASICREV_BCM5784,          "unknown BCM5784" },
334         { BGE_ASICREV_BCM5785,          "unknown BCM5785" },
335         /* 5754 and 5787 share the same ASIC ID */
336         { BGE_ASICREV_BCM5787,          "unknown BCM5754/5787" },
337         { BGE_ASICREV_BCM5906,          "unknown BCM5906" },
338         { BGE_ASICREV_BCM57780,         "unknown BCM57780" },
339         { BGE_ASICREV_BCM5717,          "unknown BCM5717" },
340
341         { 0, NULL }
342 };
343
344 #define BGE_IS_JUMBO_CAPABLE(sc)        ((sc)->bge_flags & BGE_FLAG_JUMBO)
345 #define BGE_IS_5700_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
346 #define BGE_IS_5705_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
347 #define BGE_IS_5714_FAMILY(sc)          ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
348 #define BGE_IS_575X_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
349 #define BGE_IS_5755_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
350 #define BGE_IS_5717_PLUS(sc)            ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
351
352 const struct bge_revision * bge_lookup_rev(uint32_t);
353 const struct bge_vendor * bge_lookup_vendor(uint16_t);
354
355 typedef int     (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
356
357 static int bge_probe(device_t);
358 static int bge_attach(device_t);
359 static int bge_detach(device_t);
360 static int bge_suspend(device_t);
361 static int bge_resume(device_t);
362 static void bge_release_resources(struct bge_softc *);
363 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
364 static int bge_dma_alloc(struct bge_softc *);
365 static void bge_dma_free(struct bge_softc *);
366 static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
367     bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
368
369 static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
370 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
371 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
372 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
373 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
374
375 static void bge_txeof(struct bge_softc *, uint16_t);
376 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
377 static int bge_rxeof(struct bge_softc *, uint16_t, int);
378
379 static void bge_asf_driver_up (struct bge_softc *);
380 static void bge_tick(void *);
381 static void bge_stats_clear_regs(struct bge_softc *);
382 static void bge_stats_update(struct bge_softc *);
383 static void bge_stats_update_regs(struct bge_softc *);
384 static struct mbuf *bge_check_short_dma(struct mbuf *);
385 static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
386     uint16_t *, uint16_t *);
387 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
388
389 static void bge_intr(void *);
390 static int bge_msi_intr(void *);
391 static void bge_intr_task(void *, int);
392 static void bge_start_locked(struct ifnet *);
393 static void bge_start(struct ifnet *);
394 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
395 static void bge_init_locked(struct bge_softc *);
396 static void bge_init(void *);
397 static void bge_stop(struct bge_softc *);
398 static void bge_watchdog(struct bge_softc *);
399 static int bge_shutdown(device_t);
400 static int bge_ifmedia_upd_locked(struct ifnet *);
401 static int bge_ifmedia_upd(struct ifnet *);
402 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
403
404 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
405 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
406
407 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
408 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
409
410 static void bge_setpromisc(struct bge_softc *);
411 static void bge_setmulti(struct bge_softc *);
412 static void bge_setvlan(struct bge_softc *);
413
414 static __inline void bge_rxreuse_std(struct bge_softc *, int);
415 static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
416 static int bge_newbuf_std(struct bge_softc *, int);
417 static int bge_newbuf_jumbo(struct bge_softc *, int);
418 static int bge_init_rx_ring_std(struct bge_softc *);
419 static void bge_free_rx_ring_std(struct bge_softc *);
420 static int bge_init_rx_ring_jumbo(struct bge_softc *);
421 static void bge_free_rx_ring_jumbo(struct bge_softc *);
422 static void bge_free_tx_ring(struct bge_softc *);
423 static int bge_init_tx_ring(struct bge_softc *);
424
425 static int bge_chipinit(struct bge_softc *);
426 static int bge_blockinit(struct bge_softc *);
427
428 static int bge_has_eaddr(struct bge_softc *);
429 static uint32_t bge_readmem_ind(struct bge_softc *, int);
430 static void bge_writemem_ind(struct bge_softc *, int, int);
431 static void bge_writembx(struct bge_softc *, int, int);
432 #ifdef notdef
433 static uint32_t bge_readreg_ind(struct bge_softc *, int);
434 #endif
435 static void bge_writemem_direct(struct bge_softc *, int, int);
436 static void bge_writereg_ind(struct bge_softc *, int, int);
437
438 static int bge_miibus_readreg(device_t, int, int);
439 static int bge_miibus_writereg(device_t, int, int, int);
440 static void bge_miibus_statchg(device_t);
441 #ifdef DEVICE_POLLING
442 static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
443 #endif
444
445 #define BGE_RESET_START 1
446 #define BGE_RESET_STOP  2
447 static void bge_sig_post_reset(struct bge_softc *, int);
448 static void bge_sig_legacy(struct bge_softc *, int);
449 static void bge_sig_pre_reset(struct bge_softc *, int);
450 static void bge_stop_fw(struct bge_softc *);
451 static int bge_reset(struct bge_softc *);
452 static void bge_link_upd(struct bge_softc *);
453
454 /*
455  * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
456  * leak information to untrusted users.  It is also known to cause alignment
457  * traps on certain architectures.
458  */
459 #ifdef BGE_REGISTER_DEBUG
460 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
461 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
462 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
463 #endif
464 static void bge_add_sysctls(struct bge_softc *);
465 static void bge_add_sysctl_stats_regs(struct bge_softc *,
466     struct sysctl_ctx_list *, struct sysctl_oid_list *);
467 static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
468     struct sysctl_oid_list *);
469 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
470
471 static device_method_t bge_methods[] = {
472         /* Device interface */
473         DEVMETHOD(device_probe,         bge_probe),
474         DEVMETHOD(device_attach,        bge_attach),
475         DEVMETHOD(device_detach,        bge_detach),
476         DEVMETHOD(device_shutdown,      bge_shutdown),
477         DEVMETHOD(device_suspend,       bge_suspend),
478         DEVMETHOD(device_resume,        bge_resume),
479
480         /* bus interface */
481         DEVMETHOD(bus_print_child,      bus_generic_print_child),
482         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
483
484         /* MII interface */
485         DEVMETHOD(miibus_readreg,       bge_miibus_readreg),
486         DEVMETHOD(miibus_writereg,      bge_miibus_writereg),
487         DEVMETHOD(miibus_statchg,       bge_miibus_statchg),
488
489         { 0, 0 }
490 };
491
492 static driver_t bge_driver = {
493         "bge",
494         bge_methods,
495         sizeof(struct bge_softc)
496 };
497
498 static devclass_t bge_devclass;
499
500 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
501 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
502
503 static int bge_allow_asf = 0;
504
505 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
506
507 SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
508 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
509         "Allow ASF mode if available");
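
/*
 * As a loader tunable, ASF mode can also be enabled at boot time,
 * e.g. by adding hw.bge.allow_asf="1" to /boot/loader.conf.
 */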
510
511 #define SPARC64_BLADE_1500_MODEL        "SUNW,Sun-Blade-1500"
512 #define SPARC64_BLADE_1500_PATH_BGE     "/pci@1f,700000/network@2"
513 #define SPARC64_BLADE_2500_MODEL        "SUNW,Sun-Blade-2500"
514 #define SPARC64_BLADE_2500_PATH_BGE     "/pci@1c,600000/network@3"
515 #define SPARC64_OFW_SUBVENDOR           "subsystem-vendor-id"
516
517 static int
518 bge_has_eaddr(struct bge_softc *sc)
519 {
520 #ifdef __sparc64__
521         char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
522         device_t dev;
523         uint32_t subvendor;
524
525         dev = sc->bge_dev;
526
527         /*
528          * The on-board BGEs found in sun4u machines aren't fitted with
529          * an EEPROM which means that we have to obtain the MAC address
530          * via OFW and that some tests will always fail.  We distinguish
531          * such BGEs by the subvendor ID, which also has to be obtained
532          * from OFW instead of the PCI configuration space as the latter
533          * indicates Broadcom as the subvendor of the netboot interface.
534          * For early Blade 1500 and 2500 we even have to check the OFW
535          * device path as the subvendor ID always defaults to Broadcom
536          * there.
537          */
538         if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
539             &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
540             (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
541                 return (0);
542         memset(buf, 0, sizeof(buf));
543         if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
544                 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
545                     strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
546                         return (0);
547                 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
548                     strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
549                         return (0);
550         }
551 #endif
552         return (1);
553 }
554
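/*
 * Indirect register access: the two helpers below reach the NIC's
 * internal memory through the PCI memory window.  The window base
 * register in PCI config space is pointed at the target offset, the
 * data register is read or written, and the base is reset to zero.
 */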
555 static uint32_t
556 bge_readmem_ind(struct bge_softc *sc, int off)
557 {
558         device_t dev;
559         uint32_t val;
560
561         if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
562             off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
563                 return (0);
564
565         dev = sc->bge_dev;
566
567         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
568         val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
569         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
570         return (val);
571 }
572
573 static void
574 bge_writemem_ind(struct bge_softc *sc, int off, int val)
575 {
576         device_t dev;
577
578         if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
579             off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
580                 return;
581
582         dev = sc->bge_dev;
583
584         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
585         pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
586         pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
587 }
588
589 #ifdef notdef
590 static uint32_t
591 bge_readreg_ind(struct bge_softc *sc, int off)
592 {
593         device_t dev;
594
595         dev = sc->bge_dev;
596
597         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
598         return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
599 }
600 #endif
601
602 static void
603 bge_writereg_ind(struct bge_softc *sc, int off, int val)
604 {
605         device_t dev;
606
607         dev = sc->bge_dev;
608
609         pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
610         pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
611 }
612
613 static void
614 bge_writemem_direct(struct bge_softc *sc, int off, int val)
615 {
616         CSR_WRITE_4(sc, off, val);
617 }
618
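/*
 * Write to a mailbox register.  On the BCM5906 the access is redirected
 * to the corresponding low-priority mailbox register.
 */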
619 static void
620 bge_writembx(struct bge_softc *sc, int off, int val)
621 {
622         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
623                 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
624
625         CSR_WRITE_4(sc, off, val);
626 }
627
628 /*
629  * Map a single buffer address.
630  */
631
632 static void
633 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
634 {
635         struct bge_dmamap_arg *ctx;
636
637         if (error)
638                 return;
639
640         KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
641
642         ctx = arg;
643         ctx->bge_busaddr = segs->ds_addr;
644 }
645
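/*
 * Read a single byte from NVRAM (BCM5906 only).  The software
 * arbitration lock is acquired, NVRAM access is enabled, the 32-bit
 * word covering the address is read and the requested byte extracted,
 * after which access is disabled and the lock released.
 */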
646 static uint8_t
647 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
648 {
649         uint32_t access, byte = 0;
650         int i;
651
652         /* Lock. */
653         CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
654         for (i = 0; i < 8000; i++) {
655                 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
656                         break;
657                 DELAY(20);
658         }
659         if (i == 8000)
660                 return (1);
661
662         /* Enable access. */
663         access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
664         CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
665
666         CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
667         CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
668         for (i = 0; i < BGE_TIMEOUT * 10; i++) {
669                 DELAY(10);
670                 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
671                         DELAY(10);
672                         break;
673                 }
674         }
675
676         if (i == BGE_TIMEOUT * 10) {
677                 if_printf(sc->bge_ifp, "nvram read timed out\n");
678                 return (1);
679         }
680
681         /* Get result. */
682         byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
683
684         *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
685
686         /* Disable access. */
687         CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
688
689         /* Unlock. */
690         CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
691         CSR_READ_4(sc, BGE_NVRAM_SWARB);
692
693         return (0);
694 }
695
696 /*
697  * Read a sequence of bytes from NVRAM.
698  */
699 static int
700 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
701 {
702         int err = 0, i;
703         uint8_t byte = 0;
704
705         if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
706                 return (1);
707
708         for (i = 0; i < cnt; i++) {
709                 err = bge_nvram_getbyte(sc, off + i, &byte);
710                 if (err)
711                         break;
712                 *(dest + i) = byte;
713         }
714
715         return (err ? 1 : 0);
716 }
717
718 /*
719  * Read a byte of data stored in the EEPROM at address 'addr.' The
720  * BCM570x supports both the traditional bitbang interface and an
721  * auto access interface for reading the EEPROM. We use the auto
722  * access method.
723  */
724 static uint8_t
725 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
726 {
727         int i;
728         uint32_t byte = 0;
729
730         /*
731          * Enable use of auto EEPROM access so we can avoid
732          * having to use the bitbang method.
733          */
734         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
735
736         /* Reset the EEPROM, load the clock period. */
737         CSR_WRITE_4(sc, BGE_EE_ADDR,
738             BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
739         DELAY(20);
740
741         /* Issue the read EEPROM command. */
742         CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
743
744         /* Wait for completion */
745         for(i = 0; i < BGE_TIMEOUT * 10; i++) {
746                 DELAY(10);
747                 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
748                         break;
749         }
750
751         if (i == BGE_TIMEOUT * 10) {
752                 device_printf(sc->bge_dev, "EEPROM read timed out\n");
753                 return (1);
754         }
755
756         /* Get result. */
757         byte = CSR_READ_4(sc, BGE_EE_DATA);
758
759         *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
760
761         return (0);
762 }
763
764 /*
765  * Read a sequence of bytes from the EEPROM.
766  */
767 static int
768 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
769 {
770         int i, error = 0;
771         uint8_t byte = 0;
772
773         for (i = 0; i < cnt; i++) {
774                 error = bge_eeprom_getbyte(sc, off + i, &byte);
775                 if (error)
776                         break;
777                 *(dest + i) = byte;
778         }
779
780         return (error ? 1 : 0);
781 }
782
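/*
 * Read a PHY register through the MI communication interface.  Autopoll
 * is temporarily disabled for the duration of the access and restored
 * afterwards.
 */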
783 static int
784 bge_miibus_readreg(device_t dev, int phy, int reg)
785 {
786         struct bge_softc *sc;
787         uint32_t val;
788         int i;
789
790         sc = device_get_softc(dev);
791
792         /* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
793         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
794                 CSR_WRITE_4(sc, BGE_MI_MODE,
795                     sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
796                 DELAY(80);
797         }
798
799         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
800             BGE_MIPHY(phy) | BGE_MIREG(reg));
801
802         /* Poll for the PHY register access to complete. */
803         for (i = 0; i < BGE_TIMEOUT; i++) {
804                 DELAY(10);
805                 val = CSR_READ_4(sc, BGE_MI_COMM);
806                 if ((val & BGE_MICOMM_BUSY) == 0) {
807                         DELAY(5);
808                         val = CSR_READ_4(sc, BGE_MI_COMM);
809                         break;
810                 }
811         }
812
813         if (i == BGE_TIMEOUT) {
814                 device_printf(sc->bge_dev,
815                     "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
816                     phy, reg, val);
817                 val = 0;
818         }
819
820         /* Restore the autopoll bit if necessary. */
821         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
822                 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
823                 DELAY(80);
824         }
825
826         if (val & BGE_MICOMM_READFAIL)
827                 return (0);
828
829         return (val & 0xFFFF);
830 }
831
832 static int
833 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
834 {
835         struct bge_softc *sc;
836         int i;
837
838         sc = device_get_softc(dev);
839
840         if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
841             (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
842                 return (0);
843
844         /* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
845         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
846                 CSR_WRITE_4(sc, BGE_MI_MODE,
847                     sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
848                 DELAY(80);
849         }
850
851         CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
852             BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
853
854         for (i = 0; i < BGE_TIMEOUT; i++) {
855                 DELAY(10);
856                 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
857                         DELAY(5);
858                         CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
859                         break;
860                 }
861         }
862
863         /* Restore the autopoll bit if necessary. */
864         if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
865                 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
866                 DELAY(80);
867         }
868
869         if (i == BGE_TIMEOUT)
870                 device_printf(sc->bge_dev,
871                     "PHY write timed out (phy %d, reg %d, val %d)\n",
872                     phy, reg, val);
873
874         return (0);
875 }
876
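/*
 * MII status change callback.  Track the link state and program the MAC
 * port mode (MII/GMII), duplex and flow-control bits to match the
 * currently negotiated media.
 */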
877 static void
878 bge_miibus_statchg(device_t dev)
879 {
880         struct bge_softc *sc;
881         struct mii_data *mii;
882         sc = device_get_softc(dev);
883         mii = device_get_softc(sc->bge_miibus);
884
885         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
886             (IFM_ACTIVE | IFM_AVALID)) {
887                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
888                 case IFM_10_T:
889                 case IFM_100_TX:
890                         sc->bge_link = 1;
891                         break;
892                 case IFM_1000_T:
893                 case IFM_1000_SX:
894                 case IFM_2500_SX:
895                         if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
896                                 sc->bge_link = 1;
897                         else
898                                 sc->bge_link = 0;
899                         break;
900                 default:
901                         sc->bge_link = 0;
902                         break;
903                 }
904         } else
905                 sc->bge_link = 0;
906         if (sc->bge_link == 0)
907                 return;
908         BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
909         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
910             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
911                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
912         else
913                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
914
915         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
916                 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
917                 if ((IFM_OPTIONS(mii->mii_media_active) &
918                     IFM_ETH_TXPAUSE) != 0)
919                         BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
920                 else
921                         BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
922                 if ((IFM_OPTIONS(mii->mii_media_active) &
923                     IFM_ETH_RXPAUSE) != 0)
924                         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
925                 else
926                         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
927         } else {
928                 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
929                 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
930                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
931         }
932 }
933
934 /*
935  * Initialize a standard receive ring descriptor.
936  */
937 static int
938 bge_newbuf_std(struct bge_softc *sc, int i)
939 {
940         struct mbuf *m;
941         struct bge_rx_bd *r;
942         bus_dma_segment_t segs[1];
943         bus_dmamap_t map;
944         int error, nsegs;
945
946         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
947         if (m == NULL)
948                 return (ENOBUFS);
949         m->m_len = m->m_pkthdr.len = MCLBYTES;
950         if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
951                 m_adj(m, ETHER_ALIGN);
952
953         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
954             sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
955         if (error != 0) {
956                 m_freem(m);
957                 return (error);
958         }
959         if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
960                 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
961                     sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
962                 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
963                     sc->bge_cdata.bge_rx_std_dmamap[i]);
964         }
965         map = sc->bge_cdata.bge_rx_std_dmamap[i];
966         sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
967         sc->bge_cdata.bge_rx_std_sparemap = map;
968         sc->bge_cdata.bge_rx_std_chain[i] = m;
969         sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
970         r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
971         r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
972         r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
973         r->bge_flags = BGE_RXBDFLAG_END;
974         r->bge_len = segs[0].ds_len;
975         r->bge_idx = i;
976
977         bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
978             sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
979
980         return (0);
981 }
982
983 /*
984  * Initialize a jumbo receive ring descriptor. This allocates
985  * a 9K mbuf cluster to use as the jumbo receive buffer.
986  */
987 static int
988 bge_newbuf_jumbo(struct bge_softc *sc, int i)
989 {
990         bus_dma_segment_t segs[BGE_NSEG_JUMBO];
991         bus_dmamap_t map;
992         struct bge_extrx_bd *r;
993         struct mbuf *m;
994         int error, nsegs;
995
996         MGETHDR(m, M_DONTWAIT, MT_DATA);
997         if (m == NULL)
998                 return (ENOBUFS);
999
1000         m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1001         if (!(m->m_flags & M_EXT)) {
1002                 m_freem(m);
1003                 return (ENOBUFS);
1004         }
1005         m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1006         if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1007                 m_adj(m, ETHER_ALIGN);
1008
1009         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1010             sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1011         if (error != 0) {
1012                 m_freem(m);
1013                 return (error);
1014         }
1015
1016         if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1017                 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1018                     sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1019                 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1020                     sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1021         }
1022         map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1023         sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1024             sc->bge_cdata.bge_rx_jumbo_sparemap;
1025         sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1026         sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1027         sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1028         sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1029         sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1030         sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1031
1032         /*
1033          * Fill in the extended RX buffer descriptor.
1034          */
1035         r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1036         r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1037         r->bge_idx = i;
1038         r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
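        /*
         * Note: the cases below intentionally fall through so that every
         * DMA segment returned for the cluster is recorded in the
         * extended buffer descriptor.
         */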
1039         switch (nsegs) {
1040         case 4:
1041                 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1042                 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1043                 r->bge_len3 = segs[3].ds_len;
1044                 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1045         case 3:
1046                 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1047                 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1048                 r->bge_len2 = segs[2].ds_len;
1049                 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1050         case 2:
1051                 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1052                 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1053                 r->bge_len1 = segs[1].ds_len;
1054                 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1055         case 1:
1056                 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1057                 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1058                 r->bge_len0 = segs[0].ds_len;
1059                 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1060                 break;
1061         default:
1062                 panic("%s: %d segments\n", __func__, nsegs);
1063         }
1064
1065         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1066             sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1067
1068         return (0);
1069 }
1070
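/*
 * Fill the standard receive producer ring with mbuf clusters and hand
 * the initial producer index to the chip.
 */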
1071 static int
1072 bge_init_rx_ring_std(struct bge_softc *sc)
1073 {
1074         int error, i;
1075
1076         bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1077         sc->bge_std = 0;
1078         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1079                 if ((error = bge_newbuf_std(sc, i)) != 0)
1080                         return (error);
1081                 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1082         }
1083
1084         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1085             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1086
1087         sc->bge_std = 0;
1088         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1089
1090         return (0);
1091 }
1092
1093 static void
1094 bge_free_rx_ring_std(struct bge_softc *sc)
1095 {
1096         int i;
1097
1098         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1099                 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1100                         bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1101                             sc->bge_cdata.bge_rx_std_dmamap[i],
1102                             BUS_DMASYNC_POSTREAD);
1103                         bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1104                             sc->bge_cdata.bge_rx_std_dmamap[i]);
1105                         m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1106                         sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1107                 }
1108                 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1109                     sizeof(struct bge_rx_bd));
1110         }
1111 }
1112
1113 static int
1114 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1115 {
1116         struct bge_rcb *rcb;
1117         int error, i;
1118
1119         bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1120         sc->bge_jumbo = 0;
1121         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1122                 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1123                         return (error);
1124                 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1125         }
1126
1127         bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1128             sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1129
1130         sc->bge_jumbo = 0;
1131
1132         /* Enable the jumbo receive producer ring. */
1133         rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1134         rcb->bge_maxlen_flags =
1135             BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1136         CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1137
1138         bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1139
1140         return (0);
1141 }
1142
1143 static void
1144 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1145 {
1146         int i;
1147
1148         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1149                 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1150                         bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1151                             sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1152                             BUS_DMASYNC_POSTREAD);
1153                         bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1154                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1155                         m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1156                         sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1157                 }
1158                 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1159                     sizeof(struct bge_extrx_bd));
1160         }
1161 }
1162
1163 static void
1164 bge_free_tx_ring(struct bge_softc *sc)
1165 {
1166         int i;
1167
1168         if (sc->bge_ldata.bge_tx_ring == NULL)
1169                 return;
1170
1171         for (i = 0; i < BGE_TX_RING_CNT; i++) {
1172                 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1173                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1174                             sc->bge_cdata.bge_tx_dmamap[i],
1175                             BUS_DMASYNC_POSTWRITE);
1176                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1177                             sc->bge_cdata.bge_tx_dmamap[i]);
1178                         m_freem(sc->bge_cdata.bge_tx_chain[i]);
1179                         sc->bge_cdata.bge_tx_chain[i] = NULL;
1180                 }
1181                 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1182                     sizeof(struct bge_tx_bd));
1183         }
1184 }
1185
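/*
 * Reset the transmit ring bookkeeping and the host/NIC send producer
 * mailboxes.  The mailbox writes are repeated on BCM5700 B-step
 * revisions to work around a chip erratum (see below).
 */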
1186 static int
1187 bge_init_tx_ring(struct bge_softc *sc)
1188 {
1189         sc->bge_txcnt = 0;
1190         sc->bge_tx_saved_considx = 0;
1191
1192         bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1193         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1194             sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1195
1196         /* Initialize transmit producer index for host-memory send ring. */
1197         sc->bge_tx_prodidx = 0;
1198         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1199
1200         /* 5700 b2 errata */
1201         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1202                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1203
1204         /* NIC-memory send ring not used; initialize to zero. */
1205         bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1206         /* 5700 b2 errata */
1207         if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1208                 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1209
1210         return (0);
1211 }
1212
1213 static void
1214 bge_setpromisc(struct bge_softc *sc)
1215 {
1216         struct ifnet *ifp;
1217
1218         BGE_LOCK_ASSERT(sc);
1219
1220         ifp = sc->bge_ifp;
1221
1222         /* Enable or disable promiscuous mode as needed. */
1223         if (ifp->if_flags & IFF_PROMISC)
1224                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1225         else
1226                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1227 }
1228
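/*
 * Program the multicast hash filter.  Each multicast address is hashed
 * with ether_crc32_le() and the low 7 bits of the result select a bit
 * in the four 32-bit hash registers starting at BGE_MAR0.  In
 * promiscuous or all-multicast mode all filter bits are set instead.
 */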
1229 static void
1230 bge_setmulti(struct bge_softc *sc)
1231 {
1232         struct ifnet *ifp;
1233         struct ifmultiaddr *ifma;
1234         uint32_t hashes[4] = { 0, 0, 0, 0 };
1235         int h, i;
1236
1237         BGE_LOCK_ASSERT(sc);
1238
1239         ifp = sc->bge_ifp;
1240
1241         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1242                 for (i = 0; i < 4; i++)
1243                         CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1244                 return;
1245         }
1246
1247         /* First, zot all the existing filters. */
1248         for (i = 0; i < 4; i++)
1249                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1250
1251         /* Now program new ones. */
1252         if_maddr_rlock(ifp);
1253         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1254                 if (ifma->ifma_addr->sa_family != AF_LINK)
1255                         continue;
1256                 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1257                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1258                 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1259         }
1260         if_maddr_runlock(ifp);
1261
1262         for (i = 0; i < 4; i++)
1263                 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1264 }
1265
1266 static void
1267 bge_setvlan(struct bge_softc *sc)
1268 {
1269         struct ifnet *ifp;
1270
1271         BGE_LOCK_ASSERT(sc);
1272
1273         ifp = sc->bge_ifp;
1274
1275         /* Enable or disable VLAN tag stripping as needed. */
1276         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1277                 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1278         else
1279                 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1280 }
1281
1282 static void
1283 bge_sig_pre_reset(struct bge_softc *sc, int type)
1284 {
1285
1286         /*
1287          * Some chips don't like this, so only do it if ASF is enabled.
1288          */
1289         if (sc->bge_asf_mode)
1290                 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1291
1292         if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1293                 switch (type) {
1294                 case BGE_RESET_START:
1295                         bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1296                         break;
1297                 case BGE_RESET_STOP:
1298                         bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1299                         break;
1300                 }
1301         }
1302 }
1303
1304 static void
1305 bge_sig_post_reset(struct bge_softc *sc, int type)
1306 {
1307
1308         if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1309                 switch (type) {
1310                 case BGE_RESET_START:
1311                         bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1312                         /* START DONE */
1313                         break;
1314                 case BGE_RESET_STOP:
1315                         bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1316                         break;
1317                 }
1318         }
1319 }
1320
1321 static void
1322 bge_sig_legacy(struct bge_softc *sc, int type)
1323 {
1324
1325         if (sc->bge_asf_mode) {
1326                 switch (type) {
1327                 case BGE_RESET_START:
1328                         bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1329                         break;
1330                 case BGE_RESET_STOP:
1331                         bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1332                         break;
1333                 }
1334         }
1335 }
1336
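/*
 * Ask the ASF/IPMI firmware to pause: post BGE_FW_PAUSE in the general
 * communications area (BGE_SOFTWARE_GENCOMM_FW), raise a CPU event and
 * wait briefly for the event bit to clear.
 */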
1337 static void
1338 bge_stop_fw(struct bge_softc *sc)
1339 {
1340         int i;
1341
1342         if (sc->bge_asf_mode) {
1343                 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1344                 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1345                     CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1346
1347                 for (i = 0; i < 100; i++ ) {
1348                         if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1349                                 break;
1350                         DELAY(10);
1351                 }
1352         }
1353 }
1354
1355 /*
1356  * Do endian, PCI and DMA initialization.
1357  */
1358 static int
1359 bge_chipinit(struct bge_softc *sc)
1360 {
1361         uint32_t dma_rw_ctl, misc_ctl;
1362         uint16_t val;
1363         int i;
1364
1365         /* Set endianness before we access any non-PCI registers. */
1366         misc_ctl = BGE_INIT;
1367         if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1368                 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1369         pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1370
1371         /* Clear the MAC control register */
1372         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1373
1374         /*
1375          * Clear the MAC statistics block in the NIC's
1376          * internal memory.
1377          */
1378         for (i = BGE_STATS_BLOCK;
1379             i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1380                 BGE_MEMWIN_WRITE(sc, i, 0);
1381
1382         for (i = BGE_STATUS_BLOCK;
1383             i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1384                 BGE_MEMWIN_WRITE(sc, i, 0);
1385
1386         if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1387                 /*
1388                  *  Fix data corruption caused by non-qword write with WB.
1389                  *  Fix master abort in PCI mode.
1390                  *  Fix PCI latency timer.
1391                  */
1392                 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1393                 val |= (1 << 10) | (1 << 12) | (1 << 13);
1394                 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1395         }
1396
1397         /*
1398          * Set up the PCI DMA control register.
1399          */
1400         dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1401             BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1402         if (sc->bge_flags & BGE_FLAG_PCIE) {
1403                 /* Read watermark not used, 128 bytes for write. */
1404                 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1405         } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1406                 if (BGE_IS_5714_FAMILY(sc)) {
1407                         /* 256 bytes for read and write. */
1408                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1409                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1410                         dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1411                             BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1412                             BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1413                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1414                         /*
1415                          * In the BCM5703, the DMA read watermark should
1416                          * be set to less than or equal to the maximum
1417                          * memory read byte count of the PCI-X command
1418                          * register.
1419                          */
1420                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1421                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1422                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1423                         /* 1536 bytes for read, 384 bytes for write. */
1424                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1425                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1426                 } else {
1427                         /* 384 bytes for read and write. */
1428                         dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1429                             BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1430                             0x0F;
1431                 }
1432                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1433                     sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1434                         uint32_t tmp;
1435
1436                         /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1437                         tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1438                         if (tmp == 6 || tmp == 7)
1439                                 dma_rw_ctl |=
1440                                     BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1441
1442                         /* Set PCI-X DMA write workaround. */
1443                         dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1444                 }
1445         } else {
1446                 /* Conventional PCI bus: 256 bytes for read and write. */
1447                 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1448                     BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1449
1450                 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1451                     sc->bge_asicrev != BGE_ASICREV_BCM5750)
1452                         dma_rw_ctl |= 0x0F;
1453         }
1454         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1455             sc->bge_asicrev == BGE_ASICREV_BCM5701)
1456                 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1457                     BGE_PCIDMARWCTL_ASRT_ALL_BE;
1458         if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1459             sc->bge_asicrev == BGE_ASICREV_BCM5704)
1460                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1461         if (BGE_IS_5717_PLUS(sc))
1462                 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1463         pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1464
1465         /*
1466          * Set up general mode register.
1467          */
1468         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1469             BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1470             BGE_MODECTL_TX_NO_PHDR_CSUM);
1471
1472         /*
1473          * BCM5701 B5 has a bug causing data corruption when using
1474          * 64-bit DMA reads, which can be terminated early and then
1475          * completed later as 32-bit accesses, in combination with
1476          * certain bridges.
1477          */
1478         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1479             sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1480                 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1481
1482         /*
1483          * Tell the firmware the driver is running
1484          */
1485         if (sc->bge_asf_mode & ASF_STACKUP)
1486                 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1487
1488         /*
1489          * Disable memory write invalidate.  Apparently it is not supported
1490          * properly by these devices.  Also ensure that INTx isn't disabled,
1491          * as these chips need it even when using MSI.
1492          */
1493         PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1494             PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1495
1496         /* Set the timer prescaler (always 66MHz) */
1497         CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1498
1499         /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1500         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1501                 DELAY(40);      /* XXX */
1502
1503                 /* Put PHY into ready state */
1504                 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1505                 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1506                 DELAY(40);
1507         }
1508
1509         return (0);
1510 }
1511
1512 static int
1513 bge_blockinit(struct bge_softc *sc)
1514 {
1515         struct bge_rcb *rcb;
1516         bus_size_t vrcb;
1517         bge_hostaddr taddr;
1518         uint32_t val;
1519         int i, limit;
1520
1521         /*
1522          * Initialize the memory window pointer register so that
1523          * we can access the first 32K of internal NIC RAM. This will
1524          * allow us to set up the TX send ring RCBs and the RX return
1525          * ring RCBs, plus other things which live in NIC memory.
1526          */
1527         CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1528
1529         /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1530
1531         if (!(BGE_IS_5705_PLUS(sc))) {
1532                 /* Configure mbuf memory pool */
1533                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1534                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1535                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1536                 else
1537                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1538
1539                 /* Configure DMA resource pool */
1540                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1541                     BGE_DMA_DESCRIPTORS);
1542                 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1543         }
1544
1545         /* Configure mbuf pool watermarks */
1546         if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1547                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1548                 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1549                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1550                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1551                 } else {
1552                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1553                         CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1554                 }
1555         } else if (!BGE_IS_5705_PLUS(sc)) {
1556                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1557                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1558                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1559         } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1560                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1561                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1562                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1563         } else {
1564                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1565                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1566                 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1567         }
1568
1569         /* Configure DMA resource watermarks */
1570         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1571         CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1572
1573         /* Enable buffer manager */
1574         if (!(BGE_IS_5705_PLUS(sc))) {
1575                 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1576                     BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1577
1578                 /* Poll for buffer manager start indication */
1579                 for (i = 0; i < BGE_TIMEOUT; i++) {
1580                         DELAY(10);
1581                         if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1582                                 break;
1583                 }
1584
1585                 if (i == BGE_TIMEOUT) {
1586                         device_printf(sc->bge_dev,
1587                             "buffer manager failed to start\n");
1588                         return (ENXIO);
1589                 }
1590         }
1591
1592         /* Enable flow-through queues */
1593         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1594         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1595
1596         /* Wait until queue initialization is complete */
1597         for (i = 0; i < BGE_TIMEOUT; i++) {
1598                 DELAY(10);
1599                 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1600                         break;
1601         }
1602
1603         if (i == BGE_TIMEOUT) {
1604                 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1605                 return (ENXIO);
1606         }
1607
1608         /*
1609          * Summary of rings supported by the controller:
1610          *
1611          * Standard Receive Producer Ring
1612          * - This ring is used to feed receive buffers for "standard"
1613          *   sized frames (typically 1536 bytes) to the controller.
1614          *
1615          * Jumbo Receive Producer Ring
1616          * - This ring is used to feed receive buffers for jumbo sized
1617          *   frames (i.e. anything bigger than the "standard" frames)
1618          *   to the controller.
1619          *
1620          * Mini Receive Producer Ring
1621          * - This ring is used to feed receive buffers for "mini"
1622          *   sized frames to the controller.
1623          * - This feature required external memory for the controller
1624          *   but was never used in a production system.  Should always
1625          *   be disabled.
1626          *
1627          * Receive Return Ring
1628          * - After the controller has placed an incoming frame into a
1629          *   receive buffer, that buffer is moved into a receive return
1630          *   ring.  The driver is then responsible for passing the
1631          *   buffer up to the stack.  Many versions of the controller
1632          *   support multiple RR rings.
1633          *
1634          * Send Ring
1635          * - This ring is used for outgoing frames.  Many versions of
1636          *   the controller support multiple send rings.
1637          */
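	/*
	 * In practice the receive path flows through these rings as
	 * follows: the driver posts empty mbufs on the standard (or
	 * jumbo) producer ring, the controller DMAs an incoming frame
	 * into one of those buffers and queues a completed descriptor
	 * on a receive return ring, and the driver's RX path then pulls
	 * the descriptor off the return ring, hands the mbuf to the
	 * stack and replenishes the producer ring.  Only the producer
	 * rings are refilled by the host; the return rings are written
	 * by the hardware alone.
	 */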
1638
1639         /* Initialize the standard receive producer ring control block. */
1640         rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1641         rcb->bge_hostaddr.bge_addr_lo =
1642             BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1643         rcb->bge_hostaddr.bge_addr_hi =
1644             BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1645         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1646             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1647         if (BGE_IS_5717_PLUS(sc)) {
1648                 /*
1649                  * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1650                  * Bits 15-2 : Maximum RX frame size
1651                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1652                  * Bit 0     : Reserved
1653                  */
1654                 rcb->bge_maxlen_flags =
1655                     BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1656         } else if (BGE_IS_5705_PLUS(sc)) {
1657                 /*
1658                  * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1659                  * Bits 15-2 : Reserved (should be 0)
1660                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1661                  * Bit 0     : Reserved
1662                  */
1663                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1664         } else {
1665                 /*
1666                  * Ring size is always XXX entries
1667                  * Bits 31-16: Maximum RX frame size
1668                  * Bits 15-2 : Reserved (should be 0)
1669                  * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1670                  * Bit 0     : Reserved
1671                  */
1672                 rcb->bge_maxlen_flags =
1673                     BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1674         }
1675         if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1676                 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1677         else
1678                 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1679         /* Write the standard receive producer ring control block. */
1680         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1681         CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1682         CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1683         CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1684
1685         /* Reset the standard receive producer ring producer index. */
1686         bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1687
1688         /*
1689          * Initialize the jumbo RX producer ring control
1690          * block.  We set the 'ring disabled' bit in the
1691          * flags field until we're actually ready to start
1692          * using this ring (i.e. once we set the MTU
1693          * high enough to require it).
1694          */
1695         if (BGE_IS_JUMBO_CAPABLE(sc)) {
1696                 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1697                 /* Get the jumbo receive producer ring RCB parameters. */
1698                 rcb->bge_hostaddr.bge_addr_lo =
1699                     BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1700                 rcb->bge_hostaddr.bge_addr_hi =
1701                     BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1702                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1703                     sc->bge_cdata.bge_rx_jumbo_ring_map,
1704                     BUS_DMASYNC_PREREAD);
1705                 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1706                     BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1707                 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1708                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1709                 else
1710                         rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1711                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1712                     rcb->bge_hostaddr.bge_addr_hi);
1713                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1714                     rcb->bge_hostaddr.bge_addr_lo);
1715                 /* Program the jumbo receive producer ring RCB parameters. */
1716                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1717                     rcb->bge_maxlen_flags);
1718                 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1719                 /* Reset the jumbo receive producer ring producer index. */
1720                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1721         }
1722
1723         /* Disable the mini receive producer ring RCB. */
1724         if (BGE_IS_5700_FAMILY(sc)) {
1725                 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1726                 rcb->bge_maxlen_flags =
1727                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1728                 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1729                     rcb->bge_maxlen_flags);
1730                 /* Reset the mini receive producer ring producer index. */
1731                 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1732         }
1733
1734         /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1735         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1736                 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1737                     sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1738                     sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1739                         CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1740                             (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1741         }
1742         /*
1743          * The BD ring replenish thresholds control how often the
1744          * hardware fetches new BDs from the producer rings in host
1745          * memory.  Setting the value too low on a busy system can
1746          * starve the hardware and reduce throughput.
1747          *
1748          * Set the BD ring replenish thresholds.  The recommended
1749          * values are 1/8th the number of descriptors allocated to
1750          * each ring.
1751          * XXX The 5754 requires a lower threshold, so it might be a
1752          * requirement of all 575x family chips.  The Linux driver sets
1753          * the lower threshold for all 5705 family chips as well, but there
1754          * are reports that it might not need to be so strict.
1755          *
1756          * XXX Linux does some extra fiddling here for the 5906 parts as
1757          * well.
1758          */
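	/*
	 * Worked example: with the usual 512-entry standard receive ring
	 * (BGE_STD_RX_RING_CNT) the pre-5705 branch below programs a
	 * threshold of 512 / 8 = 64 BDs, while 5705 and newer parts get
	 * the fixed, more conservative threshold of 8.
	 */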
1759         if (BGE_IS_5705_PLUS(sc))
1760                 val = 8;
1761         else
1762                 val = BGE_STD_RX_RING_CNT / 8;
1763         CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1764         if (BGE_IS_JUMBO_CAPABLE(sc))
1765                 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1766                     BGE_JUMBO_RX_RING_CNT/8);
1767         if (BGE_IS_5717_PLUS(sc)) {
1768                 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1769                 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1770         }
1771
1772         /*
1773          * Disable all send rings by setting the 'ring disabled' bit
1774          * in the flags field of all the TX send ring control blocks,
1775          * located in NIC memory.
1776          */
1777         if (!BGE_IS_5705_PLUS(sc))
1778                 /* 5700 to 5704 had 16 send rings. */
1779                 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1780         else
1781                 limit = 1;
1782         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1783         for (i = 0; i < limit; i++) {
1784                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1785                     BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1786                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1787                 vrcb += sizeof(struct bge_rcb);
1788         }
1789
1790         /* Configure send ring RCB 0 (we use only the first ring) */
1791         vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1792         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1793         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1794         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1795         if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1796                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1797         else
1798                 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1799                     BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1800         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1801             BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1802
1803         /*
1804          * Disable all receive return rings by setting the
1805          * 'ring disabled' bit in the flags field of all the receive
1806          * return ring control blocks, located in NIC memory.
1807          */
1808         if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
1809                 /* Should be 17, use 16 until we get an SRAM map. */
1810                 limit = 16;
1811         } else if (!BGE_IS_5705_PLUS(sc))
1812                 limit = BGE_RX_RINGS_MAX;
1813         else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
1814                 limit = 4;
1815         else
1816                 limit = 1;
1817         /* Disable all receive return rings. */
1818         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1819         for (i = 0; i < limit; i++) {
1820                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1821                 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1822                 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1823                     BGE_RCB_FLAG_RING_DISABLED);
1824                 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1825                 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1826                     (i * (sizeof(uint64_t))), 0);
1827                 vrcb += sizeof(struct bge_rcb);
1828         }
1829
1830         /*
1831          * Set up receive return ring 0.  Note that the NIC address
1832          * for RX return rings is 0x0.  The return rings live entirely
1833          * within the host, so the nicaddr field in the RCB isn't used.
1834          */
1835         vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1836         BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1837         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1838         RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1839         RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1840         RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1841             BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1842
1843         /* Set random backoff seed for TX */
1844         CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1845             IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1846             IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1847             IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1848             BGE_TX_BACKOFF_SEED_MASK);
1849
1850         /* Set inter-packet gap */
1851         CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1852
1853         /*
1854          * Specify which ring to use for packets that don't match
1855          * any RX rules.
1856          */
1857         CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1858
1859         /*
1860          * Configure number of RX lists. One interrupt distribution
1861          * list, sixteen active lists, one bad frames class.
1862          */
1863         CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1864
1865         /* Initialize RX list placement stats mask. */
1866         CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1867         CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1868
1869         /* Disable host coalescing until we get it set up */
1870         CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1871
1872         /* Poll to make sure it's shut down. */
1873         for (i = 0; i < BGE_TIMEOUT; i++) {
1874                 DELAY(10);
1875                 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1876                         break;
1877         }
1878
1879         if (i == BGE_TIMEOUT) {
1880                 device_printf(sc->bge_dev,
1881                     "host coalescing engine failed to idle\n");
1882                 return (ENXIO);
1883         }
1884
1885         /* Set up host coalescing defaults */
1886         CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1887         CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1888         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1889         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1890         if (!(BGE_IS_5705_PLUS(sc))) {
1891                 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1892                 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1893         }
1894         CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1895         CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1896
1897         /* Set up address of statistics block */
1898         if (!(BGE_IS_5705_PLUS(sc))) {
1899                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1900                     BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1901                 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1902                     BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1903                 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1904                 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1905                 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1906         }
1907
1908         /* Set up address of status block */
1909         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1910             BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1911         CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1912             BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1913
1914         /* Set up status block size. */
1915         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1916             sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1917                 val = BGE_STATBLKSZ_FULL;
1918                 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1919         } else {
1920                 val = BGE_STATBLKSZ_32BYTE;
1921                 bzero(sc->bge_ldata.bge_status_block, 32);
1922         }
1923         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1924             sc->bge_cdata.bge_status_map,
1925             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1926
1927         /* Turn on host coalescing state machine */
1928         CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
1929
1930         /* Turn on RX BD completion state machine and enable attentions */
1931         CSR_WRITE_4(sc, BGE_RBDC_MODE,
1932             BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1933
1934         /* Turn on RX list placement state machine */
1935         CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1936
1937         /* Turn on RX list selector state machine. */
1938         if (!(BGE_IS_5705_PLUS(sc)))
1939                 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1940
1941         val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1942             BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1943             BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1944             BGE_MACMODE_FRMHDR_DMA_ENB;
1945
1946         if (sc->bge_flags & BGE_FLAG_TBI)
1947                 val |= BGE_PORTMODE_TBI;
1948         else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
1949                 val |= BGE_PORTMODE_GMII;
1950         else
1951                 val |= BGE_PORTMODE_MII;
1952
1953         /* Turn on DMA, clear stats */
1954         CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1955
1956         /* Set misc. local control, enable interrupts on attentions */
1957         CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1958
1959 #ifdef notdef
1960         /* Assert GPIO pins for PHY reset */
1961         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1962             BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1963         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1964             BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1965 #endif
1966
1967         /* Turn on DMA completion state machine */
1968         if (!(BGE_IS_5705_PLUS(sc)))
1969                 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1970
1971         val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1972
1973         /* Enable host coalescing bug fix. */
1974         if (BGE_IS_5755_PLUS(sc))
1975                 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1976
1977         /* Request larger DMA burst size to get better performance. */
1978         if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
1979                 val |= BGE_WDMAMODE_BURST_ALL_DATA;
1980
1981         /* Turn on write DMA state machine */
1982         CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1983         DELAY(40);
1984
1985         /* Turn on read DMA state machine */
1986         val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1987
1988         if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
1989                 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
1990
1991         if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1992             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1993             sc->bge_asicrev == BGE_ASICREV_BCM57780)
1994                 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1995                     BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1996                     BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1997         if (sc->bge_flags & BGE_FLAG_PCIE)
1998                 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1999         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2000                 val |= BGE_RDMAMODE_TSO4_ENABLE;
2001                 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2002                     sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2003                     sc->bge_asicrev == BGE_ASICREV_BCM57780)
2004                         val |= BGE_RDMAMODE_TSO6_ENABLE;
2005         }
2006         if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2007             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2008             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2009             sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2010             BGE_IS_5717_PLUS(sc)) {
2011                 /*
2012                  * Enable fix for read DMA FIFO overruns.
2013                  * The fix is to limit the number of RX BDs
2014                  * the hardware would fetch at a time.
2015                  */
2016                 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL,
2017                     CSR_READ_4(sc, BGE_RDMA_RSRVCTRL) |
2018                     BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2019         }
2020         CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2021         DELAY(40);
2022
2023         /* Turn on RX data completion state machine */
2024         CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2025
2026         /* Turn on RX BD initiator state machine */
2027         CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2028
2029         /* Turn on RX data and RX BD initiator state machine */
2030         CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2031
2032         /* Turn on Mbuf cluster free state machine */
2033         if (!(BGE_IS_5705_PLUS(sc)))
2034                 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2035
2036         /* Turn on send BD completion state machine */
2037         CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2038
2039         /* Turn on send data completion state machine */
2040         val = BGE_SDCMODE_ENABLE;
2041         if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2042                 val |= BGE_SDCMODE_CDELAY;
2043         CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2044
2045         /* Turn on send data initiator state machine */
2046         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2047                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2048                     BGE_SDIMODE_HW_LSO_PRE_DMA);
2049         else
2050                 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2051
2052         /* Turn on send BD initiator state machine */
2053         CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2054
2055         /* Turn on send BD selector state machine */
2056         CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2057
2058         CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2059         CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2060             BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2061
2062         /* ack/clear link change events */
2063         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2064             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2065             BGE_MACSTAT_LINK_CHANGED);
2066         CSR_WRITE_4(sc, BGE_MI_STS, 0);
2067
2068         /*
2069          * Enable attention when the link has changed state for
2070          * devices that use auto polling.
2071          */
2072         if (sc->bge_flags & BGE_FLAG_TBI) {
2073                 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2074         } else {
2075                 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2076                         CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2077                         DELAY(80);
2078                 }
2079                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2080                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2081                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2082                             BGE_EVTENB_MI_INTERRUPT);
2083         }
2084
2085         /*
2086          * Clear any pending link state attention.
2087          * Otherwise some link state change events may be lost until attention
2088          * is cleared by bge_intr() -> bge_link_upd() sequence.
2089          * It's not necessary on newer BCM chips - perhaps enabling link
2090          * state change attentions implies clearing pending attention.
2091          */
2092         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2093             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2094             BGE_MACSTAT_LINK_CHANGED);
2095
2096         /* Enable link state change attentions. */
2097         BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2098
2099         return (0);
2100 }
2101
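/*
 * bge_revisions is matched on the full chip id (ASIC revision plus
 * stepping); bge_majorrevs is matched on the major ASIC revision only, so
 * an unrecognized stepping of a known ASIC still resolves to a usable name.
 */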
2102 const struct bge_revision *
2103 bge_lookup_rev(uint32_t chipid)
2104 {
2105         const struct bge_revision *br;
2106
2107         for (br = bge_revisions; br->br_name != NULL; br++) {
2108                 if (br->br_chipid == chipid)
2109                         return (br);
2110         }
2111
2112         for (br = bge_majorrevs; br->br_name != NULL; br++) {
2113                 if (br->br_chipid == BGE_ASICREV(chipid))
2114                         return (br);
2115         }
2116
2117         return (NULL);
2118 }
2119
2120 const struct bge_vendor *
2121 bge_lookup_vendor(uint16_t vid)
2122 {
2123         const struct bge_vendor *v;
2124
2125         for (v = bge_vendors; v->v_name != NULL; v++)
2126                 if (v->v_id == vid)
2127                         return (v);
2128
2129         panic("%s: unknown vendor %d", __func__, vid);
2130         return (NULL);
2131 }
2132
2133 /*
2134  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2135  * against our list and return its name if we find a match.
2136  *
2137  * Note that since the Broadcom controller contains VPD support, we
2138  * try to get the device name string from the controller itself instead
2139  * of the compiled-in string. It guarantees we'll always announce the
2140  * right product name. We fall back to the compiled-in string when
2141  * VPD is unavailable or corrupt.
2142  */
2143 static int
2144 bge_probe(device_t dev)
2145 {
2146         const struct bge_type *t = bge_devs;
2147         struct bge_softc *sc = device_get_softc(dev);
2148         uint16_t vid, did;
2149
2150         sc->bge_dev = dev;
2151         vid = pci_get_vendor(dev);
2152         did = pci_get_device(dev);
2153         while (t->bge_vid != 0) {
2154                 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2155                         char model[64], buf[96];
2156                         const struct bge_revision *br;
2157                         const struct bge_vendor *v;
2158                         uint32_t id;
2159
2160                         id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2161                             BGE_PCIMISCCTL_ASICREV_SHIFT;
2162                         if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2163                                 /*
2164                                  * Find the ASIC revision.  Different chips
2165                                  * use different registers.
2166                                  */
2167                                 switch (pci_get_device(dev)) {
2168                                 case BCOM_DEVICEID_BCM5717:
2169                                 case BCOM_DEVICEID_BCM5718:
2170                                         id = pci_read_config(dev,
2171                                             BGE_PCI_GEN2_PRODID_ASICREV, 4);
2172                                         break;
2173                                 default:
2174                                         id = pci_read_config(dev,
2175                                             BGE_PCI_PRODID_ASICREV, 4);
2176                                 }
2177                         }
2178                         br = bge_lookup_rev(id);
2179                         v = bge_lookup_vendor(vid);
2180                         {
2181 #if __FreeBSD_version > 700024
2182                                 const char *pname;
2183
2184                                 if (bge_has_eaddr(sc) &&
2185                                     pci_get_vpd_ident(dev, &pname) == 0)
2186                                         snprintf(model, 64, "%s", pname);
2187                                 else
2188 #endif
2189                                         snprintf(model, 64, "%s %s",
2190                                             v->v_name,
2191                                             br != NULL ? br->br_name :
2192                                             "NetXtreme Ethernet Controller");
2193                         }
2194                         snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2195                             br != NULL ? "" : "unknown ", id);
2196                         device_set_desc_copy(dev, buf);
2197                         return (0);
2198                 }
2199                 t++;
2200         }
2201
2202         return (ENXIO);
2203 }
2204
2205 static void
2206 bge_dma_free(struct bge_softc *sc)
2207 {
2208         int i;
2209
2210         /* Destroy DMA maps for RX buffers. */
2211         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2212                 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2213                         bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2214                             sc->bge_cdata.bge_rx_std_dmamap[i]);
2215         }
2216         if (sc->bge_cdata.bge_rx_std_sparemap)
2217                 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2218                     sc->bge_cdata.bge_rx_std_sparemap);
2219
2220         /* Destroy DMA maps for jumbo RX buffers. */
2221         for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2222                 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2223                         bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2224                             sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2225         }
2226         if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2227                 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2228                     sc->bge_cdata.bge_rx_jumbo_sparemap);
2229
2230         /* Destroy DMA maps for TX buffers. */
2231         for (i = 0; i < BGE_TX_RING_CNT; i++) {
2232                 if (sc->bge_cdata.bge_tx_dmamap[i])
2233                         bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2234                             sc->bge_cdata.bge_tx_dmamap[i]);
2235         }
2236
2237         if (sc->bge_cdata.bge_rx_mtag)
2238                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2239         if (sc->bge_cdata.bge_tx_mtag)
2240                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2241
2242
2243         /* Destroy standard RX ring. */
2244         if (sc->bge_cdata.bge_rx_std_ring_map)
2245                 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2246                     sc->bge_cdata.bge_rx_std_ring_map);
2247         if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2248                 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2249                     sc->bge_ldata.bge_rx_std_ring,
2250                     sc->bge_cdata.bge_rx_std_ring_map);
2251
2252         if (sc->bge_cdata.bge_rx_std_ring_tag)
2253                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2254
2255         /* Destroy jumbo RX ring. */
2256         if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2257                 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2258                     sc->bge_cdata.bge_rx_jumbo_ring_map);
2259
2260         if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2261             sc->bge_ldata.bge_rx_jumbo_ring)
2262                 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2263                     sc->bge_ldata.bge_rx_jumbo_ring,
2264                     sc->bge_cdata.bge_rx_jumbo_ring_map);
2265
2266         if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2267                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2268
2269         /* Destroy RX return ring. */
2270         if (sc->bge_cdata.bge_rx_return_ring_map)
2271                 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2272                     sc->bge_cdata.bge_rx_return_ring_map);
2273
2274         if (sc->bge_cdata.bge_rx_return_ring_map &&
2275             sc->bge_ldata.bge_rx_return_ring)
2276                 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2277                     sc->bge_ldata.bge_rx_return_ring,
2278                     sc->bge_cdata.bge_rx_return_ring_map);
2279
2280         if (sc->bge_cdata.bge_rx_return_ring_tag)
2281                 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2282
2283         /* Destroy TX ring. */
2284         if (sc->bge_cdata.bge_tx_ring_map)
2285                 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2286                     sc->bge_cdata.bge_tx_ring_map);
2287
2288         if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2289                 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2290                     sc->bge_ldata.bge_tx_ring,
2291                     sc->bge_cdata.bge_tx_ring_map);
2292
2293         if (sc->bge_cdata.bge_tx_ring_tag)
2294                 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2295
2296         /* Destroy status block. */
2297         if (sc->bge_cdata.bge_status_map)
2298                 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2299                     sc->bge_cdata.bge_status_map);
2300
2301         if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2302                 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2303                     sc->bge_ldata.bge_status_block,
2304                     sc->bge_cdata.bge_status_map);
2305
2306         if (sc->bge_cdata.bge_status_tag)
2307                 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2308
2309         /* Destroy statistics block. */
2310         if (sc->bge_cdata.bge_stats_map)
2311                 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2312                     sc->bge_cdata.bge_stats_map);
2313
2314         if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2315                 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2316                     sc->bge_ldata.bge_stats,
2317                     sc->bge_cdata.bge_stats_map);
2318
2319         if (sc->bge_cdata.bge_stats_tag)
2320                 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2321
2322         if (sc->bge_cdata.bge_buffer_tag)
2323                 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2324
2325         /* Destroy the parent tag. */
2326         if (sc->bge_cdata.bge_parent_tag)
2327                 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2328 }
2329
2330 static int
2331 bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2332     bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2333     bus_addr_t *paddr, const char *msg)
2334 {
2335         struct bge_dmamap_arg ctx;
2336         bus_addr_t lowaddr;
2337         bus_size_t ring_end;
2338         int error;
2339
2340         lowaddr = BUS_SPACE_MAXADDR;
2341 again:
2342         error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2343             alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2344             NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2345         if (error != 0) {
2346                 device_printf(sc->bge_dev,
2347                     "could not create %s dma tag\n", msg);
2348                 return (ENOMEM);
2349         }
2350         /* Allocate DMA'able memory for ring. */
2351         error = bus_dmamem_alloc(*tag, (void **)ring,
2352             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2353         if (error != 0) {
2354                 device_printf(sc->bge_dev,
2355                     "could not allocate DMA'able memory for %s\n", msg);
2356                 return (ENOMEM);
2357         }
2358         /* Load the address of the ring. */
2359         ctx.bge_busaddr = 0;
2360         error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2361             &ctx, BUS_DMA_NOWAIT);
2362         if (error != 0) {
2363                 device_printf(sc->bge_dev,
2364                     "could not load DMA'able memory for %s\n", msg);
2365                 return (ENOMEM);
2366         }
2367         *paddr = ctx.bge_busaddr;
2368         ring_end = *paddr + maxsize;
2369         if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2370             BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2371                 /*
2372                  * 4GB boundary crossed.  Limit maximum allowable DMA
2373                  * address space to 32bit and try again.
2374                  */
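		/*
		 * Worked example: a 16KB ring loaded at physical address
		 * 0xFFFFF000 would extend past 0x100000000, so the upper
		 * 32 bits of its start and end addresses differ and the
		 * BGE_ADDR_HI() comparison above detects the crossing.
		 * Retrying with BUS_SPACE_MAXADDR_32BIT keeps the whole
		 * ring below 4GB.
		 */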
2375                 bus_dmamap_unload(*tag, *map);
2376                 bus_dmamem_free(*tag, *ring, *map);
2377                 bus_dma_tag_destroy(*tag);
2378                 if (bootverbose)
2379                         device_printf(sc->bge_dev, "4GB boundary crossed, "
2380                             "limit DMA address space to 32bit for %s\n", msg);
2381                 *ring = NULL;
2382                 *tag = NULL;
2383                 *map = NULL;
2384                 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2385                 goto again;
2386         }
2387         return (0);
2388 }
2389
2390 static int
2391 bge_dma_alloc(struct bge_softc *sc)
2392 {
2393         bus_addr_t lowaddr;
2394         bus_size_t boundary, sbsz, txsegsz, txmaxsegsz;
2395         int i, error;
2396
2397         lowaddr = BUS_SPACE_MAXADDR;
2398         if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2399                 lowaddr = BGE_DMA_MAXADDR;
2400         /*
2401          * Allocate the parent bus DMA tag appropriate for PCI.
2402          */
2403         error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2404             1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2405             NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2406             0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2407         if (error != 0) {
2408                 device_printf(sc->bge_dev,
2409                     "could not allocate parent dma tag\n");
2410                 return (ENOMEM);
2411         }
2412
2413         /* Create tag for standard RX ring. */
2414         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2415             &sc->bge_cdata.bge_rx_std_ring_tag,
2416             (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2417             &sc->bge_cdata.bge_rx_std_ring_map,
2418             &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2419         if (error)
2420                 return (error);
2421
2422         /* Create tag for RX return ring. */
2423         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2424             &sc->bge_cdata.bge_rx_return_ring_tag,
2425             (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2426             &sc->bge_cdata.bge_rx_return_ring_map,
2427             &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2428         if (error)
2429                 return (error);
2430
2431         /* Create tag for TX ring. */
2432         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2433             &sc->bge_cdata.bge_tx_ring_tag,
2434             (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2435             &sc->bge_cdata.bge_tx_ring_map,
2436             &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2437         if (error)
2438                 return (error);
2439
2440         /*
2441          * Create tag for status block.
2442          * Because we use only a single TX/RX/RX return ring, use the
2443          * minimum status block size, except on BCM5700 AX/BX, which
2444          * seems to want to see the full status block size regardless
2445          * of the configured number of rings.
2446          */
2447         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2448             sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2449                 sbsz = BGE_STATUS_BLK_SZ;
2450         else
2451                 sbsz = 32;
2452         error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2453             &sc->bge_cdata.bge_status_tag,
2454             (uint8_t **)&sc->bge_ldata.bge_status_block,
2455             &sc->bge_cdata.bge_status_map,
2456             &sc->bge_ldata.bge_status_block_paddr, "status block");
2457         if (error)
2458                 return (error);
2459
2460         /* Create tag for statistics block. */
2461         error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2462             &sc->bge_cdata.bge_stats_tag,
2463             (uint8_t **)&sc->bge_ldata.bge_stats,
2464             &sc->bge_cdata.bge_stats_map,
2465             &sc->bge_ldata.bge_stats_paddr, "statistics block");
2466         if (error)
2467                 return (error);
2468
2469         /* Create tag for jumbo RX ring. */
2470         if (BGE_IS_JUMBO_CAPABLE(sc)) {
2471                 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2472                     &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2473                     (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2474                     &sc->bge_cdata.bge_rx_jumbo_ring_map,
2475                     &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2476                 if (error)
2477                         return (error);
2478         }
2479
2480         /* Create parent tag for buffers. */
2481         boundary = 0;
2482         if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2483                 boundary = BGE_DMA_BNDRY;
2484                 /*
2485                  * XXX
2486          * A watchdog timeout issue was observed on the BCM5704 when it
2487          * lives behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2488          * Limiting the DMA address space to 32 bits seems to address
2489          * it.
2490                  */
2491                 if (sc->bge_flags & BGE_FLAG_PCIX)
2492                         lowaddr = BUS_SPACE_MAXADDR_32BIT;
2493         }
2494         error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2495             1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2496             NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2497             0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2498         if (error != 0) {
2499                 device_printf(sc->bge_dev,
2500                     "could not allocate buffer dma tag\n");
2501                 return (ENOMEM);
2502         }
2503         /* Create tag for Tx mbufs. */
2504         if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2505                 txsegsz = BGE_TSOSEG_SZ;
2506                 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2507         } else {
2508                 txsegsz = MCLBYTES;
2509                 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2510         }
2511         error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2512             0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2513             txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2514             &sc->bge_cdata.bge_tx_mtag);
2515
2516         if (error) {
2517                 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2518                 return (ENOMEM);
2519         }
2520
2521         /* Create tag for Rx mbufs. */
2522         error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2523             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
2524             MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2525
2526         if (error) {
2527                 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2528                 return (ENOMEM);
2529         }
2530
2531         /* Create DMA maps for RX buffers. */
2532         error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2533             &sc->bge_cdata.bge_rx_std_sparemap);
2534         if (error) {
2535                 device_printf(sc->bge_dev,
2536                     "can't create spare DMA map for RX\n");
2537                 return (ENOMEM);
2538         }
2539         for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2540                 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2541                             &sc->bge_cdata.bge_rx_std_dmamap[i]);
2542                 if (error) {
2543                         device_printf(sc->bge_dev,
2544                             "can't create DMA map for RX\n");
2545                         return (ENOMEM);
2546                 }
2547         }
2548
2549         /* Create DMA maps for TX buffers. */
2550         for (i = 0; i < BGE_TX_RING_CNT; i++) {
2551                 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2552                             &sc->bge_cdata.bge_tx_dmamap[i]);
2553                 if (error) {
2554                         device_printf(sc->bge_dev,
2555                             "can't create DMA map for TX\n");
2556                         return (ENOMEM);
2557                 }
2558         }
2559
2560         /* Create tags for jumbo RX buffers. */
2561         if (BGE_IS_JUMBO_CAPABLE(sc)) {
2562                 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2563                     1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2564                     NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2565                     0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2566                 if (error) {
2567                         device_printf(sc->bge_dev,
2568                             "could not allocate jumbo dma tag\n");
2569                         return (ENOMEM);
2570                 }
2571                 /* Create DMA maps for jumbo RX buffers. */
2572                 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2573                     0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2574                 if (error) {
2575                         device_printf(sc->bge_dev,
2576                             "can't create spare DMA map for jumbo RX\n");
2577                         return (ENOMEM);
2578                 }
2579                 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2580                         error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2581                                     0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2582                         if (error) {
2583                                 device_printf(sc->bge_dev,
2584                                     "can't create DMA map for jumbo RX\n");
2585                                 return (ENOMEM);
2586                         }
2587                 }
2588         }
2589
2590         return (0);
2591 }
2592
2593 /*
2594  * Return true if this device has more than one port.
2595  */
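/*
 * For example, a dual-MAC part such as the BCM5704 exposes its two ports
 * as PCI functions 0 and 1 at the same domain/bus/slot, so the scan below
 * finds a sibling function and returns 1; a single-port device exposes
 * only function 0 and the routine returns 0.
 */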
2596 static int
2597 bge_has_multiple_ports(struct bge_softc *sc)
2598 {
2599         device_t dev = sc->bge_dev;
2600         u_int b, d, f, fscan, s;
2601
2602         d = pci_get_domain(dev);
2603         b = pci_get_bus(dev);
2604         s = pci_get_slot(dev);
2605         f = pci_get_function(dev);
2606         for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2607                 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2608                         return (1);
2609         return (0);
2610 }
2611
2612 /*
2613  * Return true if MSI can be used with this device.
2614  */
2615 static int
2616 bge_can_use_msi(struct bge_softc *sc)
2617 {
2618         int can_use_msi = 0;
2619
2620         /* Disable MSI for polling(4). */
2621 #ifdef DEVICE_POLLING
2622         return (0);
2623 #endif
2624         switch (sc->bge_asicrev) {
2625         case BGE_ASICREV_BCM5714_A0:
2626         case BGE_ASICREV_BCM5714:
2627                 /*
2628                  * Apparently, MSI doesn't work when these chips are
2629                  * configured in single-port mode.
2630                  */
2631                 if (bge_has_multiple_ports(sc))
2632                         can_use_msi = 1;
2633                 break;
2634         case BGE_ASICREV_BCM5750:
2635                 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2636                     sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2637                         can_use_msi = 1;
2638                 break;
2639         default:
2640                 if (BGE_IS_575X_PLUS(sc))
2641                         can_use_msi = 1;
2642         }
2643         return (can_use_msi);
2644 }
2645
2646 static int
2647 bge_attach(device_t dev)
2648 {
2649         struct ifnet *ifp;
2650         struct bge_softc *sc;
2651         uint32_t hwcfg = 0, misccfg;
2652         u_char eaddr[ETHER_ADDR_LEN];
2653         int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2654
2655         sc = device_get_softc(dev);
2656         sc->bge_dev = dev;
2657
2658         TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2659
2660         /*
2661          * Map control/status registers.
2662          */
2663         pci_enable_busmaster(dev);
2664
2665         rid = PCIR_BAR(0);
2666         sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2667             RF_ACTIVE);
2668
2669         if (sc->bge_res == NULL) {
2670                 device_printf (sc->bge_dev, "couldn't map memory\n");
2671                 error = ENXIO;
2672                 goto fail;
2673         }
2674
2675         /* Save various chip information. */
2676         sc->bge_chipid =
2677             pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2678             BGE_PCIMISCCTL_ASICREV_SHIFT;
2679         if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2680                 /*
2681                  * Find the ASIC revision.  Different chips use different
2682                  * registers.
2683                  */
2684                 switch (pci_get_device(dev)) {
2685                 case BCOM_DEVICEID_BCM5717:
2686                 case BCOM_DEVICEID_BCM5718:
2687                         sc->bge_chipid = pci_read_config(dev,
2688                             BGE_PCI_GEN2_PRODID_ASICREV, 4);
2689                         break;
2690                 default:
2691                         sc->bge_chipid = pci_read_config(dev,
2692                             BGE_PCI_PRODID_ASICREV, 4);
2693                 }
2694         }
2695         sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2696         sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2697
2698         /* Set default PHY address. */
2699         phy_addr = 1;
2700          /*
2701           * PHY address mapping for various devices.
2702           *
2703           *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2704           * ---------+-------+-------+-------+-------+
2705           * BCM57XX  |   1   |   X   |   X   |   X   |
2706           * BCM5704  |   1   |   X   |   1   |   X   |
2707           * BCM5717  |   1   |   8   |   2   |   9   |
2708           *
2709           * Other addresses may respond but they are not
2710           * IEEE compliant PHYs and should be ignored.
2711           */
2712         if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
2713                 f = pci_get_function(dev);
2714                 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2715                         if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2716                             BGE_SGDIGSTS_IS_SERDES)
2717                                 phy_addr = f + 8;
2718                         else
2719                                 phy_addr = f + 1;
2720                 } else if (sc->bge_chipid == BGE_CHIPID_BCM5717_B0) {
2721                         if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2722                             BGE_CPMU_PHY_STRAP_IS_SERDES)
2723                                 phy_addr = f + 8;
2724                         else
2725                                 phy_addr = f + 1;
2726                 }
2727         }
2728
2729         /*
2730          * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2731          * 5705 A0 and A1 chips.
2732          */
2733         if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2734             sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2735             sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2736             sc->bge_chipid != BGE_CHIPID_BCM5705_A1 &&
2737             !BGE_IS_5717_PLUS(sc))
2738                 sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
2739
2740         if (bge_has_eaddr(sc))
2741                 sc->bge_flags |= BGE_FLAG_EADDR;
2742
2743         /* Save chipset family. */
2744         switch (sc->bge_asicrev) {
2745         case BGE_ASICREV_BCM5717:
2746                 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2747                     BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2748                     BGE_FLAG_SHORT_DMA_BUG | BGE_FLAG_JUMBO_FRAME;
2749                 break;
2750         case BGE_ASICREV_BCM5755:
2751         case BGE_ASICREV_BCM5761:
2752         case BGE_ASICREV_BCM5784:
2753         case BGE_ASICREV_BCM5785:
2754         case BGE_ASICREV_BCM5787:
2755         case BGE_ASICREV_BCM57780:
2756                 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2757                     BGE_FLAG_5705_PLUS;
2758                 break;
2759         case BGE_ASICREV_BCM5700:
2760         case BGE_ASICREV_BCM5701:
2761         case BGE_ASICREV_BCM5703:
2762         case BGE_ASICREV_BCM5704:
2763                 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2764                 break;
2765         case BGE_ASICREV_BCM5714_A0:
2766         case BGE_ASICREV_BCM5780:
2767         case BGE_ASICREV_BCM5714:
2768                 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2769                 /* FALLTHROUGH */
2770         case BGE_ASICREV_BCM5750:
2771         case BGE_ASICREV_BCM5752:
2772         case BGE_ASICREV_BCM5906:
2773                 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2774                 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
2775                         sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2776                 /* FALLTHROUGH */
2777         case BGE_ASICREV_BCM5705:
2778                 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2779                 break;
2780         }
2781
2782         /* Set various PHY bug flags. */
2783         if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2784             sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2785                 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2786         if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2787             sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2788                 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2789         if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2790                 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2791         if (pci_get_subvendor(dev) == DELL_VENDORID)
2792                 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2793         if ((BGE_IS_5705_PLUS(sc)) &&
2794             sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2795             sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2796             sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2797             sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2798                 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2799                     sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2800                     sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2801                     sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2802                         if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2803                             pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2804                                 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2805                         if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2806                                 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2807                 } else
2808                         sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2809         }
2810
2811         /* Identify the chips that use a CPMU. */
2812         if (BGE_IS_5717_PLUS(sc) ||
2813             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2814             sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2815             sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2816             sc->bge_asicrev == BGE_ASICREV_BCM57780)
2817                 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2818         if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2819                 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2820         else
2821                 sc->bge_mi_mode = BGE_MIMODE_BASE;
2822         /* Enable auto polling for BCM570[0-5]. */
2823         if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2824                 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2825
2826         /*
2827          * All controllers that are not 5755 or higher have the 4GB
2828          * boundary DMA bug.
2829          * Whenever an address crosses a multiple of the 4GB boundary
2830          * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2831          * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2832          * state machine will lock up and cause the device to hang.
2833          */
2834         if (BGE_IS_5755_PLUS(sc) == 0)
2835                 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2836
2837         misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2838         if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2839                 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2840                     misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2841                         sc->bge_flags |= BGE_FLAG_5788;
2842         }
2843
2844         capmask = BMSR_DEFCAPMASK;
2845         if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2846             (misccfg == 0x4000 || misccfg == 0x8000)) ||
2847             (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2848             pci_get_vendor(dev) == BCOM_VENDORID &&
2849             (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
2850             pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
2851             pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
2852             (pci_get_vendor(dev) == BCOM_VENDORID &&
2853             (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
2854             pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
2855             pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
2856             pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
2857             sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2858                 /* These chips are 10/100 only. */
2859                 capmask &= ~BMSR_EXTSTAT;
2860         }
2861
2862         /*
2863          * Some controllers seem to require special firmware to use
2864          * TSO.  That firmware is not available to FreeBSD, and Linux
2865          * claims that the TSO performed by the firmware is slower than
2866          * hardware-based TSO anyway.  Moreover, the firmware-based TSO
2867          * has a known bug: it cannot handle frames whose Ethernet plus
2868          * IP/TCP headers exceed 80 bytes.  A workaround for the TSO bug
2869          * exists, but it appears more expensive than simply not using
2870          * TSO at all.  Some hardware also has the TSO bug, so limit
2871          * TSO to the controllers that are not affected by these issues
2872          * (e.g. 5755 or higher).
2873          */
2874         if (BGE_IS_5717_PLUS(sc)) {
2875                 /* BCM5717 requires different TSO configuration. */
2876                 sc->bge_flags |= BGE_FLAG_TSO3;
2877         } else if (BGE_IS_5755_PLUS(sc)) {
2878                 /*
2879                  * BCM5754 and BCM5787 share the same ASIC id, so an
2880                  * explicit device id check is required.
2881                  * For an unknown reason, TSO does not work on BCM5755M.
2882                  */
2883                 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2884                     pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2885                     pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2886                         sc->bge_flags |= BGE_FLAG_TSO;
2887         }
2888
2889         /*
2890          * Check if this is a PCI-X or PCI Express device.
2891          */
2892         if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2893                 /*
2894                  * Found a PCI Express capabilities register, so this
2895                  * must be a PCI Express device.
2896                  */
2897                 sc->bge_flags |= BGE_FLAG_PCIE;
2898                 sc->bge_expcap = reg;
2899                 if (pci_get_max_read_req(dev) != 4096)
2900                         pci_set_max_read_req(dev, 4096);
2901         } else {
2902                 /*
2903                  * Check if the device is in PCI-X Mode.
2904                  * (This bit is not valid on PCI Express controllers.)
2905                  */
2906                 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
2907                         sc->bge_pcixcap = reg;
2908                 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2909                     BGE_PCISTATE_PCI_BUSMODE) == 0)
2910                         sc->bge_flags |= BGE_FLAG_PCIX;
2911         }
2912
2913         /*
2914          * The 40bit DMA bug applies to the 5714/5715 controllers and is
2915          * not actually a MAC controller bug but an issue with the embedded
2916          * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2917          */
2918         if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2919                 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2920         /*
2921          * Allocate the interrupt, using MSI if possible.  These devices
2922          * support 8 MSI messages, but only the first one is used in
2923          * normal operation.
2924          */
2925         rid = 0;
2926         if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
2927                 sc->bge_msicap = reg;
2928                 if (bge_can_use_msi(sc)) {
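                        /*
                         * Only a single MSI message is ever used, so clamp
                         * whatever count the device advertises to one.
                         */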
2929                         msicount = pci_msi_count(dev);
2930                         if (msicount > 1)
2931                                 msicount = 1;
2932                 } else
2933                         msicount = 0;
2934                 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2935                         rid = 1;
2936                         sc->bge_flags |= BGE_FLAG_MSI;
2937                 }
2938         }
2939
2940         /*
2941          * All controllers except the BCM5700 support tagged status,
2942          * but we use tagged status only for the MSI case on BCM5717.
2943          * Otherwise MSI on the BCM5717 does not work.
2944          */
2945 #ifndef DEVICE_POLLING
2946         if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
2947                 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
2948 #endif
2949
2950         sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2951             RF_SHAREABLE | RF_ACTIVE);
2952
2953         if (sc->bge_irq == NULL) {
2954                 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2955                 error = ENXIO;
2956                 goto fail;
2957         }
2958
2959         device_printf(dev,
2960             "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2961             sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2962             (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2963             ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2964
2965         BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2966
2967         /* Try to reset the chip. */
2968         if (bge_reset(sc)) {
2969                 device_printf(sc->bge_dev, "chip reset failed\n");
2970                 error = ENXIO;
2971                 goto fail;
2972         }
2973
2974         sc->bge_asf_mode = 0;
2975         if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2976             == BGE_MAGIC_NUMBER)) {
2977                 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2978                     & BGE_HWCFG_ASF) {
2979                         sc->bge_asf_mode |= ASF_ENABLE;
2980                         sc->bge_asf_mode |= ASF_STACKUP;
2981                         if (BGE_IS_575X_PLUS(sc))
2982                                 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2983                 }
2984         }
2985
2986         /* Try to reset the chip again the nice way. */
2987         bge_stop_fw(sc);
2988         bge_sig_pre_reset(sc, BGE_RESET_STOP);
2989         if (bge_reset(sc)) {
2990                 device_printf(sc->bge_dev, "chip reset failed\n");
2991                 error = ENXIO;
2992                 goto fail;
2993         }
2994
2995         bge_sig_legacy(sc, BGE_RESET_STOP);
2996         bge_sig_post_reset(sc, BGE_RESET_STOP);
2997
2998         if (bge_chipinit(sc)) {
2999                 device_printf(sc->bge_dev, "chip initialization failed\n");
3000                 error = ENXIO;
3001                 goto fail;
3002         }
3003
3004         error = bge_get_eaddr(sc, eaddr);
3005         if (error) {
3006                 device_printf(sc->bge_dev,
3007                     "failed to read station address\n");
3008                 error = ENXIO;
3009                 goto fail;
3010         }
3011
3012         /* 5705 limits RX return ring to 512 entries. */
3013         if (BGE_IS_5717_PLUS(sc))
3014                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3015         else if (BGE_IS_5705_PLUS(sc))
3016                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3017         else
3018                 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3019
3020         if (bge_dma_alloc(sc)) {
3021                 device_printf(sc->bge_dev,
3022                     "failed to allocate DMA resources\n");
3023                 error = ENXIO;
3024                 goto fail;
3025         }
3026
3027         bge_add_sysctls(sc);
3028
3029         /* Set default tuneable values. */
3030         sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3031         sc->bge_rx_coal_ticks = 150;
3032         sc->bge_tx_coal_ticks = 150;
3033         sc->bge_rx_max_coal_bds = 10;
3034         sc->bge_tx_max_coal_bds = 10;
3035
3036         /* Initialize checksum features to use. */
3037         sc->bge_csum_features = BGE_CSUM_FEATURES;
3038         if (sc->bge_forced_udpcsum != 0)
3039                 sc->bge_csum_features |= CSUM_UDP;
3040
3041         /* Set up ifnet structure */
3042         ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3043         if (ifp == NULL) {
3044                 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3045                 error = ENXIO;
3046                 goto fail;
3047         }
3048         ifp->if_softc = sc;
3049         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3050         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3051         ifp->if_ioctl = bge_ioctl;
3052         ifp->if_start = bge_start;
3053         ifp->if_init = bge_init;
3054         ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3055         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3056         IFQ_SET_READY(&ifp->if_snd);
3057         ifp->if_hwassist = sc->bge_csum_features;
3058         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3059             IFCAP_VLAN_MTU;
3060         if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3061                 ifp->if_hwassist |= CSUM_TSO;
3062                 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3063         }
3064 #ifdef IFCAP_VLAN_HWCSUM
3065         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3066 #endif
3067         ifp->if_capenable = ifp->if_capabilities;
3068 #ifdef DEVICE_POLLING
3069         ifp->if_capabilities |= IFCAP_POLLING;
3070 #endif
3071
3072         /*
3073          * 5700 B0 chips do not support checksumming correctly due
3074          * to hardware bugs.
3075          */
3076         if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3077                 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3078                 ifp->if_capenable &= ~IFCAP_HWCSUM;
3079                 ifp->if_hwassist = 0;
3080         }
3081
3082         /*
3083          * Figure out what sort of media we have by checking the
3084          * hardware config word in the first 32k of NIC internal memory,
3085          * or fall back to examining the EEPROM if necessary.
3086          * Note: on some BCM5700 cards, this value appears to be unset.
3087          * If that's the case, we have to rely on identifying the NIC
3088          * by its PCI subsystem ID, as we do below for the SysKonnect
3089          * SK-9D41.
3090          */
3091         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3092                 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3093         else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3094             (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3095                 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3096                     sizeof(hwcfg))) {
3097                         device_printf(sc->bge_dev, "failed to read EEPROM\n");
3098                         error = ENXIO;
3099                         goto fail;
3100                 }
3101                 hwcfg = ntohl(hwcfg);
3102         }
3103
3104         /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3105         if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3106             SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3107                 if (BGE_IS_5714_FAMILY(sc))
3108                         sc->bge_flags |= BGE_FLAG_MII_SERDES;
3109                 else
3110                         sc->bge_flags |= BGE_FLAG_TBI;
3111         }
3112
3113         if (sc->bge_flags & BGE_FLAG_TBI) {
3114                 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3115                     bge_ifmedia_sts);
3116                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3117                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3118                     0, NULL);
3119                 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3120                 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3121                 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3122         } else {
3123                 /*
3124                  * Do transceiver setup and tell the firmware the
3125                  * driver is down so that we can try to get access to
3126                  * the PHY for the probe if ASF is running.  Retry a
3127                  * couple of times if we get a conflict with the ASF
3128                  * firmware accessing the PHY.
3129                  */
3130                 trys = 0;
3131                 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3132 again:
3133                 bge_asf_driver_up(sc);
3134
3135                 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3136                     bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3137                     MIIF_DOPAUSE | MIIF_FORCEPAUSE);
3138                 if (error != 0) {
3139                         if (trys++ < 4) {
3140                                 device_printf(sc->bge_dev, "Try again\n");
3141                                 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3142                                     BMCR_RESET);
3143                                 goto again;
3144                         }
3145                         device_printf(sc->bge_dev, "attaching PHYs failed\n");
3146                         goto fail;
3147                 }
3148
3149                 /*
3150                  * Now tell the firmware we are going up after probing the PHY
3151                  */
3152                 if (sc->bge_asf_mode & ASF_STACKUP)
3153                         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3154         }
3155
3156         /*
3157          * When using the BCM5701 in PCI-X mode, data corruption has
3158          * been observed in the first few bytes of some received packets.
3159          * Aligning the packet buffer in memory eliminates the corruption.
3160          * Unfortunately, this misaligns the packet payloads.  On platforms
3161          * which do not support unaligned accesses, we will realign the
3162          * payloads by copying the received packets.
3163          */
3164         if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3165             sc->bge_flags & BGE_FLAG_PCIX)
3166                 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3167
3168         /*
3169          * Call MI attach routine.
3170          */
3171         ether_ifattach(ifp, eaddr);
3172         callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3173
3174         /* Tell upper layer we support long frames. */
3175         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3176
3177         /*
3178          * Hookup IRQ last.
3179          */
3180 #if __FreeBSD_version > 700030
3181         if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3182                 /* Take advantage of single-shot MSI. */
3183                 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3184                     ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3185                 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3186                     taskqueue_thread_enqueue, &sc->bge_tq);
3187                 if (sc->bge_tq == NULL) {
3188                         device_printf(dev, "could not create taskqueue.\n");
3189                         ether_ifdetach(ifp);
3190                         error = ENXIO;
3191                         goto fail;
3192                 }
3193                 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3194                     device_get_nameunit(sc->bge_dev));
3195                 error = bus_setup_intr(dev, sc->bge_irq,
3196                     INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3197                     &sc->bge_intrhand);
3198                 if (error)
3199                         ether_ifdetach(ifp);
3200         } else
3201                 error = bus_setup_intr(dev, sc->bge_irq,
3202                     INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3203                     &sc->bge_intrhand);
3204 #else
3205         error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
3206            bge_intr, sc, &sc->bge_intrhand);
3207 #endif
3208
3209         if (error) {
3210                 bge_detach(dev);
3211                 device_printf(sc->bge_dev, "couldn't set up irq\n");
3212         }
3213
3214         return (0);
3215
3216 fail:
3217         bge_release_resources(sc);
3218
3219         return (error);
3220 }
3221
3222 static int
3223 bge_detach(device_t dev)
3224 {
3225         struct bge_softc *sc;
3226         struct ifnet *ifp;
3227
3228         sc = device_get_softc(dev);
3229         ifp = sc->bge_ifp;
3230
3231 #ifdef DEVICE_POLLING
3232         if (ifp->if_capenable & IFCAP_POLLING)
3233                 ether_poll_deregister(ifp);
3234 #endif
3235
3236         BGE_LOCK(sc);
3237         bge_stop(sc);
3238         bge_reset(sc);
3239         BGE_UNLOCK(sc);
3240
3241         callout_drain(&sc->bge_stat_ch);
3242
3243         if (sc->bge_tq)
3244                 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3245         ether_ifdetach(ifp);
3246
3247         if (sc->bge_flags & BGE_FLAG_TBI) {
3248                 ifmedia_removeall(&sc->bge_ifmedia);
3249         } else {
3250                 bus_generic_detach(dev);
3251                 device_delete_child(dev, sc->bge_miibus);
3252         }
3253
3254         bge_release_resources(sc);
3255
3256         return (0);
3257 }
3258
3259 static void
3260 bge_release_resources(struct bge_softc *sc)
3261 {
3262         device_t dev;
3263
3264         dev = sc->bge_dev;
3265
3266         if (sc->bge_tq != NULL)
3267                 taskqueue_free(sc->bge_tq);
3268
3269         if (sc->bge_intrhand != NULL)
3270                 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3271
3272         if (sc->bge_irq != NULL)
3273                 bus_release_resource(dev, SYS_RES_IRQ,
3274                     sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3275
3276         if (sc->bge_flags & BGE_FLAG_MSI)
3277                 pci_release_msi(dev);
3278
3279         if (sc->bge_res != NULL)
3280                 bus_release_resource(dev, SYS_RES_MEMORY,
3281                     PCIR_BAR(0), sc->bge_res);
3282
3283         if (sc->bge_ifp != NULL)
3284                 if_free(sc->bge_ifp);
3285
3286         bge_dma_free(sc);
3287
3288         if (mtx_initialized(&sc->bge_mtx))      /* XXX */
3289                 BGE_LOCK_DESTROY(sc);
3290 }
3291
3292 static int
3293 bge_reset(struct bge_softc *sc)
3294 {
3295         device_t dev;
3296         uint32_t cachesize, command, pcistate, reset, val;
3297         void (*write_op)(struct bge_softc *, int, int);
3298         uint16_t devctl;
3299         int i;
3300
3301         dev = sc->bge_dev;
3302
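        /*
         * Pick the access method used to write BGE_MISC_CFG for the
         * core reset below: direct memory writes for PCIe parts,
         * indirect memory writes for other 575X-class chips, and
         * indirect register writes everywhere else (including the
         * 5714 family and the 5906).
         */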
3303         if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3304             (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3305                 if (sc->bge_flags & BGE_FLAG_PCIE)
3306                         write_op = bge_writemem_direct;
3307                 else
3308                         write_op = bge_writemem_ind;
3309         } else
3310                 write_op = bge_writereg_ind;
3311
3312         /* Save some important PCI state. */
3313         cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3314         command = pci_read_config(dev, BGE_PCI_CMD, 4);
3315         pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3316
3317         pci_write_config(dev, BGE_PCI_MISC_CTL,
3318             BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3319             BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3320
3321         /* Disable fastboot on controllers that support it. */
3322         if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3323             BGE_IS_5755_PLUS(sc)) {
3324                 if (bootverbose)
3325                         device_printf(dev, "Disabling fastboot\n");
3326                 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3327         }
3328
3329         /*
3330          * Write the magic number to SRAM at offset 0xB50.
3331          * When firmware finishes its initialization it will
3332          * write ~BGE_MAGIC_NUMBER to the same location.
3333          */
3334         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3335
3336         reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3337
3338         /* XXX: Broadcom Linux driver. */
3339         if (sc->bge_flags & BGE_FLAG_PCIE) {
3340                 if (CSR_READ_4(sc, 0x7E2C) == 0x60)     /* PCIE 1.0 */
3341                         CSR_WRITE_4(sc, 0x7E2C, 0x20);
3342                 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3343                         /* Prevent PCIE link training during global reset */
3344                         CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3345                         reset |= 1 << 29;
3346                 }
3347         }
3348
3349         /*
3350          * Set GPHY Power Down Override to leave GPHY
3351          * powered up in D0 uninitialized.
3352          */
3353         if (BGE_IS_5705_PLUS(sc))
3354                 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3355
3356         /* Issue global reset */
3357         write_op(sc, BGE_MISC_CFG, reset);
3358
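        /*
         * The 5906 runs an internal VCPU; flag a driver-initiated reset
         * to it and take it out of halt so it can restart.
         */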
3359         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3360                 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3361                 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3362                     val | BGE_VCPU_STATUS_DRV_RESET);
3363                 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3364                 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3365                     val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3366         }
3367
3368         DELAY(1000);
3369
3370         /* XXX: Broadcom Linux driver. */
3371         if (sc->bge_flags & BGE_FLAG_PCIE) {
3372                 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3373                         DELAY(500000); /* wait for link training to complete */
3374                         val = pci_read_config(dev, 0xC4, 4);
3375                         pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3376                 }
3377                 devctl = pci_read_config(dev,
3378                     sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3379                 /* Clear enable no snoop and disable relaxed ordering. */
3380                 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3381                     PCIM_EXP_CTL_NOSNOOP_ENABLE);
3382                 /* Set PCIE max payload size to 128. */
3383                 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3384                 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3385                     devctl, 2);
3386                 /* Clear error status. */
3387                 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3388                     PCIM_EXP_STA_CORRECTABLE_ERROR |
3389                     PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3390                     PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3391         }
3392
3393         /* Reset some of the PCI state that got zapped by reset. */
3394         pci_write_config(dev, BGE_PCI_MISC_CTL,
3395             BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3396             BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3397         pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3398         pci_write_config(dev, BGE_PCI_CMD, command, 4);
3399         write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3400         /*
3401          * Disable PCI-X relaxed ordering to ensure the status block
3402          * update comes before packet buffer DMA.  Otherwise the driver
3403          * may read a stale status block.
3404          */
3405         if (sc->bge_flags & BGE_FLAG_PCIX) {
3406                 devctl = pci_read_config(dev,
3407                     sc->bge_pcixcap + PCIXR_COMMAND, 2);
3408                 devctl &= ~PCIXM_COMMAND_ERO;
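                /*
                 * The 5703 and 5704 additionally want the PCI-X maximum
                 * read byte count forced to 2048; the 5704 also has its
                 * maximum split transactions field cleared.
                 */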
3409                 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3410                         devctl &= ~PCIXM_COMMAND_MAX_READ;
3411                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
3412                 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3413                         devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3414                             PCIXM_COMMAND_MAX_READ);
3415                         devctl |= PCIXM_COMMAND_MAX_READ_2048;
3416                 }
3417                 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3418                     devctl, 2);
3419         }
3420         /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3421         if (BGE_IS_5714_FAMILY(sc)) {
3422                 /* This chip disables MSI on reset. */
3423                 if (sc->bge_flags & BGE_FLAG_MSI) {
3424                         val = pci_read_config(dev,
3425                             sc->bge_msicap + PCIR_MSI_CTRL, 2);
3426                         pci_write_config(dev,
3427                             sc->bge_msicap + PCIR_MSI_CTRL,
3428                             val | PCIM_MSICTRL_MSI_ENABLE, 2);
3429                         val = CSR_READ_4(sc, BGE_MSI_MODE);
3430                         CSR_WRITE_4(sc, BGE_MSI_MODE,
3431                             val | BGE_MSIMODE_ENABLE);
3432                 }
3433                 val = CSR_READ_4(sc, BGE_MARB_MODE);
3434                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3435         } else
3436                 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3437
3438         if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3439                 for (i = 0; i < BGE_TIMEOUT; i++) {
3440                         val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3441                         if (val & BGE_VCPU_STATUS_INIT_DONE)
3442                                 break;
3443                         DELAY(100);
3444                 }
3445                 if (i == BGE_TIMEOUT) {
3446                         device_printf(dev, "reset timed out\n");
3447                         return (1);
3448                 }
3449         } else {
3450                 /*
3451                  * Poll until we see the 1's complement of the magic number.
3452                  * This indicates that the firmware initialization is complete.
3453                  * We expect this to fail if no chip containing the Ethernet
3454                  * address is fitted though.
3455                  */
3456                 for (i = 0; i < BGE_TIMEOUT; i++) {
3457                         DELAY(10);
3458                         val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3459                         if (val == ~BGE_MAGIC_NUMBER)
3460                                 break;
3461                 }
3462
3463                 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3464                         device_printf(dev,
3465                             "firmware handshake timed out, found 0x%08x\n",
3466                             val);
3467         }
3468
3469         /*
3470          * XXX Wait for the value of the PCISTATE register to
3471          * return to its original pre-reset state. This is a
3472          * fairly good indicator of reset completion. If we don't
3473          * wait for the reset to fully complete, trying to read
3474          * from the device's non-PCI registers may yield garbage
3475          * results.
3476          */
3477         for (i = 0; i < BGE_TIMEOUT; i++) {
3478                 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3479                         break;
3480                 DELAY(10);
3481         }
3482
3483         /* Fix up byte swapping. */
3484         CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3485             BGE_MODECTL_BYTESWAP_DATA);
3486
3487         /* Tell the ASF firmware we are up */
3488         if (sc->bge_asf_mode & ASF_STACKUP)
3489                 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3490
3491         CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3492
3493         /*
3494          * The 5704 in TBI mode apparently needs some special
3495          * adjustment to ensure the SERDES drive level is set
3496          * to 1.2V.
3497          */
3498         if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3499             sc->bge_flags & BGE_FLAG_TBI) {
3500                 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3501                 val = (val & ~0xFFF) | 0x880;
3502                 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3503         }
3504
3505         /* XXX: Broadcom Linux driver. */
3506         if (sc->bge_flags & BGE_FLAG_PCIE &&
3507             sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3508             sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3509             sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3510                 /* Enable Data FIFO protection. */
3511                 val = CSR_READ_4(sc, 0x7C00);
3512                 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3513         }
3514         DELAY(10000);
3515
3516         return (0);
3517 }
3518
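/*
 * Re-post the existing standard RX buffer at slot i so the controller can
 * reuse it, without allocating a replacement mbuf.
 */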
3519 static __inline void
3520 bge_rxreuse_std(struct bge_softc *sc, int i)
3521 {
3522         struct bge_rx_bd *r;
3523
3524         r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3525         r->bge_flags = BGE_RXBDFLAG_END;
3526         r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3527         r->bge_idx = i;
3528         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3529 }
3530
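/*
 * Re-post the existing jumbo RX buffer at slot i, restoring the segment
 * lengths of its (up to four) fragments.
 */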
3531 static __inline void
3532 bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3533 {
3534         struct bge_extrx_bd *r;
3535
3536         r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3537         r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3538         r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3539         r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3540         r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3541         r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3542         r->bge_idx = i;
3543         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3544 }
3545
3546 /*
3547  * Frame reception handling. This is called if there's a frame
3548  * on the receive return list.
3549  *
3550  * Note: we have to be able to handle two possibilities here:
3551  * 1) the frame is from the jumbo receive ring
3552  * 2) the frame is from the standard receive ring
3553  */
3554
3555 static int
3556 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3557 {
3558         struct ifnet *ifp;
3559         int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3560         uint16_t rx_cons;
3561
3562         rx_cons = sc->bge_rx_saved_considx;
3563
3564         /* Nothing to do. */
3565         if (rx_cons == rx_prod)
3566                 return (rx_npkts);
3567
3568         ifp = sc->bge_ifp;
3569
3570         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3571             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3572         bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3573             sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
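        /*
         * The jumbo ring is only in play when the configured MTU does not
         * fit in a standard cluster, so only sync it in that case.
         */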
3574         if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3575             (MCLBYTES - ETHER_ALIGN))
3576                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3577                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3578
3579         while (rx_cons != rx_prod) {
3580                 struct bge_rx_bd        *cur_rx;
3581                 uint32_t                rxidx;
3582                 struct mbuf             *m = NULL;
3583                 uint16_t                vlan_tag = 0;
3584                 int                     have_tag = 0;
3585
3586 #ifdef DEVICE_POLLING
3587                 if (ifp->if_capenable & IFCAP_POLLING) {
3588                         if (sc->rxcycles <= 0)
3589                                 break;
3590                         sc->rxcycles--;
3591                 }
3592 #endif
3593
3594                 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3595
3596                 rxidx = cur_rx->bge_idx;
3597                 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3598
3599                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3600                     cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3601                         have_tag = 1;
3602                         vlan_tag = cur_rx->bge_vlan_tag;
3603                 }
3604
3605                 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3606                         jumbocnt++;
3607                         m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3608                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3609                                 bge_rxreuse_jumbo(sc, rxidx);
3610                                 continue;
3611                         }
3612                         if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3613                                 bge_rxreuse_jumbo(sc, rxidx);
3614                                 ifp->if_iqdrops++;
3615                                 continue;
3616                         }
3617                         BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3618                 } else {
3619                         stdcnt++;
3620                         m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3621                         if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3622                                 bge_rxreuse_std(sc, rxidx);
3623                                 continue;
3624                         }
3625                         if (bge_newbuf_std(sc, rxidx) != 0) {
3626                                 bge_rxreuse_std(sc, rxidx);
3627                                 ifp->if_iqdrops++;
3628                                 continue;
3629                         }
3630                         BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3631                 }
3632
3633                 ifp->if_ipackets++;
3634 #ifndef __NO_STRICT_ALIGNMENT
3635                 /*
3636                  * For architectures with strict alignment we must make sure
3637                  * the payload is aligned.
3638                  */
3639                 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3640                         bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3641                             cur_rx->bge_len);
3642                         m->m_data += ETHER_ALIGN;
3643                 }
3644 #endif
3645                 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3646                 m->m_pkthdr.rcvif = ifp;
3647
3648                 if (ifp->if_capenable & IFCAP_RXCSUM)
3649                         bge_rxcsum(sc, cur_rx, m);
3650
3651                 /*
3652                  * If we received a packet with a vlan tag,
3653                  * attach that information to the packet.
3654                  */
3655                 if (have_tag) {
3656 #if __FreeBSD_version > 700022
3657                         m->m_pkthdr.ether_vtag = vlan_tag;
3658                         m->m_flags |= M_VLANTAG;
3659 #else
3660                         VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3661                         if (m == NULL)
3662                                 continue;
3663 #endif
3664                 }
3665
3666                 if (holdlck != 0) {
3667                         BGE_UNLOCK(sc);
3668                         (*ifp->if_input)(ifp, m);
3669                         BGE_LOCK(sc);
3670                 } else
3671                         (*ifp->if_input)(ifp, m);
3672                 rx_npkts++;
3673
3674                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3675                         return (rx_npkts);
3676         }
3677
3678         bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3679             sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3680         if (stdcnt > 0)
3681                 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3682                     sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3683
3684         if (jumbocnt > 0)
3685                 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3686                     sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3687
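        /*
         * Tell the controller how far we have consumed the return ring and,
         * if we replenished any buffers, advance the standard and jumbo
         * producer indexes as well.
         */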
3688         sc->bge_rx_saved_considx = rx_cons;
3689         bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3690         if (stdcnt)
3691                 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3692                     BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3693         if (jumbocnt)
3694                 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3695                     BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3696 #ifdef notyet
3697         /*
3698          * This register wraps very quickly under heavy packet drops.
3699          * If you need correct statistics, you can enable this check.
3700          */
3701         if (BGE_IS_5705_PLUS(sc))
3702                 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3703 #endif
3704         return (rx_npkts);
3705 }
3706
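/*
 * Translate the controller's receive checksum results into mbuf csum_flags.
 * 5717-class chips flag checksum errors explicitly; older chips return the
 * computed checksums, which are validated here (IP) or passed up for the
 * stack to verify (TCP/UDP).
 */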
3707 static void
3708 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3709 {
3710
3711         if (BGE_IS_5717_PLUS(sc)) {
3712                 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3713                         if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3714                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3715                                 if ((cur_rx->bge_error_flag &
3716                                     BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3717                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3718                         }
3719                         if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3720                                 m->m_pkthdr.csum_data =
3721                                     cur_rx->bge_tcp_udp_csum;
3722                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3723                                     CSUM_PSEUDO_HDR;
3724                         }
3725                 }
3726         } else {
3727                 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3728                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3729                         if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3730                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3731                 }
3732                 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3733                     m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3734                         m->m_pkthdr.csum_data =
3735                             cur_rx->bge_tcp_udp_csum;
3736                         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3737                             CSUM_PSEUDO_HDR;
3738                 }
3739         }
3740 }
3741
3742 static void
3743 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3744 {
3745         struct bge_tx_bd *cur_tx;
3746         struct ifnet *ifp;
3747
3748         BGE_LOCK_ASSERT(sc);
3749
3750         /* Nothing to do. */
3751         if (sc->bge_tx_saved_considx == tx_cons)
3752                 return;
3753
3754         ifp = sc->bge_ifp;
3755
3756         bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3757             sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3758         /*
3759          * Go through our tx ring and free mbufs for those
3760          * frames that have been sent.
3761          */
3762         while (sc->bge_tx_saved_considx != tx_cons) {
3763                 uint32_t                idx;
3764
3765                 idx = sc->bge_tx_saved_considx;
3766                 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3767                 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3768                         ifp->if_opackets++;
3769                 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3770                         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3771                             sc->bge_cdata.bge_tx_dmamap[idx],
3772                             BUS_DMASYNC_POSTWRITE);
3773                         bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3774                             sc->bge_cdata.bge_tx_dmamap[idx]);
3775                         m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3776                         sc->bge_cdata.bge_tx_chain[idx] = NULL;
3777                 }
3778                 sc->bge_txcnt--;
3779                 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3780         }
3781
3782         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3783         if (sc->bge_txcnt == 0)
3784                 sc->bge_timer = 0;
3785 }
3786
3787 #ifdef DEVICE_POLLING
3788 static int
3789 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3790 {
3791         struct bge_softc *sc = ifp->if_softc;
3792         uint16_t rx_prod, tx_cons;
3793         uint32_t statusword;
3794         int rx_npkts = 0;
3795
3796         BGE_LOCK(sc);
3797         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3798                 BGE_UNLOCK(sc);
3799                 return (rx_npkts);
3800         }
3801
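        /*
         * Sync the status block and pick up the latest RX producer and
         * TX consumer indexes published by the controller.
         */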
3802         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3803             sc->bge_cdata.bge_status_map,
3804             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3805         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3806         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3807
3808         statusword = sc->bge_ldata.bge_status_block->bge_status;
3809         sc->bge_ldata.bge_status_block->bge_status = 0;
3810
3811         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3812             sc->bge_cdata.bge_status_map,
3813             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3814
3815         /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3816         if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3817                 sc->bge_link_evt++;
3818
3819         if (cmd == POLL_AND_CHECK_STATUS)
3820                 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3821                     sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3822                     sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3823                         bge_link_upd(sc);
3824
3825         sc->rxcycles = count;
3826         rx_npkts = bge_rxeof(sc, rx_prod, 1);
3827         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3828                 BGE_UNLOCK(sc);
3829                 return (rx_npkts);
3830         }
3831         bge_txeof(sc, tx_cons);
3832         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3833                 bge_start_locked(ifp);
3834
3835         BGE_UNLOCK(sc);
3836         return (rx_npkts);
3837 }
3838 #endif /* DEVICE_POLLING */
3839
3840 static int
3841 bge_msi_intr(void *arg)
3842 {
3843         struct bge_softc *sc;
3844
3845         sc = (struct bge_softc *)arg;
3846         /*
3847          * This interrupt is not shared and the controller has
3848          * already disabled further interrupts.
3849          */
3850         taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
3851         return (FILTER_HANDLED);
3852 }
3853
3854 static void
3855 bge_intr_task(void *arg, int pending)
3856 {
3857         struct bge_softc *sc;
3858         struct ifnet *ifp;
3859         uint32_t status, status_tag;
3860         uint16_t rx_prod, tx_cons;
3861
3862         sc = (struct bge_softc *)arg;
3863         ifp = sc->bge_ifp;
3864
3865         BGE_LOCK(sc);
3866         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3867                 BGE_UNLOCK(sc);
3868                 return;
3869         }
3870
3871         /* Get updated status block. */
3872         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3873             sc->bge_cdata.bge_status_map,
3874             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3875
3876         /* Save producer/consumer indexes. */
3877         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3878         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3879         status = sc->bge_ldata.bge_status_block->bge_status;
3880         status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
3881         sc->bge_ldata.bge_status_block->bge_status = 0;
3882         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3883             sc->bge_cdata.bge_status_map,
3884             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3885         if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
3886                 status_tag = 0;
3887
3888         if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
3889                 bge_link_upd(sc);
3890
3891         /* Let controller work. */
3892         bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
3893
3894         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3895             sc->bge_rx_saved_considx != rx_prod) {
3896                 /* Check RX return ring producer/consumer. */
3897                 BGE_UNLOCK(sc);
3898                 bge_rxeof(sc, rx_prod, 0);
3899                 BGE_LOCK(sc);
3900         }
3901         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3902                 /* Check TX ring producer/consumer. */
3903                 bge_txeof(sc, tx_cons);
3904                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3905                         bge_start_locked(ifp);
3906         }
3907         BGE_UNLOCK(sc);
3908 }
3909
3910 static void
3911 bge_intr(void *xsc)
3912 {
3913         struct bge_softc *sc;
3914         struct ifnet *ifp;
3915         uint32_t statusword;
3916         uint16_t rx_prod, tx_cons;
3917
3918         sc = xsc;
3919
3920         BGE_LOCK(sc);
3921
3922         ifp = sc->bge_ifp;
3923
3924 #ifdef DEVICE_POLLING
3925         if (ifp->if_capenable & IFCAP_POLLING) {
3926                 BGE_UNLOCK(sc);
3927                 return;
3928         }
3929 #endif
3930
3931         /*
3932          * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
3933          * disable interrupts by writing nonzero like we used to, since with
3934          * our current organization this just gives complications and
3935          * pessimizations for re-enabling interrupts.  We used to have races
3936          * instead of the necessary complications.  Disabling interrupts
3937          * would just reduce the chance of a status update while we are
3938          * running (by switching to the interrupt-mode coalescence
3939          * parameters), but this chance is already very low so it is more
3940          * efficient to get another interrupt than prevent it.
3941          *
3942          * We do the ack first to ensure another interrupt if there is a
3943          * status update after the ack.  We don't check for the status
3944          * changing later because it is more efficient to get another
3945          * interrupt than prevent it, not quite as above (not checking is
3946          * a smaller optimization than not toggling the interrupt enable,
3947          * since checking doesn't involve PCI accesses and toggling requires
3948          * the status check).  So toggling would probably be a pessimization
3949          * even with MSI.  It would only be needed for using a task queue.
3950          */
3951         bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3952
3953         /*
3954          * Do the mandatory PCI flush as well as get the link status.
3955          */
3956         statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3957
3958         /* Make sure the descriptor ring indexes are coherent. */
3959         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3960             sc->bge_cdata.bge_status_map,
3961             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3962         rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3963         tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3964         sc->bge_ldata.bge_status_block->bge_status = 0;
3965         bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3966             sc->bge_cdata.bge_status_map,
3967             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3968
3969         if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3970             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3971             statusword || sc->bge_link_evt)
3972                 bge_link_upd(sc);
3973
3974         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3975                 /* Check RX return ring producer/consumer. */
3976                 bge_rxeof(sc, rx_prod, 1);
3977         }
3978
3979         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3980                 /* Check TX ring producer/consumer. */
3981                 bge_txeof(sc, tx_cons);
3982         }
3983
3984         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3985             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3986                 bge_start_locked(ifp);
3987
3988         BGE_UNLOCK(sc);
3989 }
3990
3991 static void
3992 bge_asf_driver_up(struct bge_softc *sc)
3993 {
3994         if (sc->bge_asf_mode & ASF_STACKUP) {
3995                 /* Send ASF heartbeat approx. every 2s */
3996                 if (sc->bge_asf_count)
3997                         sc->bge_asf_count --;
3998                 else {
3999                         sc->bge_asf_count = 2;
4000                         bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
4001                             BGE_FW_DRV_ALIVE);
4002                         bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
4003                         bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
4004                         CSR_WRITE_4(sc, BGE_CPU_EVENT,
4005                             CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
4006                 }
4007         }
4008 }
4009
4010 static void
4011 bge_tick(void *xsc)
4012 {
4013         struct bge_softc *sc = xsc;
4014         struct mii_data *mii = NULL;
4015
4016         BGE_LOCK_ASSERT(sc);
4017
4018         /* Synchronize with possible callout reset/stop. */
4019         if (callout_pending(&sc->bge_stat_ch) ||
4020             !callout_active(&sc->bge_stat_ch))
4021                 return;
4022
4023         if (BGE_IS_5705_PLUS(sc))
4024                 bge_stats_update_regs(sc);
4025         else
4026                 bge_stats_update(sc);
4027
4028         if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4029                 mii = device_get_softc(sc->bge_miibus);
4030                 /*
4031                  * Do not touch the PHY if we have link up. This could break
4032                  * IPMI/ASF mode or produce extra input errors
4033                  * (extra errors were reported for bcm5701 & bcm5704).
4034                  */
4035                 if (!sc->bge_link)
4036                         mii_tick(mii);
4037         } else {
4038                 /*
4039                  * Since auto-polling can't be used in TBI mode, we have to
4040                  * poll the link status manually. Here we register a pending
4041                  * link event and trigger an interrupt.
4042                  */
4043 #ifdef DEVICE_POLLING
4044                 /* In polling mode we poll link state in bge_poll(). */
4045                 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4046 #endif
4047                 {
4048                 sc->bge_link_evt++;
4049                 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4050                     sc->bge_flags & BGE_FLAG_5788)
4051                         BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4052                 else
4053                         BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4054                 }
4055         }
4056
4057         bge_asf_driver_up(sc);
4058         bge_watchdog(sc);
4059
4060         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4061 }
4062
4063 static void
4064 bge_stats_update_regs(struct bge_softc *sc)
4065 {
4066         struct ifnet *ifp;
4067         struct bge_mac_stats *stats;
4068
4069         ifp = sc->bge_ifp;
4070         stats = &sc->bge_mac_stats;
4071
4072         stats->ifHCOutOctets +=
4073             CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4074         stats->etherStatsCollisions +=
4075             CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4076         stats->outXonSent +=
4077             CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4078         stats->outXoffSent +=
4079             CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4080         stats->dot3StatsInternalMacTransmitErrors +=
4081             CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4082         stats->dot3StatsSingleCollisionFrames +=
4083             CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4084         stats->dot3StatsMultipleCollisionFrames +=
4085             CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4086         stats->dot3StatsDeferredTransmissions +=
4087             CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4088         stats->dot3StatsExcessiveCollisions +=
4089             CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4090         stats->dot3StatsLateCollisions +=
4091             CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4092         stats->ifHCOutUcastPkts +=
4093             CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4094         stats->ifHCOutMulticastPkts +=
4095             CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4096         stats->ifHCOutBroadcastPkts +=
4097             CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4098
4099         stats->ifHCInOctets +=
4100             CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4101         stats->etherStatsFragments +=
4102             CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4103         stats->ifHCInUcastPkts +=
4104             CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4105         stats->ifHCInMulticastPkts +=
4106             CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4107         stats->ifHCInBroadcastPkts +=
4108             CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4109         stats->dot3StatsFCSErrors +=
4110             CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4111         stats->dot3StatsAlignmentErrors +=
4112             CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4113         stats->xonPauseFramesReceived +=
4114             CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4115         stats->xoffPauseFramesReceived +=
4116             CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4117         stats->macControlFramesReceived +=
4118             CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4119         stats->xoffStateEntered +=
4120             CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4121         stats->dot3StatsFramesTooLong +=
4122             CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4123         stats->etherStatsJabbers +=
4124             CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4125         stats->etherStatsUndersizePkts +=
4126             CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4127
4128         stats->FramesDroppedDueToFilters +=
4129             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4130         stats->DmaWriteQueueFull +=
4131             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4132         stats->DmaWriteHighPriQueueFull +=
4133             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4134         stats->NoMoreRxBDs +=
4135             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4136         stats->InputDiscards +=
4137             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4138         stats->InputErrors +=
4139             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4140         stats->RecvThresholdHit +=
4141             CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4142
4143         ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4144         ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4145             stats->InputErrors);
4146 }
4147
4148 static void
4149 bge_stats_clear_regs(struct bge_softc *sc)
4150 {
4151
4152         CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4153         CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4154         CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4155         CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4156         CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4157         CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4158         CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4159         CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4160         CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4161         CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4162         CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4163         CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4164         CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4165
4166         CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4167         CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4168         CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4169         CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4170         CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4171         CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4172         CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4173         CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4174         CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4175         CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4176         CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4177         CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4178         CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4179         CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4180
4181         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4182         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4183         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4184         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4185         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4186         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4187         CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4188 }
4189
4190 static void
4191 bge_stats_update(struct bge_softc *sc)
4192 {
4193         struct ifnet *ifp;
4194         bus_size_t stats;
4195         uint32_t cnt;   /* current register value */
4196
4197         ifp = sc->bge_ifp;
4198
4199         stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4200
4201 #define READ_STAT(sc, stats, stat) \
4202         CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
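     /*
      * For example, READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo)
      * expands to a CSR_READ_4() of the word at BGE_MEMWIN_START + BGE_STATS_BLOCK
      * plus that field's offset within struct bge_stats, i.e. it reads the
      * statistics block through the NIC memory window.
      */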
4203
4204         cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4205         ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4206         sc->bge_tx_collisions = cnt;
4207
4208         cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4209         ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4210         sc->bge_rx_discards = cnt;
4211
4212         cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4213         ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4214         sc->bge_tx_discards = cnt;
4215
4216 #undef  READ_STAT
4217 }
4218
4219 /*
4220  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4221  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4222  * but when such padded frames employ the bge IP/TCP checksum offload,
4223  * the hardware checksum assist gives incorrect results (possibly
4224  * from incorporating its own padding into the UDP/TCP checksum; who knows).
4225  * If we pad such runts with zeros, the onboard checksum comes out correct.
4226  */
4227 static __inline int
4228 bge_cksum_pad(struct mbuf *m)
4229 {
4230         int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
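             /*
              * For example, a 42-byte runt needs padlen = 18 bytes of zeros
              * appended, assuming the usual ETHER_MIN_NOPAD of 60 bytes
              * (minimum frame length excluding the CRC).
              */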
4231         struct mbuf *last;
4232
4233         /* If there's only the packet-header and we can pad there, use it. */
4234         if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4235             M_TRAILINGSPACE(m) >= padlen) {
4236                 last = m;
4237         } else {
4238                 /*
4239                  * Walk packet chain to find last mbuf. We will either
4240                  * pad there, or append a new mbuf and pad it.
4241                  */
4242                 for (last = m; last->m_next != NULL; last = last->m_next);
4243                 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4244                         /* Allocate new empty mbuf, pad it. Compact later. */
4245                         struct mbuf *n;
4246
4247                         MGET(n, M_DONTWAIT, MT_DATA);
4248                         if (n == NULL)
4249                                 return (ENOBUFS);
4250                         n->m_len = 0;
4251                         last->m_next = n;
4252                         last = n;
4253                 }
4254         }
4255
4256         /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4257         memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4258         last->m_len += padlen;
4259         m->m_pkthdr.len += padlen;
4260
4261         return (0);
4262 }
4263
4264 static struct mbuf *
4265 bge_check_short_dma(struct mbuf *m)
4266 {
4267         struct mbuf *n;
4268         int found;
4269
4270         /*
4271          * If the device receives two back-to-back send BDs with less
4272          * than or equal to 8 total bytes then the device may hang.  The
4273          * two back-to-back send BDs must be in the same frame for this
4274          * failure to occur.  Scan the mbuf chain and see whether two
4275          * back-to-back send BDs are there.  If this is the case, allocate
4276          * a new mbuf and copy the frame to work around the silicon bug.
4277          */
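             /*
              * For example, a chain containing two consecutive 4-byte mbufs
              * trips the check below and the whole frame is copied via
              * m_defrag().
              */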
4278         for (n = m, found = 0; n != NULL; n = n->m_next) {
4279                 if (n->m_len < 8) {
4280                         found++;
4281                         if (found > 1)
4282                                 break;
4283                         continue;
4284                 }
4285                 found = 0;
4286         }
4287
4288         if (found > 1) {
4289                 n = m_defrag(m, M_DONTWAIT);
4290                 if (n == NULL)
4291                         m_freem(m);
4292         } else
4293                 n = m;
4294         return (n);
4295 }
4296
4297 static struct mbuf *
4298 bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4299     uint16_t *flags)
4300 {
4301         struct ip *ip;
4302         struct tcphdr *tcp;
4303         struct mbuf *n;
4304         uint16_t hlen;
4305         uint32_t poff;
4306
4307         if (M_WRITABLE(m) == 0) {
4308                 /* Get a writable copy. */
4309                 n = m_dup(m, M_DONTWAIT);
4310                 m_freem(m);
4311                 if (n == NULL)
4312                         return (NULL);
4313                 m = n;
4314         }
4315         m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4316         if (m == NULL)
4317                 return (NULL);
4318         ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4319         poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4320         m = m_pullup(m, poff + sizeof(struct tcphdr));
4321         if (m == NULL)
4322                 return (NULL);
4323         tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4324         m = m_pullup(m, poff + (tcp->th_off << 2));
4325         if (m == NULL)
4326                 return (NULL);
4327         /*
4328          * It seems the controller doesn't modify the IP length and TCP pseudo
4329          * checksum.  These checksums, computed by the upper stack, should be 0.
4330          */
4331         *mss = m->m_pkthdr.tso_segsz;
4332         ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4333         ip->ip_sum = 0;
4334         ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4335         /* Clear pseudo checksum computed by TCP stack. */
4336         tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4337         tcp->th_sum = 0;
4338         /*
4339          * Broadcom controllers use a different descriptor format for
4340          * TSO depending on ASIC revision.  Due to TSO-capable firmware
4341          * licensing issues and the lower performance of firmware-based
4342          * TSO we only support hardware-based TSO.
4343          */
4344         /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4345         hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4346         if (sc->bge_flags & BGE_FLAG_TSO3) {
4347                 /*
4348                  * For BCM5717 and newer controllers, hardware based TSO
4349                  * uses the 14 lower bits of the bge_mss field to store the
4350                  * MSS and the upper 2 bits to store the lowest 2 bits of
4351                  * the IP/TCP header length.  The upper 6 bits of the header
4352                  * length are stored in the bge_flags[14:10,4] field.  Jumbo
4353                  * frames are supported.
4354                  */
4355                 *mss |= ((hlen & 0x3) << 14);
4356                 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
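                     /*
                      * For example, with a 20-byte IP header and a 20-byte TCP
                      * header (no options), hlen = (20 + 20) >> 2 = 10, so
                      * (hlen & 0x3) = 2 lands in mss[15:14] (mss |= 0x8000) and
                      * (hlen & 0xF8) = 8 lands in flags bit 10 (flags |= 0x0400).
                      */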
4357         } else {
4358                 /*
4359                  * For BCM5755 and newer controllers, hardware based TSO uses
4360                  * the lower 11 bits to store the MSS and the upper 5 bits to
4361                  * store the IP/TCP header length. Jumbo frames are not
4362                  * supported.
4363                  */
4364                 *mss |= (hlen << 11);
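                     /*
                      * With the same 40 bytes of headers, hlen = 10 here yields
                      * mss |= (10 << 11) = 0x5000 in the upper 5 bits of bge_mss.
                      */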
4365         }
4366         return (m);
4367 }
4368
4369 /*
4370  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4371  * pointers to descriptors.
4372  */
4373 static int
4374 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4375 {
4376         bus_dma_segment_t       segs[BGE_NSEG_NEW];
4377         bus_dmamap_t            map;
4378         struct bge_tx_bd        *d;
4379         struct mbuf             *m = *m_head;
4380         uint32_t                idx = *txidx;
4381         uint16_t                csum_flags, mss, vlan_tag;
4382         int                     nsegs, i, error;
4383
4384         csum_flags = 0;
4385         mss = 0;
4386         vlan_tag = 0;
4387         if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4388             m->m_next != NULL) {
4389                 *m_head = bge_check_short_dma(m);
4390                 if (*m_head == NULL)
4391                         return (ENOBUFS);
4392                 m = *m_head;
4393         }
4394         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4395                 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4396                 if (*m_head == NULL)
4397                         return (ENOBUFS);
4398                 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4399                     BGE_TXBDFLAG_CPU_POST_DMA;
4400         } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4401                 if (m->m_pkthdr.csum_flags & CSUM_IP)
4402                         csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4403                 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4404                         csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4405                         if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4406                             (error = bge_cksum_pad(m)) != 0) {
4407                                 m_freem(m);
4408                                 *m_head = NULL;
4409                                 return (error);
4410                         }
4411                 }
4412                 if (m->m_flags & M_LASTFRAG)
4413                         csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4414                 else if (m->m_flags & M_FRAG)
4415                         csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4416         }
4417
4418         if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4419                 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4420                     m->m_pkthdr.len > ETHER_MAX_LEN)
4421                         csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4422                 if (sc->bge_forced_collapse > 0 &&
4423                     (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4424                         /*
4425                          * Forcibly collapse mbuf chains to overcome a
4426                          * hardware limitation which only supports a single
4427                          * outstanding DMA read operation.
4428                          */
4429                         if (sc->bge_forced_collapse == 1)
4430                                 m = m_defrag(m, M_DONTWAIT);
4431                         else
4432                                 m = m_collapse(m, M_DONTWAIT,
4433                                     sc->bge_forced_collapse);
4434                         if (m == NULL)
4435                                 m = *m_head;
4436                         *m_head = m;
4437                 }
4438         }
4439
4440         map = sc->bge_cdata.bge_tx_dmamap[idx];
4441         error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4442             &nsegs, BUS_DMA_NOWAIT);
4443         if (error == EFBIG) {
4444                 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4445                 if (m == NULL) {
4446                         m_freem(*m_head);
4447                         *m_head = NULL;
4448                         return (ENOBUFS);
4449                 }
4450                 *m_head = m;
4451                 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4452                     m, segs, &nsegs, BUS_DMA_NOWAIT);
4453                 if (error) {
4454                         m_freem(m);
4455                         *m_head = NULL;
4456                         return (error);
4457                 }
4458         } else if (error != 0)
4459                 return (error);
4460
4461         /* Check if we have enough free send BDs. */
4462         if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4463                 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4464                 return (ENOBUFS);
4465         }
4466
4467         bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4468
4469 #if __FreeBSD_version > 700022
4470         if (m->m_flags & M_VLANTAG) {
4471                 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4472                 vlan_tag = m->m_pkthdr.ether_vtag;
4473         }
4474 #else
4475         {
4476                 struct m_tag            *mtag;
4477
4478                 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
4479                         csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4480                         vlan_tag = VLAN_TAG_VALUE(mtag);
4481                 }
4482         }
4483 #endif
4484         for (i = 0; ; i++) {
4485                 d = &sc->bge_ldata.bge_tx_ring[idx];
4486                 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4487                 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4488                 d->bge_len = segs[i].ds_len;
4489                 d->bge_flags = csum_flags;
4490                 d->bge_vlan_tag = vlan_tag;
4491                 d->bge_mss = mss;
4492                 if (i == nsegs - 1)
4493                         break;
4494                 BGE_INC(idx, BGE_TX_RING_CNT);
4495         }
4496
4497         /* Mark the last segment as end of packet... */
4498         d->bge_flags |= BGE_TXBDFLAG_END;
4499
4500         /*
4501          * Ensure that the map for this transmission
4502          * is placed at the array index of the last descriptor
4503          * in this chain.
4504          */
4505         sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4506         sc->bge_cdata.bge_tx_dmamap[idx] = map;
4507         sc->bge_cdata.bge_tx_chain[idx] = m;
4508         sc->bge_txcnt += nsegs;
4509
4510         BGE_INC(idx, BGE_TX_RING_CNT);
4511         *txidx = idx;
4512
4513         return (0);
4514 }
4515
4516 /*
4517  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4518  * to the mbuf data regions directly in the transmit descriptors.
4519  */
4520 static void
4521 bge_start_locked(struct ifnet *ifp)
4522 {
4523         struct bge_softc *sc;
4524         struct mbuf *m_head;
4525         uint32_t prodidx;
4526         int count;
4527
4528         sc = ifp->if_softc;
4529         BGE_LOCK_ASSERT(sc);
4530
4531         if (!sc->bge_link ||
4532             (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4533             IFF_DRV_RUNNING)
4534                 return;
4535
4536         prodidx = sc->bge_tx_prodidx;
4537
4538         for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4539                 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4540                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4541                         break;
4542                 }
4543                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4544                 if (m_head == NULL)
4545                         break;
4546
4547                 /*
4548                  * XXX
4549                  * The code inside the if() block is never reached since we
4550                  * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4551                  * requests to checksum TCP/UDP in a fragmented packet.
4552                  *
4553                  * XXX
4554                  * safety overkill.  If this is a fragmented packet chain
4555                  * with delayed TCP/UDP checksums, then only encapsulate
4556                  * it if we have enough descriptors to handle the entire
4557                  * chain at once.
4558                  * (paranoia -- may not actually be needed)
4559                  */
4560                 if (m_head->m_flags & M_FIRSTFRAG &&
4561                     m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4562                         if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4563                             m_head->m_pkthdr.csum_data + 16) {
4564                                 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4565                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4566                                 break;
4567                         }
4568                 }
4569
4570                 /*
4571                  * Pack the data into the transmit ring. If we
4572                  * don't have room, set the OACTIVE flag and wait
4573                  * for the NIC to drain the ring.
4574                  */
4575                 if (bge_encap(sc, &m_head, &prodidx)) {
4576                         if (m_head == NULL)
4577                                 break;
4578                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4579                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4580                         break;
4581                 }
4582                 ++count;
4583
4584                 /*
4585                  * If there's a BPF listener, bounce a copy of this frame
4586                  * to him.
4587                  */
4588 #ifdef ETHER_BPF_MTAP
4589                 ETHER_BPF_MTAP(ifp, m_head);
4590 #else
4591                 BPF_MTAP(ifp, m_head);
4592 #endif
4593         }
4594
4595         if (count > 0) {
4596                 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4597                     sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4598                 /* Transmit. */
4599                 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4600                 /* 5700 b2 errata */
4601                 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4602                         bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4603
4604                 sc->bge_tx_prodidx = prodidx;
4605
4606                 /*
4607                  * Set a timeout in case the chip goes out to lunch.
4608                  */
4609                 sc->bge_timer = 5;
4610         }
4611 }
4612
4613 /*
4614  * Externally visible transmit entry point: acquire the driver lock and
4615  * hand the interface send queue off to bge_start_locked().
4616  */
4617 static void
4618 bge_start(struct ifnet *ifp)
4619 {
4620         struct bge_softc *sc;
4621
4622         sc = ifp->if_softc;
4623         BGE_LOCK(sc);
4624         bge_start_locked(ifp);
4625         BGE_UNLOCK(sc);
4626 }
4627
4628 static void
4629 bge_init_locked(struct bge_softc *sc)
4630 {
4631         struct ifnet *ifp;
4632         uint16_t *m;
4633         uint32_t mode;
4634
4635         BGE_LOCK_ASSERT(sc);
4636
4637         ifp = sc->bge_ifp;
4638
4639         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4640                 return;
4641
4642         /* Cancel pending I/O and flush buffers. */
4643         bge_stop(sc);
4644
4645         bge_stop_fw(sc);
4646         bge_sig_pre_reset(sc, BGE_RESET_START);
4647         bge_reset(sc);
4648         bge_sig_legacy(sc, BGE_RESET_START);
4649         bge_sig_post_reset(sc, BGE_RESET_START);
4650
4651         bge_chipinit(sc);
4652
4653         /*
4654          * Init the various state machines, ring
4655          * control blocks and firmware.
4656          */
4657         if (bge_blockinit(sc)) {
4658                 device_printf(sc->bge_dev, "initialization failure\n");
4659                 return;
4660         }
4661
4662         ifp = sc->bge_ifp;
4663
4664         /* Specify MTU. */
4665         CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4666             ETHER_HDR_LEN + ETHER_CRC_LEN +
4667             (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
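             /*
              * For the default 1500-byte MTU with IFCAP_VLAN_MTU enabled this
              * programs an RX MTU of 1500 + 14 + 4 + 4 = 1522 bytes.
              */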
4668
4669         /* Load our MAC address. */
4670         m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4671         CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4672         CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
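             /*
              * For example, a station address of 00:11:22:33:44:55 is
              * programmed as BGE_MAC_ADDR1_LO = 0x0011 and
              * BGE_MAC_ADDR1_HI = 0x22334455; the htons() calls make the
              * result independent of host byte order.
              */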
4673
4674         /* Program promiscuous mode. */
4675         bge_setpromisc(sc);
4676
4677         /* Program multicast filter. */
4678         bge_setmulti(sc);
4679
4680         /* Program VLAN tag stripping. */
4681         bge_setvlan(sc);
4682
4683         /* Override UDP checksum offloading. */
4684         if (sc->bge_forced_udpcsum == 0)
4685                 sc->bge_csum_features &= ~CSUM_UDP;
4686         else
4687                 sc->bge_csum_features |= CSUM_UDP;
4688         if (ifp->if_capabilities & IFCAP_TXCSUM &&
4689             ifp->if_capenable & IFCAP_TXCSUM) {
4690                 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4691                 ifp->if_hwassist |= sc->bge_csum_features;
4692         }
4693
4694         /* Init RX ring. */
4695         if (bge_init_rx_ring_std(sc) != 0) {
4696                 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4697                 bge_stop(sc);
4698                 return;
4699         }
4700
4701         /*
4702          * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4703          * memory to ensure that the chip has in fact read the first
4704          * entry of the ring.
4705          */
4706         if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4707                 uint32_t                v, i;
4708                 for (i = 0; i < 10; i++) {
4709                         DELAY(20);
4710                         v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4711                         if (v == (MCLBYTES - ETHER_ALIGN))
4712                                 break;
4713                 }
4714                 if (i == 10)
4715                         device_printf (sc->bge_dev,
4716                             "5705 A0 chip failed to load RX ring\n");
4717         }
4718
4719         /* Init jumbo RX ring. */
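             /*
              * With standard 2 KB mbuf clusters (MCLBYTES = 2048) this path is
              * taken whenever the configured MTU pushes the maximum frame size
              * past MCLBYTES - ETHER_ALIGN = 2046 bytes.
              */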
4720         if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4721             (MCLBYTES - ETHER_ALIGN)) {
4722                 if (bge_init_rx_ring_jumbo(sc) != 0) {
4723                         device_printf(sc->bge_dev,
4724                             "no memory for jumbo Rx buffers.\n");
4725                         bge_stop(sc);
4726                         return;
4727                 }
4728         }
4729
4730         /* Init our RX return ring index. */
4731         sc->bge_rx_saved_considx = 0;
4732
4733         /* Init our RX/TX stat counters. */
4734         sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4735
4736         /* Init TX ring. */
4737         bge_init_tx_ring(sc);
4738
4739         /* Enable TX MAC state machine lockup fix. */
4740         mode = CSR_READ_4(sc, BGE_TX_MODE);
4741         if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4742                 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4743         /* Turn on transmitter. */
4744         CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4745
4746         /* Turn on receiver. */
4747         BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4748
4749         /*
4750          * Set the number of good frames to receive after RX MBUF
4751          * Low Watermark has been reached. After the RX MAC receives
4752          * this number of frames, it will drop subsequent incoming
4753          * frames until the MBUF High Watermark is reached.
4754          */
4755         CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4756
4757         /* Clear MAC statistics. */
4758         if (BGE_IS_5705_PLUS(sc))
4759                 bge_stats_clear_regs(sc);
4760
4761         /* Tell firmware we're alive. */
4762         BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4763
4764 #ifdef DEVICE_POLLING
4765         /* Disable interrupts if we are polling. */
4766         if (ifp->if_capenable & IFCAP_POLLING) {
4767                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4768                     BGE_PCIMISCCTL_MASK_PCI_INTR);
4769                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4770         } else
4771 #endif
4772
4773         /* Enable host interrupts. */
4774         {
4775         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4776         BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4777         bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4778         }
4779
4780         bge_ifmedia_upd_locked(ifp);
4781
4782         ifp->if_drv_flags |= IFF_DRV_RUNNING;
4783         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4784
4785         callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4786 }
4787
4788 static void
4789 bge_init(void *xsc)
4790 {
4791         struct bge_softc *sc = xsc;
4792
4793         BGE_LOCK(sc);
4794         bge_init_locked(sc);
4795         BGE_UNLOCK(sc);
4796 }
4797
4798 /*
4799  * Set media options.
4800  */
4801 static int
4802 bge_ifmedia_upd(struct ifnet *ifp)
4803 {
4804         struct bge_softc *sc = ifp->if_softc;
4805         int res;
4806
4807         BGE_LOCK(sc);
4808         res = bge_ifmedia_upd_locked(ifp);
4809         BGE_UNLOCK(sc);
4810
4811         return (res);
4812 }
4813
4814 static int
4815 bge_ifmedia_upd_locked(struct ifnet *ifp)
4816 {
4817         struct bge_softc *sc = ifp->if_softc;
4818         struct mii_data *mii;
4819         struct mii_softc *miisc;
4820         struct ifmedia *ifm;
4821
4822         BGE_LOCK_ASSERT(sc);
4823
4824         ifm = &sc->bge_ifmedia;
4825
4826         /* If this is a 1000baseX NIC, enable the TBI port. */
4827         if (sc->bge_flags & BGE_FLAG_TBI) {
4828                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4829                         return (EINVAL);
4830                 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4831                 case IFM_AUTO:
4832                         /*
4833                          * The BCM5704 ASIC appears to have a special
4834                          * mechanism for programming the autoneg
4835                          * advertisement registers in TBI mode.
4836                          */
4837                         if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4838                                 uint32_t sgdig;
4839                                 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4840                                 if (sgdig & BGE_SGDIGSTS_DONE) {
4841                                         CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4842                                         sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4843                                         sgdig |= BGE_SGDIGCFG_AUTO |
4844                                             BGE_SGDIGCFG_PAUSE_CAP |
4845                                             BGE_SGDIGCFG_ASYM_PAUSE;
4846                                         CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4847                                             sgdig | BGE_SGDIGCFG_SEND);
4848                                         DELAY(5);
4849                                         CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4850                                 }
4851                         }
4852                         break;
4853                 case IFM_1000_SX:
4854                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4855                                 BGE_CLRBIT(sc, BGE_MAC_MODE,
4856                                     BGE_MACMODE_HALF_DUPLEX);
4857                         } else {
4858                                 BGE_SETBIT(sc, BGE_MAC_MODE,
4859                                     BGE_MACMODE_HALF_DUPLEX);
4860                         }
4861                         break;
4862                 default:
4863                         return (EINVAL);
4864                 }
4865                 return (0);
4866         }
4867
4868         sc->bge_link_evt++;
4869         mii = device_get_softc(sc->bge_miibus);
4870         if (mii->mii_instance)
4871                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4872                         mii_phy_reset(miisc);
4873         mii_mediachg(mii);
4874
4875         /*
4876          * Force an interrupt so that we will call bge_link_upd
4877          * if needed and clear any pending link state attention.
4878          * Without this we do not get any further interrupts for
4879          * link state changes, so the link never comes UP and we
4880          * cannot send in bge_start_locked(). The only way to get
4881          * things working then was to receive a packet and get
4882          * an RX intr.
4883          * bge_tick() should help for fiber cards, and we might not
4884          * need to do this here if BGE_FLAG_TBI is set, but since
4885          * we poll for fiber anyway it should not harm.
4886          */
4887         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4888             sc->bge_flags & BGE_FLAG_5788)
4889                 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4890         else
4891                 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4892
4893         return (0);
4894 }
4895
4896 /*
4897  * Report current media status.
4898  */
4899 static void
4900 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4901 {
4902         struct bge_softc *sc = ifp->if_softc;
4903         struct mii_data *mii;
4904
4905         BGE_LOCK(sc);
4906
4907         if (sc->bge_flags & BGE_FLAG_TBI) {
4908                 ifmr->ifm_status = IFM_AVALID;
4909                 ifmr->ifm_active = IFM_ETHER;
4910                 if (CSR_READ_4(sc, BGE_MAC_STS) &
4911                     BGE_MACSTAT_TBI_PCS_SYNCHED)
4912                         ifmr->ifm_status |= IFM_ACTIVE;
4913                 else {
4914                         ifmr->ifm_active |= IFM_NONE;
4915                         BGE_UNLOCK(sc);
4916                         return;
4917                 }
4918                 ifmr->ifm_active |= IFM_1000_SX;
4919                 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4920                         ifmr->ifm_active |= IFM_HDX;
4921                 else
4922                         ifmr->ifm_active |= IFM_FDX;
4923                 BGE_UNLOCK(sc);
4924                 return;
4925         }
4926
4927         mii = device_get_softc(sc->bge_miibus);
4928         mii_pollstat(mii);
4929         ifmr->ifm_active = mii->mii_media_active;
4930         ifmr->ifm_status = mii->mii_media_status;
4931
4932         BGE_UNLOCK(sc);
4933 }
4934
4935 static int
4936 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4937 {
4938         struct bge_softc *sc = ifp->if_softc;
4939         struct ifreq *ifr = (struct ifreq *) data;
4940         struct mii_data *mii;
4941         int flags, mask, error = 0;
4942
4943         switch (command) {
4944         case SIOCSIFMTU:
4945                 BGE_LOCK(sc);
4946                 if (ifr->ifr_mtu < ETHERMIN ||
4947                     ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4948                     ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4949                     ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4950                     ifr->ifr_mtu > ETHERMTU))
4951                         error = EINVAL;
4952                 else if (ifp->if_mtu != ifr->ifr_mtu) {
4953                         ifp->if_mtu = ifr->ifr_mtu;
4954                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4955                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4956                                 bge_init_locked(sc);
4957                         }
4958                 }
4959                 BGE_UNLOCK(sc);
4960                 break;
4961         case SIOCSIFFLAGS:
4962                 BGE_LOCK(sc);
4963                 if (ifp->if_flags & IFF_UP) {
4964                         /*
4965                          * If only the state of the PROMISC flag changed,
4966                          * then just use the 'set promisc mode' command
4967                          * instead of reinitializing the entire NIC. Doing
4968                          * a full re-init means reloading the firmware and
4969                          * waiting for it to start up, which may take a
4970                          * second or two.  Similarly for ALLMULTI.
4971                          */
4972                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4973                                 flags = ifp->if_flags ^ sc->bge_if_flags;
4974                                 if (flags & IFF_PROMISC)
4975                                         bge_setpromisc(sc);
4976                                 if (flags & IFF_ALLMULTI)
4977                                         bge_setmulti(sc);
4978                         } else
4979                                 bge_init_locked(sc);
4980                 } else {
4981                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4982                                 bge_stop(sc);
4983                         }
4984                 }
4985                 sc->bge_if_flags = ifp->if_flags;
4986                 BGE_UNLOCK(sc);
4987                 error = 0;
4988                 break;
4989         case SIOCADDMULTI:
4990         case SIOCDELMULTI:
4991                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4992                         BGE_LOCK(sc);
4993                         bge_setmulti(sc);
4994                         BGE_UNLOCK(sc);
4995                         error = 0;
4996                 }
4997                 break;
4998         case SIOCSIFMEDIA:
4999         case SIOCGIFMEDIA:
5000                 if (sc->bge_flags & BGE_FLAG_TBI) {
5001                         error = ifmedia_ioctl(ifp, ifr,
5002                             &sc->bge_ifmedia, command);
5003                 } else {
5004                         mii = device_get_softc(sc->bge_miibus);
5005                         error = ifmedia_ioctl(ifp, ifr,
5006                             &mii->mii_media, command);
5007                 }
5008                 break;
5009         case SIOCSIFCAP:
5010                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5011 #ifdef DEVICE_POLLING
5012                 if (mask & IFCAP_POLLING) {
5013                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
5014                                 error = ether_poll_register(bge_poll, ifp);
5015                                 if (error)
5016                                         return (error);
5017                                 BGE_LOCK(sc);
5018                                 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5019                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5020                                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5021                                 ifp->if_capenable |= IFCAP_POLLING;
5022                                 BGE_UNLOCK(sc);
5023                         } else {
5024                                 error = ether_poll_deregister(ifp);
5025                                 /* Enable interrupts even in the error case. */
5026                                 BGE_LOCK(sc);
5027                                 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5028                                     BGE_PCIMISCCTL_MASK_PCI_INTR);
5029                                 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5030                                 ifp->if_capenable &= ~IFCAP_POLLING;
5031                                 BGE_UNLOCK(sc);
5032                         }
5033                 }
5034 #endif
5035                 if ((mask & IFCAP_TXCSUM) != 0 &&
5036                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5037                         ifp->if_capenable ^= IFCAP_TXCSUM;
5038                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5039                                 ifp->if_hwassist |= sc->bge_csum_features;
5040                         else
5041                                 ifp->if_hwassist &= ~sc->bge_csum_features;
5042                 }
5043
5044                 if ((mask & IFCAP_RXCSUM) != 0 &&
5045                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5046                         ifp->if_capenable ^= IFCAP_RXCSUM;
5047
5048                 if ((mask & IFCAP_TSO4) != 0 &&
5049                     (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5050                         ifp->if_capenable ^= IFCAP_TSO4;
5051                         if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5052                                 ifp->if_hwassist |= CSUM_TSO;
5053                         else
5054                                 ifp->if_hwassist &= ~CSUM_TSO;
5055                 }
5056
5057                 if (mask & IFCAP_VLAN_MTU) {
5058                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
5059                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5060                         bge_init(sc);
5061                 }
5062
5063                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5064                     (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5065                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5066                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5067                     (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5068                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5069                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5070                                 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5071                         BGE_LOCK(sc);
5072                         bge_setvlan(sc);
5073                         BGE_UNLOCK(sc);
5074                 }
5075 #ifdef VLAN_CAPABILITIES
5076                 VLAN_CAPABILITIES(ifp);
5077 #endif
5078                 break;
5079         default:
5080                 error = ether_ioctl(ifp, command, data);
5081                 break;
5082         }
5083
5084         return (error);
5085 }
5086
5087 static void
5088 bge_watchdog(struct bge_softc *sc)
5089 {
5090         struct ifnet *ifp;
5091
5092         BGE_LOCK_ASSERT(sc);
5093
5094         if (sc->bge_timer == 0 || --sc->bge_timer)
5095                 return;
5096
5097         ifp = sc->bge_ifp;
5098
5099         if_printf(ifp, "watchdog timeout -- resetting\n");
5100
5101         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5102         bge_init_locked(sc);
5103
5104         ifp->if_oerrors++;
5105 }
5106
5107 /*
5108  * Stop the adapter and free any mbufs allocated to the
5109  * RX and TX lists.
5110  */
5111 static void
5112 bge_stop(struct bge_softc *sc)
5113 {
5114         struct ifnet *ifp;
5115
5116         BGE_LOCK_ASSERT(sc);
5117
5118         ifp = sc->bge_ifp;
5119
5120         callout_stop(&sc->bge_stat_ch);
5121
5122         /* Disable host interrupts. */
5123         BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5124         bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5125
5126         /*
5127          * Tell firmware we're shutting down.
5128          */
5129         bge_stop_fw(sc);
5130         bge_sig_pre_reset(sc, BGE_RESET_STOP);
5131
5132         /*
5133          * Disable all of the receiver blocks.
5134          */
5135         BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5136         BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5137         BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5138         if (!(BGE_IS_5705_PLUS(sc)))
5139                 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5140         BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5141         BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5142         BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5143
5144         /*
5145          * Disable all of the transmit blocks.
5146          */
5147         BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5148         BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5149         BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5150         BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5151         BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5152         if (!(BGE_IS_5705_PLUS(sc)))
5153                 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5154         BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5155
5156         /*
5157          * Shut down all of the memory managers and related
5158          * state machines.
5159          */
5160         BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5161         BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5162         if (!(BGE_IS_5705_PLUS(sc)))
5163                 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5164         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5165         CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5166         if (!(BGE_IS_5705_PLUS(sc))) {
5167                 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5168                 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5169         }
5170         /* Update MAC statistics. */
5171         if (BGE_IS_5705_PLUS(sc))
5172                 bge_stats_update_regs(sc);
5173
5174         bge_reset(sc);
5175         bge_sig_legacy(sc, BGE_RESET_STOP);
5176         bge_sig_post_reset(sc, BGE_RESET_STOP);
5177
5178         /*
5179          * Keep the ASF firmware running if up.
5180          */
5181         if (sc->bge_asf_mode & ASF_STACKUP)
5182                 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5183         else
5184                 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5185
5186         /* Free the RX lists. */
5187         bge_free_rx_ring_std(sc);
5188
5189         /* Free jumbo RX list. */
5190         if (BGE_IS_JUMBO_CAPABLE(sc))
5191                 bge_free_rx_ring_jumbo(sc);
5192
5193         /* Free TX buffers. */
5194         bge_free_tx_ring(sc);
5195
5196         sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5197
5198         /* Clear MAC's link state (PHY may still have link UP). */
5199         if (bootverbose && sc->bge_link)
5200                 if_printf(sc->bge_ifp, "link DOWN\n");
5201         sc->bge_link = 0;
5202
5203         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5204 }
5205
5206 /*
5207  * Stop all chip I/O so that the kernel's probe routines don't
5208  * get confused by errant DMAs when rebooting.
5209  */
5210 static int
5211 bge_shutdown(device_t dev)
5212 {
5213         struct bge_softc *sc;
5214
5215         sc = device_get_softc(dev);
5216         BGE_LOCK(sc);
5217         bge_stop(sc);
5218         bge_reset(sc);
5219         BGE_UNLOCK(sc);
5220
5221         return (0);
5222 }
5223
5224 static int
5225 bge_suspend(device_t dev)
5226 {
5227         struct bge_softc *sc;
5228
5229         sc = device_get_softc(dev);
5230         BGE_LOCK(sc);
5231         bge_stop(sc);
5232         BGE_UNLOCK(sc);
5233
5234         return (0);
5235 }
5236
5237 static int
5238 bge_resume(device_t dev)
5239 {
5240         struct bge_softc *sc;
5241         struct ifnet *ifp;
5242
5243         sc = device_get_softc(dev);
5244         BGE_LOCK(sc);
5245         ifp = sc->bge_ifp;
5246         if (ifp->if_flags & IFF_UP) {
5247                 bge_init_locked(sc);
5248                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5249                         bge_start_locked(ifp);
5250         }
5251         BGE_UNLOCK(sc);
5252
5253         return (0);
5254 }
5255
5256 static void
5257 bge_link_upd(struct bge_softc *sc)
5258 {
5259         struct mii_data *mii;
5260         uint32_t link, status;
5261
5262         BGE_LOCK_ASSERT(sc);
5263
5264         /* Clear 'pending link event' flag. */
5265         sc->bge_link_evt = 0;
5266
5267         /*
5268          * Process link state changes.
5269          * Grrr. The link status word in the status block does
5270          * not work correctly on the BCM5700 rev AX and BX chips,
5271          * according to all available information. Hence, we have
5272          * to enable MII interrupts in order to properly obtain
5273          * async link changes. Unfortunately, this also means that
5274          * we have to read the MAC status register to detect link
5275          * changes, thereby adding an additional register access to
5276          * the interrupt handler.
5277          *
5278          * XXX: perhaps the link state detection procedure used for
5279          * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
5280          */
5281
5282         if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5283             sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5284                 status = CSR_READ_4(sc, BGE_MAC_STS);
5285                 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5286                         mii = device_get_softc(sc->bge_miibus);
5287                         mii_pollstat(mii);
5288                         if (!sc->bge_link &&
5289                             mii->mii_media_status & IFM_ACTIVE &&
5290                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5291                                 sc->bge_link++;
5292                                 if (bootverbose)
5293                                         if_printf(sc->bge_ifp, "link UP\n");
5294                         } else if (sc->bge_link &&
5295                             (!(mii->mii_media_status & IFM_ACTIVE) ||
5296                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5297                                 sc->bge_link = 0;
5298                                 if (bootverbose)
5299                                         if_printf(sc->bge_ifp, "link DOWN\n");
5300                         }
5301
5302                         /* Clear the interrupt. */
5303                         CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5304                             BGE_EVTENB_MI_INTERRUPT);
5305                         bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5306                         bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5307                             BRGPHY_INTRS);
5308                 }
5309                 return;
5310         }
5311
5312         if (sc->bge_flags & BGE_FLAG_TBI) {
5313                 status = CSR_READ_4(sc, BGE_MAC_STS);
5314                 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5315                         if (!sc->bge_link) {
5316                                 sc->bge_link++;
5317                                 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5318                                         BGE_CLRBIT(sc, BGE_MAC_MODE,
5319                                             BGE_MACMODE_TBI_SEND_CFGS);
5320                                 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5321                                 if (bootverbose)
5322                                         if_printf(sc->bge_ifp, "link UP\n");
5323                                 if_link_state_change(sc->bge_ifp,
5324                                     LINK_STATE_UP);
5325                         }
5326                 } else if (sc->bge_link) {
5327                         sc->bge_link = 0;
5328                         if (bootverbose)
5329                                 if_printf(sc->bge_ifp, "link DOWN\n");
5330                         if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5331                 }
5332         } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5333                 /*
5334                  * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5335                  * bit in the status word always set.  Work around this bug by
5336                  * reading the PHY link status directly.
5337                  */
5338                 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5339
5340                 if (link != sc->bge_link ||
5341                     sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5342                         mii = device_get_softc(sc->bge_miibus);
5343                         mii_pollstat(mii);
5344                         if (!sc->bge_link &&
5345                             mii->mii_media_status & IFM_ACTIVE &&
5346                             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5347                                 sc->bge_link++;
5348                                 if (bootverbose)
5349                                         if_printf(sc->bge_ifp, "link UP\n");
5350                         } else if (sc->bge_link &&
5351                             (!(mii->mii_media_status & IFM_ACTIVE) ||
5352                             IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5353                                 sc->bge_link = 0;
5354                                 if (bootverbose)
5355                                         if_printf(sc->bge_ifp, "link DOWN\n");
5356                         }
5357                 }
5358         } else {
5359                 /*
5360                  * For controllers that call mii_tick, we have to poll
5361                  * link status.
5362                  */
5363                 mii = device_get_softc(sc->bge_miibus);
5364                 mii_pollstat(mii);
5365                 bge_miibus_statchg(sc->bge_dev);
5366         }
5367
5368         /* Clear the attention. */
5369         CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5370             BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5371             BGE_MACSTAT_LINK_CHANGED);
5372 }
5373
5374 static void
5375 bge_add_sysctls(struct bge_softc *sc)
5376 {
5377         struct sysctl_ctx_list *ctx;
5378         struct sysctl_oid_list *children;
5379         char tn[32];
5380         int unit;
5381
5382         ctx = device_get_sysctl_ctx(sc->bge_dev);
5383         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5384
5385 #ifdef BGE_REGISTER_DEBUG
5386         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5387             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5388             "Debug Information");
5389
5390         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5391             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5392             "Register Read");
5393
5394         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5395             CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5396             "Memory Read");
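
        /*
         * Usage sketch (hypothetical unit 0): writing 1 to
         * dev.bge.0.debug_info dumps the status block, the 0x800-0x9ff
         * register window and the hardware flags to the console; writing
         * an offset below 0x8000 to dev.bge.0.reg_read or
         * dev.bge.0.mem_read prints the corresponding register or
         * NIC-memory word, e.g. "sysctl dev.bge.0.reg_read=0x400"
         * (offset chosen arbitrarily).
         */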
5397
5398 #endif
5399
5400         unit = device_get_unit(sc->bge_dev);
5401         /*
5402          * A common design characteristic for many Broadcom client controllers
5403          * is that they only support a single outstanding DMA read operation
5404          * on the PCIe bus. This means that it will take twice as long to fetch
5405          * a TX frame that is split into header and payload buffers as it does
5406          * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5407          * these controllers, coalescing buffers to reduce the number of memory
5408          * reads is an effective way to get maximum performance (about 940Mbps).
5409          * Without collapsing TX buffers, the maximum TCP bulk transfer
5410          * performance is about 850Mbps.  However, forcing mbuf coalescing
5411          * consumes a lot of CPU cycles, so leave it off by default.
5412          */
5413         sc->bge_forced_collapse = 0;
5414         snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5415         TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5416         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5417             CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5418             "Number of fragmented TX buffers of a frame allowed before "
5419             "forced collapsing");
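
        /*
         * Usage sketch (hypothetical unit 0): the value can be preset from
         * loader.conf ("dev.bge.0.forced_collapse=2") or changed at run time
         * with sysctl(8); a non-zero value makes the driver collapse TX mbuf
         * chains that are split across more than that many buffers.
         */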
5420
5421         /*
5422          * It seems all Broadcom controllers have a bug that can generate UDP
5423          * datagrams with checksum value 0 when TX UDP checksum offloading is
5424          * enabled.  Generating a UDP checksum value of 0 is an RFC 768
5425          * violation.  Even though the probability of generating such UDP
5426          * datagrams is low, I don't want FreeBSD boxes to inject such
5427          * datagrams into the network, so disable UDP checksum offloading
5428          * by default.  Users can still override this behavior by setting
5429          * the sysctl variable dev.bge.0.forced_udpcsum.
5430          */
5431         sc->bge_forced_udpcsum = 0;
5432         snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5433         TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5434         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5435             CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5436             "Enable UDP checksum offloading even if the controller can "
5437             "generate UDP checksum value 0");
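
        /*
         * Usage sketch (hypothetical unit 0):
         * "sysctl dev.bge.0.forced_udpcsum=1" turns TX UDP checksum
         * offloading back on for users who prefer the offload despite the
         * checksum-0 problem described above.
         */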
5438
5439         if (BGE_IS_5705_PLUS(sc))
5440                 bge_add_sysctl_stats_regs(sc, ctx, children);
5441         else
5442                 bge_add_sysctl_stats(sc, ctx, children);
5443 }
5444
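/*
 * Each BGE_SYSCTL_STAT() entry registers a read-only, unsigned counter whose
 * value is fetched on demand by bge_sysctl_stats(); arg2 carries the offset
 * of the field within struct bge_stats.
 */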
5445 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5446         SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5447             sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5448             desc)
5449
5450 static void
5451 bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5452     struct sysctl_oid_list *parent)
5453 {
5454         struct sysctl_oid *tree;
5455         struct sysctl_oid_list *children, *schildren;
5456
5457         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5458             NULL, "BGE Statistics");
5459         schildren = children = SYSCTL_CHILDREN(tree);
5460         BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5461             children, COSFramesDroppedDueToFilters,
5462             "FramesDroppedDueToFilters");
5463         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5464             children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5465         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5466             children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5467         BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5468             children, nicNoMoreRxBDs, "NoMoreRxBDs");
5469         BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5470             children, ifInDiscards, "InputDiscards");
5471         BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5472             children, ifInErrors, "InputErrors");
5473         BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5474             children, nicRecvThresholdHit, "RecvThresholdHit");
5475         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5476             children, nicDmaReadQueueFull, "DmaReadQueueFull");
5477         BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5478             children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5479         BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5480             children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5481         BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5482             children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5483         BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5484             children, nicRingStatusUpdate, "RingStatusUpdate");
5485         BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5486             children, nicInterrupts, "Interrupts");
5487         BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5488             children, nicAvoidedInterrupts, "AvoidedInterrupts");
5489         BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5490             children, nicSendThresholdHit, "SendThresholdHit");
5491
5492         tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5493             NULL, "BGE RX Statistics");
5494         children = SYSCTL_CHILDREN(tree);
5495         BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5496             children, rxstats.ifHCInOctets, "ifHCInOctets");
5497         BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5498             children, rxstats.etherStatsFragments, "Fragments");
5499         BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5500             children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5501         BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5502             children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5503         BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5504             children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5505         BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5506             children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5507         BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5508             children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5509         BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5510             children, rxstats.xoffPauseFramesReceived,
5511             "xoffPauseFramesReceived");
5512         BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5513             children, rxstats.macControlFramesReceived,
5514             "ControlFramesReceived");
5515         BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5516             children, rxstats.xoffStateEntered, "xoffStateEntered");
5517         BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5518             children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5519         BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5520             children, rxstats.etherStatsJabbers, "Jabbers");
5521         BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5522             children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5523         BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5524             children, rxstats.inRangeLengthError, "inRangeLengthError");
5525         BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5526             children, rxstats.outRangeLengthError, "outRangeLengthError");
5527
5528         tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5529             NULL, "BGE TX Statistics");
5530         children = SYSCTL_CHILDREN(tree);
5531         BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5532             children, txstats.ifHCOutOctets, "ifHCOutOctets");
5533         BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5534             children, txstats.etherStatsCollisions, "Collisions");
5535         BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5536             children, txstats.outXonSent, "XonSent");
5537         BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5538             children, txstats.outXoffSent, "XoffSent");
5539         BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5540             children, txstats.flowControlDone, "flowControlDone");
5541         BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5542             children, txstats.dot3StatsInternalMacTransmitErrors,
5543             "InternalMacTransmitErrors");
5544         BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5545             children, txstats.dot3StatsSingleCollisionFrames,
5546             "SingleCollisionFrames");
5547         BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5548             children, txstats.dot3StatsMultipleCollisionFrames,
5549             "MultipleCollisionFrames");
5550         BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5551             children, txstats.dot3StatsDeferredTransmissions,
5552             "DeferredTransmissions");
5553         BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5554             children, txstats.dot3StatsExcessiveCollisions,
5555             "ExcessiveCollisions");
5556         BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5557             children, txstats.dot3StatsLateCollisions,
5558             "LateCollisions");
5559         BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5560             children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5561         BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5562             children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5563         BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5564             children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5565         BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5566             children, txstats.dot3StatsCarrierSenseErrors,
5567             "CarrierSenseErrors");
5568         BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5569             children, txstats.ifOutDiscards, "Discards");
5570         BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5571             children, txstats.ifOutErrors, "Errors");
5572 }
5573
5574 #undef BGE_SYSCTL_STAT
5575
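/*
 * Unlike BGE_SYSCTL_STAT() above, BGE_SYSCTL_STAT_ADD64() exports a 64-bit
 * counter that lives in the softc (bge_mac_stats) and is kept up to date
 * elsewhere in the driver, so reading the sysctl needs no register access.
 */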
5576 #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d)    \
5577             SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5578
5579 static void
5580 bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5581     struct sysctl_oid_list *parent)
5582 {
5583         struct sysctl_oid *tree;
5584         struct sysctl_oid_list *child, *schild;
5585         struct bge_mac_stats *stats;
5586
5587         stats = &sc->bge_mac_stats;
5588         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5589             NULL, "BGE Statistics");
5590         schild = child = SYSCTL_CHILDREN(tree);
5591         BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5592             &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5593         BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5594             &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5595         BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5596             &stats->DmaWriteHighPriQueueFull,
5597             "NIC DMA Write High Priority Queue Full");
5598         BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5599             &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5600         BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5601             &stats->InputDiscards, "Discarded Input Frames");
5602         BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5603             &stats->InputErrors, "Input Errors");
5604         BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5605             &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5606
5607         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5608             NULL, "BGE RX Statistics");
5609         child = SYSCTL_CHILDREN(tree);
5610         BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5611             &stats->ifHCInOctets, "Inbound Octets");
5612         BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5613             &stats->etherStatsFragments, "Fragments");
5614         BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5615             &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5616         BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5617             &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5618         BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5619             &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5620         BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5621             &stats->dot3StatsFCSErrors, "FCS Errors");
5622         BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5623             &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5624         BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5625             &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5626         BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5627             &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5628         BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5629             &stats->macControlFramesReceived, "MAC Control Frames Received");
5630         BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5631             &stats->xoffStateEntered, "XOFF State Entered");
5632         BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5633             &stats->dot3StatsFramesTooLong, "Frames Too Long");
5634         BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5635             &stats->etherStatsJabbers, "Jabbers");
5636         BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5637             &stats->etherStatsUndersizePkts, "Undersized Packets");
5638
5639         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5640             NULL, "BGE TX Statistics");
5641         child = SYSCTL_CHILDREN(tree);
5642         BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5643             &stats->ifHCOutOctets, "Outbound Octets");
5644         BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5645             &stats->etherStatsCollisions, "TX Collisions");
5646         BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5647             &stats->outXonSent, "XON Sent");
5648         BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5649             &stats->outXoffSent, "XOFF Sent");
5650         BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5651             &stats->dot3StatsInternalMacTransmitErrors,
5652             "Internal MAC TX Errors");
5653         BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5654             &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5655         BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5656             &stats->dot3StatsMultipleCollisionFrames,
5657             "Multiple Collision Frames");
5658         BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5659             &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5660         BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5661             &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5662         BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5663             &stats->dot3StatsLateCollisions, "Late Collisions");
5664         BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5665             &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5666         BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5667             &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5668         BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5669             &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5670 }
5671
5672 #undef  BGE_SYSCTL_STAT_ADD64
5673
5674 static int
5675 bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5676 {
5677         struct bge_softc *sc;
5678         uint32_t result;
5679         int offset;
5680
5681         sc = (struct bge_softc *)arg1;
5682         offset = arg2;
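        /*
         * Counters in the chip's statistics block are 64-bit (bge_hostaddr)
         * quantities; the read below returns the bge_addr_lo word of the
         * selected counter through the memory window.
         */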
5683         result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5684             offsetof(bge_hostaddr, bge_addr_lo));
5685         return (sysctl_handle_int(oidp, &result, 0, req));
5686 }
5687
5688 #ifdef BGE_REGISTER_DEBUG
5689 static int
5690 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5691 {
5692         struct bge_softc *sc;
5693         uint16_t *sbdata;
5694         int error;
5695         int result;
5696         int i, j;
5697
5698         result = -1;
5699         error = sysctl_handle_int(oidp, &result, 0, req);
5700         if (error || (req->newptr == NULL))
5701                 return (error);
5702
5703         if (result == 1) {
5704                 sc = (struct bge_softc *)arg1;
5705
5706                 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5707                 printf("Status Block:\n");
5708                 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
5709                         printf("%06x:", i);
5710                         for (j = 0; j < 8; j++) {
5711                                 printf(" %04x", sbdata[i]);
5712                                 i += 4;
5713                         }
5714                         printf("\n");
5715                 }
5716
5717                 printf("Registers:\n");
5718                 for (i = 0x800; i < 0xA00; ) {
5719                         printf("%06x:", i);
5720                         for (j = 0; j < 8; j++) {
5721                                 printf(" %08x", CSR_READ_4(sc, i));
5722                                 i += 4;
5723                         }
5724                         printf("\n");
5725                 }
5726
5727                 printf("Hardware Flags:\n");
5728                 if (BGE_IS_5755_PLUS(sc))
5729                         printf(" - 5755 Plus\n");
5730                 if (BGE_IS_575X_PLUS(sc))
5731                         printf(" - 575X Plus\n");
5732                 if (BGE_IS_5705_PLUS(sc))
5733                         printf(" - 5705 Plus\n");
5734                 if (BGE_IS_5714_FAMILY(sc))
5735                         printf(" - 5714 Family\n");
5736                 if (BGE_IS_5700_FAMILY(sc))
5737                         printf(" - 5700 Family\n");
5738                 if (sc->bge_flags & BGE_FLAG_JUMBO)
5739                         printf(" - Supports Jumbo Frames\n");
5740                 if (sc->bge_flags & BGE_FLAG_PCIX)
5741                         printf(" - PCI-X Bus\n");
5742                 if (sc->bge_flags & BGE_FLAG_PCIE)
5743                         printf(" - PCI Express Bus\n");
5744                 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5745                         printf(" - No 3 LEDs\n");
5746                 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5747                         printf(" - RX Alignment Bug\n");
5748         }
5749
5750         return (error);
5751 }
5752
5753 static int
5754 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5755 {
5756         struct bge_softc *sc;
5757         int error;
5758         uint16_t result;
5759         uint32_t val;
5760
5761         result = -1;
5762         error = sysctl_handle_int(oidp, &result, 0, req);
5763         if (error || (req->newptr == NULL))
5764                 return (error);
5765
5766         if (result < 0x8000) {
5767                 sc = (struct bge_softc *)arg1;
5768                 val = CSR_READ_4(sc, result);
5769                 printf("reg 0x%06X = 0x%08X\n", result, val);
5770         }
5771
5772         return (error);
5773 }
5774
5775 static int
5776 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5777 {
5778         struct bge_softc *sc;
5779         int error;
5780         uint16_t result;
5781         uint32_t val;
5782
5783         result = -1;
5784         error = sysctl_handle_int(oidp, &result, 0, req);
5785         if (error || (req->newptr == NULL))
5786                 return (error);
5787
5788         if (result < 0x8000) {
5789                 sc = (struct bge_softc *)arg1;
5790                 val = bge_readmem_ind(sc, result);
5791                 printf("mem 0x%06X = 0x%08X\n", result, val);
5792         }
5793
5794         return (error);
5795 }
5796 #endif
5797
5798 static int
5799 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5800 {
5801
5802         if (sc->bge_flags & BGE_FLAG_EADDR)
5803                 return (1);
5804
5805 #ifdef __sparc64__
5806         OF_getetheraddr(sc->bge_dev, ether_addr);
5807         return (0);
5808 #endif
5809         return (1);
5810 }
5811
5812 static int
5813 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
5814 {
5815         uint32_t mac_addr;
5816
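        /*
         * Look for a station address left in NIC memory at 0x0c14/0x0c18,
         * presumably by the bootcode; the value 0x484b ("HK" in ASCII) in
         * the upper half of the first word marks the entry as valid.
         */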
5817         mac_addr = bge_readmem_ind(sc, 0x0c14);
5818         if ((mac_addr >> 16) == 0x484b) {
5819                 ether_addr[0] = (uint8_t)(mac_addr >> 8);
5820                 ether_addr[1] = (uint8_t)mac_addr;
5821                 mac_addr = bge_readmem_ind(sc, 0x0c18);
5822                 ether_addr[2] = (uint8_t)(mac_addr >> 24);
5823                 ether_addr[3] = (uint8_t)(mac_addr >> 16);
5824                 ether_addr[4] = (uint8_t)(mac_addr >> 8);
5825                 ether_addr[5] = (uint8_t)mac_addr;
5826                 return (0);
5827         }
5828         return (1);
5829 }
5830
5831 static int
5832 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
5833 {
5834         int mac_offset = BGE_EE_MAC_OFFSET;
5835
5836         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5837                 mac_offset = BGE_EE_MAC_OFFSET_5906;
5838
5839         return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
5840             ETHER_ADDR_LEN));
5841 }
5842
5843 static int
5844 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
5845 {
5846
5847         if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
5848                 return (1);
5849
5850         return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
5851            ETHER_ADDR_LEN));
5852 }
5853
5854 static int
5855 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
5856 {
5857         static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
5858                 /* NOTE: Order is critical */
5859                 bge_get_eaddr_fw,
5860                 bge_get_eaddr_mem,
5861                 bge_get_eaddr_nvram,
5862                 bge_get_eaddr_eeprom,
5863                 NULL
5864         };
5865         const bge_eaddr_fcn_t *func;
5866
5867         for (func = bge_eaddr_funcs; *func != NULL; ++func) {
5868                 if ((*func)(sc, eaddr) == 0)
5869                         break;
5870         }
5871         return (*func == NULL ? ENXIO : 0);
5872 }