2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled into the driver.
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function on a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX ring.
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
85 #include <net/if_arp.h>
86 #include <net/ethernet.h>
87 #include <net/if_dl.h>
88 #include <net/if_media.h>
92 #include <net/if_types.h>
93 #include <net/if_vlan_var.h>
95 #include <netinet/in_systm.h>
96 #include <netinet/in.h>
97 #include <netinet/ip.h>
99 #include <machine/bus.h>
100 #include <machine/resource.h>
102 #include <sys/rman.h>
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
107 #include <dev/mii/brgphyreg.h>
110 #include <dev/ofw/ofw_bus.h>
111 #include <dev/ofw/openfirm.h>
112 #include <machine/ofw_machdep.h>
113 #include <machine/ver.h>
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
119 #include <dev/bge/if_bgereg.h>
121 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
122 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
124 MODULE_DEPEND(bge, pci, 1, 1, 1);
125 MODULE_DEPEND(bge, ether, 1, 1, 1);
126 MODULE_DEPEND(bge, miibus, 1, 1, 1);
128 /* "device miibus" required. See GENERIC if you get errors here. */
129 #include "miibus_if.h"
132 * Various supported device vendors/types and their names. Note: the
133 * spec seems to indicate that the hardware still has Alteon's vendor
134 * ID burned into it, though it will always be overridden by the vendor
135 * ID in the EEPROM. Just to be safe, we cover all possibilities.
137 static const struct bge_type {
138 uint16_t bge_vid;
139 uint16_t bge_did;
140 } bge_devs[] = {
141 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
142 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
144 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
145 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
148 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
216 { SK_VENDORID, SK_DEVICEID_ALTIMA },
218 { TC_VENDORID, TC_DEVICEID_3C996 },
220 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
221 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
222 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
227 static const struct bge_vendor {
228 uint16_t v_id;
229 const char *v_name;
230 } bge_vendors[] = {
231 { ALTEON_VENDORID, "Alteon" },
232 { ALTIMA_VENDORID, "Altima" },
233 { APPLE_VENDORID, "Apple" },
234 { BCOM_VENDORID, "Broadcom" },
235 { SK_VENDORID, "SysKonnect" },
236 { TC_VENDORID, "3Com" },
237 { FJTSU_VENDORID, "Fujitsu" },
242 static const struct bge_revision {
243 uint32_t br_chipid;
244 const char *br_name;
245 } bge_revisions[] = {
246 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
247 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
248 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
249 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
250 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
251 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
252 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
253 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
254 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
255 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
256 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
257 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
258 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
259 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
260 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
261 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
262 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
263 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
264 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
265 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
266 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
267 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
268 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
269 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
270 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
271 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
272 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
273 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
274 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
275 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
276 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
277 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
278 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
279 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
280 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
281 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
282 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
283 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
284 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
285 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
286 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
287 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
288 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
289 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
290 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
291 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
292 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
293 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
294 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
295 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
296 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
297 /* 5754 and 5787 share the same ASIC ID */
298 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
299 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
300 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
301 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
302 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
303 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
304 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
310 * Some defaults for major revisions, so that newer steppings
311 * that we don't know about have a shot at working.
313 static const struct bge_revision bge_majorrevs[] = {
314 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
315 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
316 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
317 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
318 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
319 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
320 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
321 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
322 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
323 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
324 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
325 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
326 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
327 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
328 /* 5754 and 5787 share the same ASIC ID */
329 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
330 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
331 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
336 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
337 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
338 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
339 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
340 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
341 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
343 const struct bge_revision * bge_lookup_rev(uint32_t);
344 const struct bge_vendor * bge_lookup_vendor(uint16_t);
346 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
348 static int bge_probe(device_t);
349 static int bge_attach(device_t);
350 static int bge_detach(device_t);
351 static int bge_suspend(device_t);
352 static int bge_resume(device_t);
353 static void bge_release_resources(struct bge_softc *);
354 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
355 static int bge_dma_alloc(device_t);
356 static void bge_dma_free(struct bge_softc *);
358 static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
359 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
360 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
361 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
362 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
364 static void bge_txeof(struct bge_softc *);
365 static int bge_rxeof(struct bge_softc *);
367 static void bge_asf_driver_up(struct bge_softc *);
368 static void bge_tick(void *);
369 static void bge_stats_update(struct bge_softc *);
370 static void bge_stats_update_regs(struct bge_softc *);
371 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
373 static void bge_intr(void *);
374 static void bge_start_locked(struct ifnet *);
375 static void bge_start(struct ifnet *);
376 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
377 static void bge_init_locked(struct bge_softc *);
378 static void bge_init(void *);
379 static void bge_stop(struct bge_softc *);
380 static void bge_watchdog(struct bge_softc *);
381 static int bge_shutdown(device_t);
382 static int bge_ifmedia_upd_locked(struct ifnet *);
383 static int bge_ifmedia_upd(struct ifnet *);
384 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
386 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
387 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
389 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
390 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
392 static void bge_setpromisc(struct bge_softc *);
393 static void bge_setmulti(struct bge_softc *);
394 static void bge_setvlan(struct bge_softc *);
396 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
397 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
398 static int bge_init_rx_ring_std(struct bge_softc *);
399 static void bge_free_rx_ring_std(struct bge_softc *);
400 static int bge_init_rx_ring_jumbo(struct bge_softc *);
401 static void bge_free_rx_ring_jumbo(struct bge_softc *);
402 static void bge_free_tx_ring(struct bge_softc *);
403 static int bge_init_tx_ring(struct bge_softc *);
405 static int bge_chipinit(struct bge_softc *);
406 static int bge_blockinit(struct bge_softc *);
408 static int bge_has_eaddr(struct bge_softc *);
409 static uint32_t bge_readmem_ind(struct bge_softc *, int);
410 static void bge_writemem_ind(struct bge_softc *, int, int);
411 static void bge_writembx(struct bge_softc *, int, int);
413 static uint32_t bge_readreg_ind(struct bge_softc *, int);
415 static void bge_writemem_direct(struct bge_softc *, int, int);
416 static void bge_writereg_ind(struct bge_softc *, int, int);
417 static void bge_set_max_readrq(struct bge_softc *, int);
419 static int bge_miibus_readreg(device_t, int, int);
420 static int bge_miibus_writereg(device_t, int, int, int);
421 static void bge_miibus_statchg(device_t);
422 #ifdef DEVICE_POLLING
423 static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
426 #define BGE_RESET_START 1
427 #define BGE_RESET_STOP 2
428 static void bge_sig_post_reset(struct bge_softc *, int);
429 static void bge_sig_legacy(struct bge_softc *, int);
430 static void bge_sig_pre_reset(struct bge_softc *, int);
431 static int bge_reset(struct bge_softc *);
432 static void bge_link_upd(struct bge_softc *);
435 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
436 * leak information to untrusted users. It is also known to cause alignment
437 * traps on certain architectures.
439 #ifdef BGE_REGISTER_DEBUG
440 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
441 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
442 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
444 static void bge_add_sysctls(struct bge_softc *);
445 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
447 static device_method_t bge_methods[] = {
448 /* Device interface */
449 DEVMETHOD(device_probe, bge_probe),
450 DEVMETHOD(device_attach, bge_attach),
451 DEVMETHOD(device_detach, bge_detach),
452 DEVMETHOD(device_shutdown, bge_shutdown),
453 DEVMETHOD(device_suspend, bge_suspend),
454 DEVMETHOD(device_resume, bge_resume),
457 DEVMETHOD(bus_print_child, bus_generic_print_child),
458 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
461 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
462 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
463 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
468 static driver_t bge_driver = {
469 "bge",
470 bge_methods,
471 sizeof(struct bge_softc)
472 };
474 static devclass_t bge_devclass;
476 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
477 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
479 static int bge_allow_asf = 1;
481 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
483 SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
484 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
485 "Allow ASF mode if available");
487 #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
488 #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
489 #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
490 #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
491 #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
494 bge_has_eaddr(struct bge_softc *sc)
497 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
504 * The on-board BGEs found in sun4u machines aren't fitted with
505 * an EEPROM which means that we have to obtain the MAC address
506 * via OFW and that some tests will always fail. We distinguish
507 * such BGEs by the subvendor ID, which also has to be obtained
508 * from OFW instead of the PCI configuration space as the latter
509 * indicates Broadcom as the subvendor of the netboot interface.
510 * For early Blade 1500 and 2500 we even have to check the OFW
511 * device path as the subvendor ID always defaults to Broadcom
514 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
515 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
516 subvendor == SUN_VENDORID)
518 memset(buf, 0, sizeof(buf));
519 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
520 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
521 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
523 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
524 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
532 bge_readmem_ind(struct bge_softc *sc, int off)
539 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
540 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
541 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
546 bge_writemem_ind(struct bge_softc *sc, int off, int val)
552 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
553 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
554 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
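/*
 * Note on bge_readmem_ind()/bge_writemem_ind() above: NIC-internal
 * memory is reached through a window in PCI configuration space
 * rather than through a BAR mapping.  The pattern is to program
 * BGE_PCI_MEMWIN_BASEADDR with the target offset, transfer 32 bits
 * through BGE_PCI_MEMWIN_DATA, and then restore the window base to 0.
 */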
561 bge_set_max_readrq(struct bge_softc *sc, int expr_ptr)
566 KASSERT((sc->bge_flags & BGE_FLAG_PCIE) && expr_ptr != 0,
567 ("%s: not applicable", __func__));
571 val = pci_read_config(dev, expr_ptr + BGE_PCIE_DEVCTL, 2);
572 if ((val & BGE_PCIE_DEVCTL_MAX_READRQ_MASK) !=
573 BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
575 device_printf(dev, "adjust device control 0x%04x ", val);
577 val &= ~BGE_PCIE_DEVCTL_MAX_READRQ_MASK;
578 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
579 pci_write_config(dev, expr_ptr + BGE_PCIE_DEVCTL, val, 2);
581 printf("-> 0x%04x\n", val);
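/*
 * bge_set_max_readrq() above only ever raises the PCIe maximum read
 * request size: if the device control register already advertises
 * 4096 bytes nothing is written.  expr_ptr is the PCI Express
 * capability offset located by the caller.
 */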
587 bge_readreg_ind(struct bge_softc *sc, int off)
593 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
594 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
599 bge_writereg_ind(struct bge_softc *sc, int off, int val)
605 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
606 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
610 bge_writemem_direct(struct bge_softc *sc, int off, int val)
612 CSR_WRITE_4(sc, off, val);
616 bge_writembx(struct bge_softc *sc, int off, int val)
618 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
619 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
621 CSR_WRITE_4(sc, off, val);
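/*
 * The BCM5906 uses the low-priority mailbox region, so bge_writembx()
 * rebases the offset by (BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI) before
 * issuing the write on that chip.
 */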
625 * Map a single buffer address.
629 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
631 struct bge_dmamap_arg *ctx;
638 if (nseg > ctx->bge_maxsegs) {
639 ctx->bge_maxsegs = 0;
643 ctx->bge_busaddr = segs->ds_addr;
647 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
649 uint32_t access, byte = 0;
653 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
654 for (i = 0; i < 8000; i++) {
655 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
663 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
664 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
666 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
667 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
668 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
670 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
676 if (i == BGE_TIMEOUT * 10) {
677 if_printf(sc->bge_ifp, "nvram read timed out\n");
682 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
684 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
686 /* Disable access. */
687 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
690 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
691 CSR_READ_4(sc, BGE_NVRAM_SWARB);
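/*
 * Note on bge_nvram_getbyte() above: the NVRAM interface returns a
 * 32-bit big-endian word for the aligned address, so the requested
 * byte is isolated with bswap32() and an (addr % 4) * 8 shift.  The
 * SET1/CLR1 software arbitration brackets every access, presumably so
 * the host and on-chip firmware don't collide on the NVRAM interface.
 */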
697 * Read a sequence of bytes from NVRAM.
700 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
705 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
708 for (i = 0; i < cnt; i++) {
709 err = bge_nvram_getbyte(sc, off + i, &byte);
715 return (err ? 1 : 0);
719 * Read a byte of data stored in the EEPROM at address 'addr.' The
720 * BCM570x supports both the traditional bitbang interface and an
721 * auto access interface for reading the EEPROM. We use the auto access method.
725 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
731 * Enable use of auto EEPROM access so we can avoid
732 * having to use the bitbang method.
734 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
736 /* Reset the EEPROM, load the clock period. */
737 CSR_WRITE_4(sc, BGE_EE_ADDR,
738 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
741 /* Issue the read EEPROM command. */
742 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
744 /* Wait for completion */
745 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
747 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
751 if (i == BGE_TIMEOUT * 10) {
752 device_printf(sc->bge_dev, "EEPROM read timed out\n");
757 byte = CSR_READ_4(sc, BGE_EE_DATA);
759 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
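/*
 * As in the NVRAM path, the EEPROM auto-access interface hands back a
 * 32-bit word and the (addr % 4) * 8 shift above extracts the byte
 * that was actually requested.  Note that, unlike bge_nvram_getbyte(),
 * no bswap32() is applied to BGE_EE_DATA before the shift.
 */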
765 * Read a sequence of bytes from the EEPROM.
768 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
773 for (i = 0; i < cnt; i++) {
774 error = bge_eeprom_getbyte(sc, off + i, &byte);
780 return (error ? 1 : 0);
784 bge_miibus_readreg(device_t dev, int phy, int reg)
786 struct bge_softc *sc;
787 uint32_t val, autopoll;
790 sc = device_get_softc(dev);
793 * Broadcom's own driver always assumes the internal
794 * PHY is at GMII address 1. On some chips, the PHY responds
795 * to accesses at all addresses, which could cause us to
796 * bogusly attach the PHY 32 times at probe time. Always
797 * restricting the lookup to address 1 is simpler than
798 * trying to figure out which chip revisions should be allowed.
804 /* Reading with autopolling on may trigger PCI errors */
805 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
806 if (autopoll & BGE_MIMODE_AUTOPOLL) {
807 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
811 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
812 BGE_MIPHY(phy) | BGE_MIREG(reg));
814 for (i = 0; i < BGE_TIMEOUT; i++) {
816 val = CSR_READ_4(sc, BGE_MI_COMM);
817 if (!(val & BGE_MICOMM_BUSY))
821 if (i == BGE_TIMEOUT) {
822 device_printf(sc->bge_dev,
823 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
830 val = CSR_READ_4(sc, BGE_MI_COMM);
833 if (autopoll & BGE_MIMODE_AUTOPOLL) {
834 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
838 if (val & BGE_MICOMM_READFAIL)
841 return (val & 0xFFFF);
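/*
 * Note on the MI access pattern above: autopolling is switched off
 * around the manual BGE_MI_COMM transaction (reading with autopoll
 * enabled may trigger PCI errors) and restored afterwards.  A read
 * that comes back with BGE_MICOMM_READFAIL set is treated as a failed
 * access and reported as 0.
 */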
845 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
847 struct bge_softc *sc;
851 sc = device_get_softc(dev);
853 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
854 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
857 /* Reading with autopolling on may trigger PCI errors */
858 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
859 if (autopoll & BGE_MIMODE_AUTOPOLL) {
860 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
864 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
865 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
867 for (i = 0; i < BGE_TIMEOUT; i++) {
869 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
871 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
876 if (i == BGE_TIMEOUT) {
877 device_printf(sc->bge_dev,
878 "PHY write timed out (phy %d, reg %d, val %d)\n",
883 if (autopoll & BGE_MIMODE_AUTOPOLL) {
884 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
892 bge_miibus_statchg(device_t dev)
894 struct bge_softc *sc;
895 struct mii_data *mii;
896 sc = device_get_softc(dev);
897 mii = device_get_softc(sc->bge_miibus);
899 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
900 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
901 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
903 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
905 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
906 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
908 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
912 * Initialize a standard receive ring descriptor.
915 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
917 struct mbuf *m_new = NULL;
919 struct bge_dmamap_arg ctx;
923 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
926 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
929 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
930 m_new->m_data = m_new->m_ext.ext_buf;
933 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
934 m_adj(m_new, ETHER_ALIGN);
935 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
936 r = &sc->bge_ldata.bge_rx_std_ring[i];
939 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
940 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
941 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
942 if (error || ctx.bge_maxsegs == 0) {
944 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
949 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
950 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
951 r->bge_flags = BGE_RXBDFLAG_END;
952 r->bge_len = m_new->m_len;
955 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
956 sc->bge_cdata.bge_rx_std_dmamap[i],
957 BUS_DMASYNC_PREREAD);
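/*
 * The m_adj(m_new, ETHER_ALIGN) calls in the RX buffer setup shift the
 * payload by two bytes so the IP header lands on a 32-bit boundary
 * once the 14-byte Ethernet header has been consumed.  The adjustment
 * is skipped on chips flagged with BGE_FLAG_RX_ALIGNBUG, which are
 * reported to mishandle RX DMA to unaligned buffer addresses.
 */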
963 * Initialize a jumbo receive ring descriptor. This allocates
964 * a 9KB mbuf cluster (MJUM9BYTES) rather than a driver-managed pool.
967 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
969 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
970 struct bge_extrx_bd *r;
971 struct mbuf *m_new = NULL;
976 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
980 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
981 if (!(m_new->m_flags & M_EXT)) {
985 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
988 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
989 m_new->m_data = m_new->m_ext.ext_buf;
992 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
993 m_adj(m_new, ETHER_ALIGN);
995 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
996 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
997 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
1003 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
1006 * Fill in the extended RX buffer descriptor.
1008 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
1009 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1011 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1014 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1015 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1016 r->bge_len3 = segs[3].ds_len;
1018 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1019 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1020 r->bge_len2 = segs[2].ds_len;
1022 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1023 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1024 r->bge_len1 = segs[1].ds_len;
1026 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1027 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1028 r->bge_len0 = segs[0].ds_len;
1031 panic("%s: %d segments\n", __func__, nsegs);
1034 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1035 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1036 BUS_DMASYNC_PREREAD);
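/*
 * A 9KB jumbo cluster may be physically discontiguous, so
 * bge_newbuf_jumbo() loads it with bus_dmamap_load_mbuf_sg() and
 * spreads up to BGE_NSEG_JUMBO segments across the address/length
 * pairs of the extended RX buffer descriptor; the switch above falls
 * through from the highest segment count downward so unused slots
 * remain zeroed.
 */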
1042 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1043 * that's 1MB of memory, which is a lot. For now, we fill only the first
1044 * 256 ring entries and hope that our CPU is fast enough to keep up with the NIC.
1048 bge_init_rx_ring_std(struct bge_softc *sc)
1052 for (i = 0; i < BGE_SSLOTS; i++) {
1053 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1057 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1058 sc->bge_cdata.bge_rx_std_ring_map,
1059 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1061 sc->bge_std = i - 1;
1062 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
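/*
 * sc->bge_std records the last standard ring slot the host has
 * filled; writing it to the BGE_MBX_RX_STD_PROD_LO mailbox hands
 * those buffers to the chip.  The jumbo ring below uses the same
 * producer-index convention.
 */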
1068 bge_free_rx_ring_std(struct bge_softc *sc)
1072 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1073 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1074 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1075 sc->bge_cdata.bge_rx_std_dmamap[i],
1076 BUS_DMASYNC_POSTREAD);
1077 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1078 sc->bge_cdata.bge_rx_std_dmamap[i]);
1079 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1080 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1082 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1083 sizeof(struct bge_rx_bd));
1088 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1090 struct bge_rcb *rcb;
1093 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1094 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1098 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1099 sc->bge_cdata.bge_rx_jumbo_ring_map,
1100 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1102 sc->bge_jumbo = i - 1;
1104 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1105 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1106 BGE_RCB_FLAG_USE_EXT_RX_BD);
1107 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1109 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1115 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1119 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1120 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1121 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1122 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1123 BUS_DMASYNC_POSTREAD);
1124 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1125 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1126 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1127 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1129 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1130 sizeof(struct bge_extrx_bd));
1135 bge_free_tx_ring(struct bge_softc *sc)
1139 if (sc->bge_ldata.bge_tx_ring == NULL)
1142 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1143 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1144 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1145 sc->bge_cdata.bge_tx_dmamap[i],
1146 BUS_DMASYNC_POSTWRITE);
1147 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1148 sc->bge_cdata.bge_tx_dmamap[i]);
1149 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1150 sc->bge_cdata.bge_tx_chain[i] = NULL;
1152 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1153 sizeof(struct bge_tx_bd));
1158 bge_init_tx_ring(struct bge_softc *sc)
1161 sc->bge_tx_saved_considx = 0;
1163 /* Initialize transmit producer index for host-memory send ring. */
1164 sc->bge_tx_prodidx = 0;
1165 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1167 /* 5700 b2 errata */
1168 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1169 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1171 /* NIC-memory send ring not used; initialize to zero. */
1172 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1173 /* 5700 b2 errata */
1174 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1175 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1181 bge_setpromisc(struct bge_softc *sc)
1185 BGE_LOCK_ASSERT(sc);
1189 /* Enable or disable promiscuous mode as needed. */
1190 if (ifp->if_flags & IFF_PROMISC)
1191 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1193 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1197 bge_setmulti(struct bge_softc *sc)
1200 struct ifmultiaddr *ifma;
1201 uint32_t hashes[4] = { 0, 0, 0, 0 };
1204 BGE_LOCK_ASSERT(sc);
1208 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1209 for (i = 0; i < 4; i++)
1210 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1214 /* First, zot all the existing filters. */
1215 for (i = 0; i < 4; i++)
1216 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1218 /* Now program new ones. */
1219 if_maddr_rlock(ifp);
1220 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1221 if (ifma->ifma_addr->sa_family != AF_LINK)
1223 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1224 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1225 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1227 if_maddr_runlock(ifp);
1229 for (i = 0; i < 4; i++)
1230 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
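/*
 * The multicast filter above is a 128-bit hash table split across the
 * four BGE_MAR registers: the low 7 bits of the little-endian CRC32 of
 * each address select one bit.  For example, a hash value of 0x47
 * would set bit 7 of the third register (hashes[2] |= 1 << 7).
 */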
1234 bge_setvlan(struct bge_softc *sc)
1238 BGE_LOCK_ASSERT(sc);
1242 /* Enable or disable VLAN tag stripping as needed. */
1243 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1244 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1246 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1250 bge_sig_pre_reset(sc, type)
1251 struct bge_softc *sc;
1255 * Some chips don't like this so only do this if ASF is enabled
1257 if (sc->bge_asf_mode)
1258 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1260 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1262 case BGE_RESET_START:
1263 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1265 case BGE_RESET_STOP:
1266 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1273 bge_sig_post_reset(sc, type)
1274 struct bge_softc *sc;
1277 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1279 case BGE_RESET_START:
1280 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1283 case BGE_RESET_STOP:
1284 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1291 bge_sig_legacy(sc, type)
1292 struct bge_softc *sc;
1295 if (sc->bge_asf_mode) {
1297 case BGE_RESET_START:
1298 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1300 case BGE_RESET_STOP:
1301 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1307 void bge_stop_fw(struct bge_softc *);
1308 void
1309 bge_stop_fw(sc)
1310 struct bge_softc *sc;
1314 if (sc->bge_asf_mode) {
1315 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1316 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1317 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1319 for (i = 0; i < 100; i++ ) {
1320 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1328 * Do endian, PCI and DMA initialization.
1331 bge_chipinit(struct bge_softc *sc)
1333 uint32_t dma_rw_ctl;
1336 /* Set endianness before we access any non-PCI registers. */
1337 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1339 /* Clear the MAC control register */
1340 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1343 * Clear the MAC statistics block in the NIC's internal memory.
1346 for (i = BGE_STATS_BLOCK;
1347 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1348 BGE_MEMWIN_WRITE(sc, i, 0);
1350 for (i = BGE_STATUS_BLOCK;
1351 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1352 BGE_MEMWIN_WRITE(sc, i, 0);
1355 * Set up the PCI DMA control register.
1357 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1358 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1359 if (sc->bge_flags & BGE_FLAG_PCIE) {
1360 /* Read watermark not used, 128 bytes for write. */
1361 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1362 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1363 if (BGE_IS_5714_FAMILY(sc)) {
1364 /* 256 bytes for read and write. */
1365 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1366 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1367 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1368 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1369 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1370 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1371 /* 1536 bytes for read, 384 bytes for write. */
1372 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1373 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1375 /* 384 bytes for read and write. */
1376 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1377 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1380 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1381 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1384 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1385 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1386 if (tmp == 6 || tmp == 7)
1388 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1390 /* Set PCI-X DMA write workaround. */
1391 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1394 /* Conventional PCI bus: 256 bytes for read and write. */
1395 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1396 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1398 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1399 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1402 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1403 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1404 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1405 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1406 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1407 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1408 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1409 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1412 * Set up general mode register.
1414 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1415 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1416 BGE_MODECTL_TX_NO_PHDR_CSUM);
1419 * The BCM5701 B5 has a bug causing data corruption when using
1420 * 64-bit DMA reads, which can be terminated early and then
1421 * completed later as 32-bit accesses, in combination with certain bridges.
1424 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1425 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1426 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1429 * Tell the firmware the driver is running
1431 if (sc->bge_asf_mode & ASF_STACKUP)
1432 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1435 * Disable memory write invalidate. Apparently it is not supported
1436 * properly by these devices. Also ensure that INTx isn't disabled,
1437 * as these chips need it even when using MSI.
1439 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1440 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1442 /* Set the timer prescaler (always 66MHz) */
1443 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1445 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1446 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1447 DELAY(40); /* XXX */
1449 /* Put PHY into ready state */
1450 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1451 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1459 bge_blockinit(struct bge_softc *sc)
1461 struct bge_rcb *rcb;
1468 * Initialize the memory window pointer register so that
1469 * we can access the first 32K of internal NIC RAM. This will
1470 * allow us to set up the TX send ring RCBs and the RX return
1471 * ring RCBs, plus other things which live in NIC memory.
1473 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1475 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1477 if (!(BGE_IS_5705_PLUS(sc))) {
1478 /* Configure mbuf memory pool */
1479 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1480 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1481 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1483 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1485 /* Configure DMA resource pool */
1486 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1487 BGE_DMA_DESCRIPTORS);
1488 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1491 /* Configure mbuf pool watermarks */
1492 if (!BGE_IS_5705_PLUS(sc)) {
1493 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1494 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1495 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1496 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1497 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1498 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1499 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1501 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1502 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1503 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1506 /* Configure DMA resource watermarks */
1507 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1508 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1510 /* Enable buffer manager */
1511 if (!(BGE_IS_5705_PLUS(sc))) {
1512 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1513 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1515 /* Poll for buffer manager start indication */
1516 for (i = 0; i < BGE_TIMEOUT; i++) {
1518 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1522 if (i == BGE_TIMEOUT) {
1523 device_printf(sc->bge_dev,
1524 "buffer manager failed to start\n");
1529 /* Enable flow-through queues */
1530 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1531 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1533 /* Wait until queue initialization is complete */
1534 for (i = 0; i < BGE_TIMEOUT; i++) {
1536 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1540 if (i == BGE_TIMEOUT) {
1541 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1545 /* Initialize the standard RX ring control block */
1546 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1547 rcb->bge_hostaddr.bge_addr_lo =
1548 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1549 rcb->bge_hostaddr.bge_addr_hi =
1550 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1551 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1552 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1553 if (BGE_IS_5705_PLUS(sc))
1554 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1556 rcb->bge_maxlen_flags =
1557 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1558 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1559 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1560 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1562 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1563 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1566 * Initialize the jumbo RX ring control block
1567 * We set the 'ring disabled' bit in the flags
1568 * field until we're actually ready to start
1569 * using this ring (i.e. once we set the MTU
1570 * high enough to require it).
1572 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1573 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1575 rcb->bge_hostaddr.bge_addr_lo =
1576 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1577 rcb->bge_hostaddr.bge_addr_hi =
1578 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1579 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1580 sc->bge_cdata.bge_rx_jumbo_ring_map,
1581 BUS_DMASYNC_PREREAD);
1582 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1583 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1584 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1585 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1586 rcb->bge_hostaddr.bge_addr_hi);
1587 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1588 rcb->bge_hostaddr.bge_addr_lo);
1590 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1591 rcb->bge_maxlen_flags);
1592 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1594 /* Set up dummy disabled mini ring RCB */
1595 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1596 rcb->bge_maxlen_flags =
1597 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1598 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1599 rcb->bge_maxlen_flags);
1603 * Set the BD ring replenish thresholds. The recommended
1604 * values are 1/8th the number of descriptors allocated to each ring.
1606 * XXX The 5754 requires a lower threshold, so it might be a
1607 * requirement of all 575x family chips. The Linux driver sets
1608 * the lower threshold for all 5705 family chips as well, but there
1609 * are reports that it might not need to be so strict.
1611 * XXX Linux does some extra fiddling here for the 5906 parts as well.
1614 if (BGE_IS_5705_PLUS(sc))
1617 val = BGE_STD_RX_RING_CNT / 8;
1618 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1619 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1622 * Disable all unused send rings by setting the 'ring disabled'
1623 * bit in the flags field of all the TX send ring control blocks.
1624 * These are located in NIC memory.
1626 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1627 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1628 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1629 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1630 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1631 vrcb += sizeof(struct bge_rcb);
1634 /* Configure TX RCB 0 (we use only the first ring) */
1635 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1636 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1637 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1638 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1639 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1640 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1641 if (!(BGE_IS_5705_PLUS(sc)))
1642 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1643 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1645 /* Disable all unused RX return rings */
1646 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1647 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1648 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1649 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1650 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1651 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1652 BGE_RCB_FLAG_RING_DISABLED));
1653 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1654 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1655 (i * (sizeof(uint64_t))), 0);
1656 vrcb += sizeof(struct bge_rcb);
1659 /* Initialize RX ring indexes */
1660 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1661 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1662 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1665 * Set up RX return ring 0
1666 * Note that the NIC address for RX return rings is 0x00000000.
1667 * The return rings live entirely within the host, so the
1668 * nicaddr field in the RCB isn't used.
1670 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1671 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1672 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1673 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1674 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1675 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1676 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1678 /* Set random backoff seed for TX */
1679 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1680 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1681 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1682 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1683 BGE_TX_BACKOFF_SEED_MASK);
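/*
 * The backoff seed above is simply the sum of the station address
 * bytes masked with BGE_TX_BACKOFF_SEED_MASK; presumably this keeps
 * multiple NICs on the same segment from choosing identical random
 * backoff sequences.
 */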
1685 /* Set inter-packet gap */
1686 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1689 * Specify which ring to use for packets that don't match any RX rules.
1692 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1695 * Configure number of RX lists. One interrupt distribution
1696 * list, sixteen active lists, one bad frames class.
1698 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1700 /* Initialize RX list placement stats mask. */
1701 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1702 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1704 /* Disable host coalescing until we get it set up */
1705 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1707 /* Poll to make sure it's shut down. */
1708 for (i = 0; i < BGE_TIMEOUT; i++) {
1710 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1714 if (i == BGE_TIMEOUT) {
1715 device_printf(sc->bge_dev,
1716 "host coalescing engine failed to idle\n");
1720 /* Set up host coalescing defaults */
1721 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1722 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1723 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1724 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1725 if (!(BGE_IS_5705_PLUS(sc))) {
1726 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1727 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1729 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1730 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1732 /* Set up address of statistics block */
1733 if (!(BGE_IS_5705_PLUS(sc))) {
1734 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1735 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1736 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1737 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1738 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1739 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1740 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1743 /* Set up address of status block */
1744 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1745 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1746 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1747 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1748 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1749 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1751 /* Turn on host coalescing state machine */
1752 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1754 /* Turn on RX BD completion state machine and enable attentions */
1755 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1756 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1758 /* Turn on RX list placement state machine */
1759 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1761 /* Turn on RX list selector state machine. */
1762 if (!(BGE_IS_5705_PLUS(sc)))
1763 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1765 /* Turn on DMA, clear stats */
1766 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
1767 BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
1768 BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
1769 BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
1770 ((sc->bge_flags & BGE_FLAG_TBI) ?
1771 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1773 /* Set misc. local control, enable interrupts on attentions */
1774 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1777 /* Assert GPIO pins for PHY reset */
1778 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1779 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1780 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1781 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1784 /* Turn on DMA completion state machine */
1785 if (!(BGE_IS_5705_PLUS(sc)))
1786 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1788 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1790 /* Enable host coalescing bug fix. */
1791 if (BGE_IS_5755_PLUS(sc))
1792 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1794 /* Turn on write DMA state machine */
1795 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1798 /* Turn on read DMA state machine */
1799 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1800 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
1801 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
1802 sc->bge_asicrev == BGE_ASICREV_BCM57780)
1803 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1804 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1805 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1806 if (sc->bge_flags & BGE_FLAG_PCIE)
1807 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1808 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1811 /* Turn on RX data completion state machine */
1812 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1814 /* Turn on RX BD initiator state machine */
1815 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1817 /* Turn on RX data and RX BD initiator state machine */
1818 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1820 /* Turn on Mbuf cluster free state machine */
1821 if (!(BGE_IS_5705_PLUS(sc)))
1822 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1824 /* Turn on send BD completion state machine */
1825 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1827 /* Turn on send data completion state machine */
1828 val = BGE_SDCMODE_ENABLE;
1829 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
1830 val |= BGE_SDCMODE_CDELAY;
1831 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1833 /* Turn on send data initiator state machine */
1834 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1836 /* Turn on send BD initiator state machine */
1837 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1839 /* Turn on send BD selector state machine */
1840 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1842 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1843 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1844 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1846 /* ack/clear link change events */
1847 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1848 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1849 BGE_MACSTAT_LINK_CHANGED);
1850 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1852 /* Enable PHY auto polling (for MII/GMII only) */
1853 if (sc->bge_flags & BGE_FLAG_TBI) {
1854 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1856 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1857 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1858 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1859 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1860 BGE_EVTENB_MI_INTERRUPT);
1864 * Clear any pending link state attention.
1865 * Otherwise some link state change events may be lost until attention
1866 * is cleared by bge_intr() -> bge_link_upd() sequence.
1867 * It's not necessary on newer BCM chips - perhaps enabling link
1868 * state change attentions implies clearing pending attention.
1870 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1871 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1872 BGE_MACSTAT_LINK_CHANGED);
1874 /* Enable link state change attentions. */
1875 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1880 const struct bge_revision *
1881 bge_lookup_rev(uint32_t chipid)
1883 const struct bge_revision *br;
1885 for (br = bge_revisions; br->br_name != NULL; br++) {
1886 if (br->br_chipid == chipid)
1890 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1891 if (br->br_chipid == BGE_ASICREV(chipid))
1898 const struct bge_vendor *
1899 bge_lookup_vendor(uint16_t vid)
1901 const struct bge_vendor *v;
1903 for (v = bge_vendors; v->v_name != NULL; v++)
1907 panic("%s: unknown vendor %d", __func__, vid);
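/*
 * A usage sketch for the two lookups above (bge_probe() below follows
 * this pattern):
 *
 *	v = bge_lookup_vendor(pci_get_vendor(dev));
 *	br = bge_lookup_rev(chipid);
 *	printf("%s %s\n", v->v_name,
 *	    br != NULL ? br->br_name : "NetXtreme Ethernet Controller");
 *
 * bge_lookup_rev() falls back to the bge_majorrevs table when only the
 * ASIC revision matches and may return NULL for unknown silicon, so
 * callers must check for NULL.  bge_lookup_vendor() instead panics on
 * an unknown vendor, since probe has already matched the vendor ID
 * against bge_devs.
 */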
1912 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1913 * against our list and return its name if we find a match.
1915 * Note that since the Broadcom controller contains VPD support, we
1916 * try to get the device name string from the controller itself instead
1917 * of the compiled-in string. It guarantees we'll always announce the
1918 * right product name. We fall back to the compiled-in string when
1919 * VPD is unavailable or corrupt.
1922 bge_probe(device_t dev)
1924 const struct bge_type *t = bge_devs;
1925 struct bge_softc *sc = device_get_softc(dev);
1929 vid = pci_get_vendor(dev);
1930 did = pci_get_device(dev);
1931 while (t->bge_vid != 0) {
1932 if ((vid == t->bge_vid) && (did == t->bge_did)) {
1933 char model[64], buf[96];
1934 const struct bge_revision *br;
1935 const struct bge_vendor *v;
1938 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
1939 BGE_PCIMISCCTL_ASICREV_SHIFT;
1940 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
1941 id = pci_read_config(dev,
1942 BGE_PCI_PRODID_ASICREV, 4);
1943 br = bge_lookup_rev(id);
1944 v = bge_lookup_vendor(vid);
1946 #if __FreeBSD_version > 700024
1949 if (bge_has_eaddr(sc) &&
1950 pci_get_vpd_ident(dev, &pname) == 0)
1951 snprintf(model, 64, "%s", pname);
1954 snprintf(model, 64, "%s %s",
1956 br != NULL ? br->br_name :
1957 "NetXtreme Ethernet Controller");
1959 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
1960 br != NULL ? "" : "unknown ", id);
1961 device_set_desc_copy(dev, buf);
1962 if (pci_get_subvendor(dev) == DELL_VENDORID)
1963 sc->bge_flags |= BGE_FLAG_NO_3LED;
1964 if (did == BCOM_DEVICEID_BCM5755M)
1965 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
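/*
 * Free everything set up by bge_dma_alloc(): per-buffer DMA maps first,
 * then each ring's map, memory and tag, and finally the parent tag.
 */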
1975 bge_dma_free(struct bge_softc *sc)
1979 /* Destroy DMA maps for RX buffers. */
1980 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1981 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1982 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1983 sc->bge_cdata.bge_rx_std_dmamap[i]);
1986 /* Destroy DMA maps for jumbo RX buffers. */
1987 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1988 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1989 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1990 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1993 /* Destroy DMA maps for TX buffers. */
1994 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1995 if (sc->bge_cdata.bge_tx_dmamap[i])
1996 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1997 sc->bge_cdata.bge_tx_dmamap[i]);
2000 if (sc->bge_cdata.bge_mtag)
2001 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
2004 /* Destroy standard RX ring. */
2005 if (sc->bge_cdata.bge_rx_std_ring_map)
2006 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2007 sc->bge_cdata.bge_rx_std_ring_map);
2008 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2009 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2010 sc->bge_ldata.bge_rx_std_ring,
2011 sc->bge_cdata.bge_rx_std_ring_map);
2013 if (sc->bge_cdata.bge_rx_std_ring_tag)
2014 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2016 /* Destroy jumbo RX ring. */
2017 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2018 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2019 sc->bge_cdata.bge_rx_jumbo_ring_map);
2021 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2022 sc->bge_ldata.bge_rx_jumbo_ring)
2023 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2024 sc->bge_ldata.bge_rx_jumbo_ring,
2025 sc->bge_cdata.bge_rx_jumbo_ring_map);
2027 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2028 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2030 /* Destroy RX return ring. */
2031 if (sc->bge_cdata.bge_rx_return_ring_map)
2032 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2033 sc->bge_cdata.bge_rx_return_ring_map);
2035 if (sc->bge_cdata.bge_rx_return_ring_map &&
2036 sc->bge_ldata.bge_rx_return_ring)
2037 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2038 sc->bge_ldata.bge_rx_return_ring,
2039 sc->bge_cdata.bge_rx_return_ring_map);
2041 if (sc->bge_cdata.bge_rx_return_ring_tag)
2042 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2044 /* Destroy TX ring. */
2045 if (sc->bge_cdata.bge_tx_ring_map)
2046 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2047 sc->bge_cdata.bge_tx_ring_map);
2049 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2050 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2051 sc->bge_ldata.bge_tx_ring,
2052 sc->bge_cdata.bge_tx_ring_map);
2054 if (sc->bge_cdata.bge_tx_ring_tag)
2055 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2057 /* Destroy status block. */
2058 if (sc->bge_cdata.bge_status_map)
2059 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2060 sc->bge_cdata.bge_status_map);
2062 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2063 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2064 sc->bge_ldata.bge_status_block,
2065 sc->bge_cdata.bge_status_map);
2067 if (sc->bge_cdata.bge_status_tag)
2068 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2070 /* Destroy statistics block. */
2071 if (sc->bge_cdata.bge_stats_map)
2072 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2073 sc->bge_cdata.bge_stats_map);
2075 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2076 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2077 sc->bge_ldata.bge_stats,
2078 sc->bge_cdata.bge_stats_map);
2080 if (sc->bge_cdata.bge_stats_tag)
2081 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2083 /* Destroy the parent tag. */
2084 if (sc->bge_cdata.bge_parent_tag)
2085 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
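/*
 * Allocate all DMA resources: a parent tag, an mbuf tag with per-buffer
 * maps for the RX/TX chains, and, for each ring (standard RX, jumbo RX,
 * RX return, TX, status block, statistics block), a dedicated tag, a
 * chunk of DMA'able memory and its bus address.
 */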
2089 bge_dma_alloc(device_t dev)
2091 struct bge_dmamap_arg ctx;
2092 struct bge_softc *sc;
2095 sc = device_get_softc(dev);
2098 * Allocate the parent bus DMA tag appropriate for PCI.
2100 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2101 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2102 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2103 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2106 device_printf(sc->bge_dev,
2107 "could not allocate parent dma tag\n");
2112 * Create tag for mbufs.
2114 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
2115 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2116 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
2117 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
2120 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2124 /* Create DMA maps for RX buffers. */
2125 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2126 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2127 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2129 device_printf(sc->bge_dev,
2130 "can't create DMA map for RX\n");
2135 /* Create DMA maps for TX buffers. */
2136 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2137 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2138 &sc->bge_cdata.bge_tx_dmamap[i]);
2140 device_printf(sc->bge_dev,
2141 "can't create DMA map for RX\n");
2146 /* Create tag for standard RX ring. */
2147 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2148 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2149 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
2150 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
2153 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2157 /* Allocate DMA'able memory for standard RX ring. */
2158 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
2159 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
2160 &sc->bge_cdata.bge_rx_std_ring_map);
2164 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
2166 /* Load the address of the standard RX ring. */
2167 ctx.bge_maxsegs = 1;
2170 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2171 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2172 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2177 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
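/*
 * Note: with bge_maxsegs set to 1, the bge_dma_map_addr() callback
 * records the single segment's bus address in ctx.bge_busaddr, which
 * is what we save as each ring's physical address.
 */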
2179 /* Create tags for jumbo mbufs. */
2180 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2181 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2182 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2183 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2184 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2186 device_printf(sc->bge_dev,
2187 "could not allocate jumbo dma tag\n");
2191 /* Create tag for jumbo RX ring. */
2192 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2193 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2194 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2195 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2198 device_printf(sc->bge_dev,
2199 "could not allocate jumbo ring dma tag\n");
2203 /* Allocate DMA'able memory for jumbo RX ring. */
2204 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2205 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
2206 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2207 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2211 /* Load the address of the jumbo RX ring. */
2212 ctx.bge_maxsegs = 1;
2215 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2216 sc->bge_cdata.bge_rx_jumbo_ring_map,
2217 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2218 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2223 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2225 /* Create DMA maps for jumbo RX buffers. */
2226 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2227 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2228 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2230 device_printf(sc->bge_dev,
2231 "can't create DMA map for jumbo RX\n");
2238 /* Create tag for RX return ring. */
2239 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2240 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2241 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2242 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2245 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2249 /* Allocate DMA'able memory for RX return ring. */
2250 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2251 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2252 &sc->bge_cdata.bge_rx_return_ring_map);
2256 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2257 BGE_RX_RTN_RING_SZ(sc));
2259 /* Load the address of the RX return ring. */
2260 ctx.bge_maxsegs = 1;
2263 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2264 sc->bge_cdata.bge_rx_return_ring_map,
2265 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2266 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2271 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2273 /* Create tag for TX ring. */
2274 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2275 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2276 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2277 &sc->bge_cdata.bge_tx_ring_tag);
2280 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2284 /* Allocate DMA'able memory for TX ring. */
2285 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2286 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2287 &sc->bge_cdata.bge_tx_ring_map);
2291 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2293 /* Load the address of the TX ring. */
2294 ctx.bge_maxsegs = 1;
2297 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2298 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2299 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2304 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2306 /* Create tag for status block. */
2307 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2308 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2309 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2310 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2313 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2317 /* Allocate DMA'able memory for status block. */
2318 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2319 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2320 &sc->bge_cdata.bge_status_map);
2324 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2326 /* Load the address of the status block. */
2328 ctx.bge_maxsegs = 1;
2330 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2331 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2332 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2337 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2339 /* Create tag for statistics block. */
2340 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2341 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2342 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2343 &sc->bge_cdata.bge_stats_tag);
2346 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2350 /* Allocate DMA'able memory for statistics block. */
2351 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2352 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2353 &sc->bge_cdata.bge_stats_map);
2357 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2359 /* Load the address of the statistics block. */
2361 ctx.bge_maxsegs = 1;
2363 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2364 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2365 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2370 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2375 #if __FreeBSD_version > 602105
2377 * Return true if this device has more than one port.
2380 bge_has_multiple_ports(struct bge_softc *sc)
2382 device_t dev = sc->bge_dev;
2383 u_int b, d, f, fscan, s;
2385 d = pci_get_domain(dev);
2386 b = pci_get_bus(dev);
2387 s = pci_get_slot(dev);
2388 f = pci_get_function(dev);
2389 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2390 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2396 * Return true if MSI can be used with this device.
2399 bge_can_use_msi(struct bge_softc *sc)
2401 int can_use_msi = 0;
2403 switch (sc->bge_asicrev) {
2404 case BGE_ASICREV_BCM5714_A0:
2405 case BGE_ASICREV_BCM5714:
2407 * Apparently, MSI doesn't work when these chips are
2408 * configured in single-port mode.
2410 if (bge_has_multiple_ports(sc))
2413 case BGE_ASICREV_BCM5750:
2414 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2415 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2419 if (BGE_IS_575X_PLUS(sc))
2422 return (can_use_msi);
2427 bge_attach(device_t dev)
2430 struct bge_softc *sc;
2431 uint32_t hwcfg = 0, misccfg;
2432 u_char eaddr[ETHER_ADDR_LEN];
2433 int error, reg, rid, trys;
2435 sc = device_get_softc(dev);
2439 * Map control/status registers.
2441 pci_enable_busmaster(dev);
2444 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2447 if (sc->bge_res == NULL) {
2448 device_printf (sc->bge_dev, "couldn't map memory\n");
2453 /* Save various chip information. */
2455 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2456 BGE_PCIMISCCTL_ASICREV_SHIFT;
2457 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
2458 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
2460 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2461 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2464 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2465 * 5705 A0 and A1 chips.
2467 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2468 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2469 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2470 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2471 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2473 if (bge_has_eaddr(sc))
2474 sc->bge_flags |= BGE_FLAG_EADDR;
2476 /* Save chipset family. */
2477 switch (sc->bge_asicrev) {
2478 case BGE_ASICREV_BCM5755:
2479 case BGE_ASICREV_BCM5761:
2480 case BGE_ASICREV_BCM5784:
2481 case BGE_ASICREV_BCM5785:
2482 case BGE_ASICREV_BCM5787:
2483 case BGE_ASICREV_BCM57780:
2484 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2487 case BGE_ASICREV_BCM5700:
2488 case BGE_ASICREV_BCM5701:
2489 case BGE_ASICREV_BCM5703:
2490 case BGE_ASICREV_BCM5704:
2491 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2493 case BGE_ASICREV_BCM5714_A0:
2494 case BGE_ASICREV_BCM5780:
2495 case BGE_ASICREV_BCM5714:
2496 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2498 case BGE_ASICREV_BCM5750:
2499 case BGE_ASICREV_BCM5752:
2500 case BGE_ASICREV_BCM5906:
2501 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2503 case BGE_ASICREV_BCM5705:
2504 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2508 /* Set various bug flags. */
2509 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2510 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2511 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2512 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2513 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2514 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2515 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2516 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2517 if (BGE_IS_5705_PLUS(sc) &&
2518 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2519 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2520 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2521 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2522 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2523 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
2524 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2525 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2526 sc->bge_flags |= BGE_FLAG_BER_BUG;
2531 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2532 * but I do not know the DEVICEID for the 5788M.
2534 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2535 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2536 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2537 sc->bge_flags |= BGE_FLAG_5788;
2540 * Check if this is a PCI-X or PCI Express device.
2542 #if __FreeBSD_version > 602101
2543 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2545 * Found a PCI Express capabilities register, this
2546 * must be a PCI Express device.
2549 sc->bge_flags |= BGE_FLAG_PCIE;
2551 if (BGE_IS_5705_PLUS(sc)) {
2552 reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2553 if ((reg & 0xFF) == BGE_PCIE_CAPID) {
2554 sc->bge_flags |= BGE_FLAG_PCIE;
2555 reg = BGE_PCIE_CAPID;
2557 bge_set_max_readrq(sc, reg);
2561 * Check if the device is in PCI-X Mode.
2562 * (This bit is not valid on PCI Express controllers.)
2564 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2565 BGE_PCISTATE_PCI_BUSMODE) == 0)
2566 sc->bge_flags |= BGE_FLAG_PCIX;
2569 #if __FreeBSD_version > 602105
2574 * Allocate the interrupt, using MSI if possible. These devices
2575 * support 8 MSI messages, but only the first one is used in normal operation.
2578 if (bge_can_use_msi(sc)) {
2579 msicount = pci_msi_count(dev);
2584 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2586 sc->bge_flags |= BGE_FLAG_MSI;
2594 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2595 RF_SHAREABLE | RF_ACTIVE);
2597 if (sc->bge_irq == NULL) {
2598 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2605 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2606 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2607 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2608 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2610 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2612 /* Try to reset the chip. */
2613 if (bge_reset(sc)) {
2614 device_printf(sc->bge_dev, "chip reset failed\n");
2619 sc->bge_asf_mode = 0;
2620 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2621 == BGE_MAGIC_NUMBER)) {
2622 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2624 sc->bge_asf_mode |= ASF_ENABLE;
2625 sc->bge_asf_mode |= ASF_STACKUP;
2626 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2627 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2632 /* Try to reset the chip again the nice way. */
2634 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2635 if (bge_reset(sc)) {
2636 device_printf(sc->bge_dev, "chip reset failed\n");
2641 bge_sig_legacy(sc, BGE_RESET_STOP);
2642 bge_sig_post_reset(sc, BGE_RESET_STOP);
2644 if (bge_chipinit(sc)) {
2645 device_printf(sc->bge_dev, "chip initialization failed\n");
2650 error = bge_get_eaddr(sc, eaddr);
2652 device_printf(sc->bge_dev,
2653 "failed to read station address\n");
2658 /* 5705 limits RX return ring to 512 entries. */
2659 if (BGE_IS_5705_PLUS(sc))
2660 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2662 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2664 if (bge_dma_alloc(dev)) {
2665 device_printf(sc->bge_dev,
2666 "failed to allocate DMA resources\n");
2671 /* Set default tuneable values. */
2672 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2673 sc->bge_rx_coal_ticks = 150;
2674 sc->bge_tx_coal_ticks = 150;
2675 sc->bge_rx_max_coal_bds = 10;
2676 sc->bge_tx_max_coal_bds = 10;
2678 /* Set up ifnet structure */
2679 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2681 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2686 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2687 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2688 ifp->if_ioctl = bge_ioctl;
2689 ifp->if_start = bge_start;
2690 ifp->if_init = bge_init;
2691 ifp->if_mtu = ETHERMTU;
2692 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2693 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2694 IFQ_SET_READY(&ifp->if_snd);
2695 ifp->if_hwassist = BGE_CSUM_FEATURES;
2696 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2698 #ifdef IFCAP_VLAN_HWCSUM
2699 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2701 ifp->if_capenable = ifp->if_capabilities;
2702 #ifdef DEVICE_POLLING
2703 ifp->if_capabilities |= IFCAP_POLLING;
2707 * 5700 B0 chips do not support checksumming correctly due to hardware bugs.
2710 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2711 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2712 ifp->if_capenable &= ~IFCAP_HWCSUM;
2713 ifp->if_hwassist = 0;
2717 * Figure out what sort of media we have by checking the
2718 * hardware config word in the first 32k of NIC internal memory,
2719 * or fall back to examining the EEPROM if necessary.
2720 * Note: on some BCM5700 cards, this value appears to be unset.
2721 * If that's the case, we have to rely on identifying the NIC
2722 * by its PCI subsystem ID, as we do below for the SysKonnect
2725 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2726 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2727 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2728 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2729 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2731 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2735 hwcfg = ntohl(hwcfg);
2738 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2739 sc->bge_flags |= BGE_FLAG_TBI;
2741 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2742 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2743 sc->bge_flags |= BGE_FLAG_TBI;
2745 if (sc->bge_flags & BGE_FLAG_TBI) {
2746 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2748 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2749 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2751 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2752 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2753 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2756 * Do transceiver setup and tell the firmware the
2757 * driver is down so we can try to get access the
2758 * probe if ASF is running. Retry a couple of times
2759 * if we get a conflict with the ASF firmware accessing
2763 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2765 bge_asf_driver_up(sc);
2767 if (mii_phy_probe(dev, &sc->bge_miibus,
2768 bge_ifmedia_upd, bge_ifmedia_sts)) {
2770 device_printf(sc->bge_dev, "Try again\n");
2771 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2776 device_printf(sc->bge_dev, "MII without any PHY!\n");
2782 * Now tell the firmware we are going up after probing the PHY
2784 if (sc->bge_asf_mode & ASF_STACKUP)
2785 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2789 * When using the BCM5701 in PCI-X mode, data corruption has
2790 * been observed in the first few bytes of some received packets.
2791 * Aligning the packet buffer in memory eliminates the corruption.
2792 * Unfortunately, this misaligns the packet payloads. On platforms
2793 * which do not support unaligned accesses, we will realign the
2794 * payloads by copying the received packets.
2796 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2797 sc->bge_flags & BGE_FLAG_PCIX)
2798 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2801 * Call MI attach routine.
2803 ether_ifattach(ifp, eaddr);
2804 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2809 #if __FreeBSD_version > 700030
2810 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2811 NULL, bge_intr, sc, &sc->bge_intrhand);
2813 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2814 bge_intr, sc, &sc->bge_intrhand);
2819 device_printf(sc->bge_dev, "couldn't set up irq\n");
2822 bge_add_sysctls(sc);
2827 bge_release_resources(sc);
2833 bge_detach(device_t dev)
2835 struct bge_softc *sc;
2838 sc = device_get_softc(dev);
2841 #ifdef DEVICE_POLLING
2842 if (ifp->if_capenable & IFCAP_POLLING)
2843 ether_poll_deregister(ifp);
2851 callout_drain(&sc->bge_stat_ch);
2853 ether_ifdetach(ifp);
2855 if (sc->bge_flags & BGE_FLAG_TBI) {
2856 ifmedia_removeall(&sc->bge_ifmedia);
2858 bus_generic_detach(dev);
2859 device_delete_child(dev, sc->bge_miibus);
2862 bge_release_resources(sc);
2868 bge_release_resources(struct bge_softc *sc)
2874 if (sc->bge_intrhand != NULL)
2875 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
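/* MSI interrupt resources are allocated at rid 1; legacy INTx uses rid 0. */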
2877 if (sc->bge_irq != NULL)
2878 bus_release_resource(dev, SYS_RES_IRQ,
2879 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2881 #if __FreeBSD_version > 602105
2882 if (sc->bge_flags & BGE_FLAG_MSI)
2883 pci_release_msi(dev);
2886 if (sc->bge_res != NULL)
2887 bus_release_resource(dev, SYS_RES_MEMORY,
2888 BGE_PCI_BAR0, sc->bge_res);
2890 if (sc->bge_ifp != NULL)
2891 if_free(sc->bge_ifp);
2895 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2896 BGE_LOCK_DESTROY(sc);
2900 bge_reset(struct bge_softc *sc)
2903 uint32_t cachesize, command, pcistate, reset, val;
2904 void (*write_op)(struct bge_softc *, int, int);
2909 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2910 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2911 if (sc->bge_flags & BGE_FLAG_PCIE)
2912 write_op = bge_writemem_direct;
2914 write_op = bge_writemem_ind;
2916 write_op = bge_writereg_ind;
2918 /* Save some important PCI state. */
2919 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2920 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2921 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2923 pci_write_config(dev, BGE_PCI_MISC_CTL,
2924 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2925 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2927 /* Disable fastboot on controllers that support it. */
2928 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2929 BGE_IS_5755_PLUS(sc)) {
2931 device_printf(sc->bge_dev, "Disabling fastboot\n");
2932 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2936 * Write the magic number to SRAM at offset 0xB50.
2937 * When firmware finishes its initialization it will
2938 * write ~BGE_MAGIC_NUMBER to the same location.
2940 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2942 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
2944 /* XXX: Broadcom Linux driver. */
2945 if (sc->bge_flags & BGE_FLAG_PCIE) {
2946 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
2947 CSR_WRITE_4(sc, 0x7E2C, 0x20);
2948 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2949 /* Prevent PCIE link training during global reset */
2950 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
2956 * Set GPHY Power Down Override to leave GPHY
2957 * powered up in D0 uninitialized.
2959 if (BGE_IS_5705_PLUS(sc))
2960 reset |= 0x04000000;
2962 /* Issue global reset */
2963 write_op(sc, BGE_MISC_CFG, reset);
2965 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2966 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2967 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2968 val | BGE_VCPU_STATUS_DRV_RESET);
2969 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2970 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2971 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2976 /* XXX: Broadcom Linux driver. */
2977 if (sc->bge_flags & BGE_FLAG_PCIE) {
2978 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2979 DELAY(500000); /* wait for link training to complete */
2980 val = pci_read_config(dev, 0xC4, 4);
2981 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
2984 * Set PCIE max payload size to 128 bytes and clear error status.
2987 pci_write_config(dev, 0xD8, 0xF5000, 4);
2990 /* Reset some of the PCI state that got zapped by reset. */
2991 pci_write_config(dev, BGE_PCI_MISC_CTL,
2992 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2993 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2994 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2995 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2996 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
2998 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
2999 if (BGE_IS_5714_FAMILY(sc)) {
3000 /* This chip disables MSI on reset. */
3001 if (sc->bge_flags & BGE_FLAG_MSI) {
3002 val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
3003 pci_write_config(dev, BGE_PCI_MSI_CTL,
3004 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3005 val = CSR_READ_4(sc, BGE_MSI_MODE);
3006 CSR_WRITE_4(sc, BGE_MSI_MODE,
3007 val | BGE_MSIMODE_ENABLE);
3009 val = CSR_READ_4(sc, BGE_MARB_MODE);
3010 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3012 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3014 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3015 for (i = 0; i < BGE_TIMEOUT; i++) {
3016 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3017 if (val & BGE_VCPU_STATUS_INIT_DONE)
3021 if (i == BGE_TIMEOUT) {
3022 device_printf(sc->bge_dev, "reset timed out\n");
3027 * Poll until we see the 1's complement of the magic number.
3028 * This indicates that the firmware initialization is complete.
3029 * We expect this to fail, though, if no chip containing the
3030 * Ethernet address is fitted.
3032 for (i = 0; i < BGE_TIMEOUT; i++) {
3034 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3035 if (val == ~BGE_MAGIC_NUMBER)
3039 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3040 device_printf(sc->bge_dev, "firmware handshake timed out, "
3041 "found 0x%08x\n", val);
3045 * XXX Wait for the value of the PCISTATE register to
3046 * return to its original pre-reset state. This is a
3047 * fairly good indicator of reset completion. If we don't
3048 * wait for the reset to fully complete, trying to read
3049 * from the device's non-PCI registers may yield garbage
3052 for (i = 0; i < BGE_TIMEOUT; i++) {
3053 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3058 if (sc->bge_flags & BGE_FLAG_PCIE) {
3059 reset = bge_readmem_ind(sc, 0x7C00);
3060 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
3063 /* Fix up byte swapping. */
3064 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3065 BGE_MODECTL_BYTESWAP_DATA);
3067 /* Tell the ASF firmware we are up */
3068 if (sc->bge_asf_mode & ASF_STACKUP)
3069 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3071 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3074 * The 5704 in TBI mode apparently needs some special
3075 * adjustment to ensure the SERDES drive level is set to 1.2V.
3078 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3079 sc->bge_flags & BGE_FLAG_TBI) {
3080 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3081 val = (val & ~0xFFF) | 0x880;
3082 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3085 /* XXX: Broadcom Linux driver. */
3086 if (sc->bge_flags & BGE_FLAG_PCIE &&
3087 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3088 val = CSR_READ_4(sc, 0x7C00);
3089 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3097 * Frame reception handling. This is called if there's a frame
3098 * on the receive return list.
3100 * Note: we have to be able to handle two possibilities here:
3101 * 1) the frame is from the jumbo receive ring
3102 * 2) the frame is from the standard receive ring
3106 bge_rxeof(struct bge_softc *sc)
3109 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3110 uint16_t rx_prod, rx_cons;
3112 BGE_LOCK_ASSERT(sc);
3113 rx_cons = sc->bge_rx_saved_considx;
3114 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3116 /* Nothing to do. */
3117 if (rx_cons == rx_prod)
3122 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3123 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3124 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3125 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3126 if (BGE_IS_JUMBO_CAPABLE(sc))
3127 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3128 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3130 while (rx_cons != rx_prod) {
3131 struct bge_rx_bd *cur_rx;
3133 struct mbuf *m = NULL;
3134 uint16_t vlan_tag = 0;
3137 #ifdef DEVICE_POLLING
3138 if (ifp->if_capenable & IFCAP_POLLING) {
3139 if (sc->rxcycles <= 0)
3145 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3147 rxidx = cur_rx->bge_idx;
3148 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3150 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3151 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3153 vlan_tag = cur_rx->bge_vlan_tag;
3156 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3157 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3158 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
3159 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
3160 BUS_DMASYNC_POSTREAD);
3161 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
3162 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
3163 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3164 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3166 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3168 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3171 if (bge_newbuf_jumbo(sc,
3172 sc->bge_jumbo, NULL) == ENOBUFS) {
3174 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3178 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3179 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3180 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
3181 BUS_DMASYNC_POSTREAD);
3182 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3183 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
3184 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3185 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3187 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3189 bge_newbuf_std(sc, sc->bge_std, m);
3192 if (bge_newbuf_std(sc, sc->bge_std,
3195 bge_newbuf_std(sc, sc->bge_std, m);
3201 #ifndef __NO_STRICT_ALIGNMENT
3203 * For architectures with strict alignment we must make sure
3204 * the payload is aligned.
3206 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3207 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3209 m->m_data += ETHER_ALIGN;
3212 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3213 m->m_pkthdr.rcvif = ifp;
3215 if (ifp->if_capenable & IFCAP_RXCSUM) {
3216 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3217 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
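/*
 * The chip reports the ones-complement sum over the IP header; a
 * result of 0xFFFF (so XOR with 0xFFFF gives 0) indicates the
 * header checksum verified.
 */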
3218 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3219 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3221 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3222 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3223 m->m_pkthdr.csum_data =
3224 cur_rx->bge_tcp_udp_csum;
3225 m->m_pkthdr.csum_flags |=
3226 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3231 * If we received a packet with a vlan tag,
3232 * attach that information to the packet.
3235 #if __FreeBSD_version > 700022
3236 m->m_pkthdr.ether_vtag = vlan_tag;
3237 m->m_flags |= M_VLANTAG;
3239 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3246 (*ifp->if_input)(ifp, m);
3250 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3254 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3255 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3257 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3258 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3260 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
3261 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3262 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3264 sc->bge_rx_saved_considx = rx_cons;
3265 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3267 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3269 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3272 * This register wraps very quickly under heavy packet drops.
3273 * If you need correct statistics, you can enable this check.
3275 if (BGE_IS_5705_PLUS(sc))
3276 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3282 bge_txeof(struct bge_softc *sc)
3284 struct bge_tx_bd *cur_tx = NULL;
3287 BGE_LOCK_ASSERT(sc);
3289 /* Nothing to do. */
3290 if (sc->bge_tx_saved_considx ==
3291 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
3296 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3297 sc->bge_cdata.bge_tx_ring_map,
3298 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3300 * Go through our tx ring and free mbufs for those
3301 * frames that have been sent.
3303 while (sc->bge_tx_saved_considx !=
3304 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
3307 idx = sc->bge_tx_saved_considx;
3308 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3309 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3311 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3312 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3313 sc->bge_cdata.bge_tx_dmamap[idx],
3314 BUS_DMASYNC_POSTWRITE);
3315 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3316 sc->bge_cdata.bge_tx_dmamap[idx]);
3317 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3318 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3321 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3325 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3326 if (sc->bge_txcnt == 0)
3330 #ifdef DEVICE_POLLING
3332 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3334 struct bge_softc *sc = ifp->if_softc;
3335 uint32_t statusword;
3339 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3344 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3345 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3347 statusword = atomic_readandclear_32(
3348 &sc->bge_ldata.bge_status_block->bge_status);
3350 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3351 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3353 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3354 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3357 if (cmd == POLL_AND_CHECK_STATUS)
3358 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3359 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3360 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3363 sc->rxcycles = count;
3364 rx_npkts = bge_rxeof(sc);
3365 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3370 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3371 bge_start_locked(ifp);
3376 #endif /* DEVICE_POLLING */
3381 struct bge_softc *sc;
3383 uint32_t statusword;
3391 #ifdef DEVICE_POLLING
3392 if (ifp->if_capenable & IFCAP_POLLING) {
3399 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3400 * disable interrupts by writing nonzero like we used to, since with
3401 * our current organization this just gives complications and
3402 * pessimizations for re-enabling interrupts. We used to have races
3403 * instead of the necessary complications. Disabling interrupts
3404 * would just reduce the chance of a status update while we are
3405 * running (by switching to the interrupt-mode coalescence
3406 * parameters), but this chance is already very low so it is more
3407 * efficient to get another interrupt than prevent it.
3409 * We do the ack first to ensure another interrupt if there is a
3410 * status update after the ack. We don't check for the status
3411 * changing later because it is more efficient to get another
3412 * interrupt than prevent it, not quite as above (not checking is
3413 * a smaller optimization than not toggling the interrupt enable,
3414 * since checking doesn't involve PCI accesses and toggling requires
3415 * the status check). So toggling would probably be a pessimization
3416 * even with MSI. It would only be needed for using a task queue.
3418 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3421 * Do the mandatory PCI flush as well as get the link status.
3423 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3425 /* Make sure the descriptor ring indexes are coherent. */
3426 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3427 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3429 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3430 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3431 statusword || sc->bge_link_evt)
3434 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3435 /* Check RX return ring producer/consumer. */
3439 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3440 /* Check TX ring producer/consumer. */
3444 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3445 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3446 bge_start_locked(ifp);
3448 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3449 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3455 bge_asf_driver_up(struct bge_softc *sc)
3457 if (sc->bge_asf_mode & ASF_STACKUP) {
3458 /* Send ASF heartbeat approx. every 2s */
3459 if (sc->bge_asf_count)
3460 sc->bge_asf_count--;
3462 sc->bge_asf_count = 5;
3463 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3465 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3466 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3467 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3468 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
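/*
 * XXX assumption: setting bit 14 of BGE_CPU_EVENT above presumably
 * notifies the on-chip firmware that a new driver heartbeat has been posted.
 */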
3476 struct bge_softc *sc = xsc;
3477 struct mii_data *mii = NULL;
3479 BGE_LOCK_ASSERT(sc);
3481 /* Synchronize with possible callout reset/stop. */
3482 if (callout_pending(&sc->bge_stat_ch) ||
3483 !callout_active(&sc->bge_stat_ch))
3486 if (BGE_IS_5705_PLUS(sc))
3487 bge_stats_update_regs(sc);
3489 bge_stats_update(sc);
3491 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3492 mii = device_get_softc(sc->bge_miibus);
3494 * Do not touch PHY if we have link up. This could break
3495 * IPMI/ASF mode or produce extra input errors
3496 * (extra errors were reported for bcm5701 & bcm5704).
3502 * Since auto-polling can't be used in TBI mode, we have to poll
3503 * link status manually. Here we register a pending link event
3504 * and trigger an interrupt.
3506 #ifdef DEVICE_POLLING
3507 /* In polling mode we poll link state in bge_poll(). */
3508 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3512 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3513 sc->bge_flags & BGE_FLAG_5788)
3514 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3516 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
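/*
 * The two writes above force the chip to raise an interrupt now: 5700
 * and 5788 parts use the INTR_SET bit in the local control register,
 * while other chips use a host-coalescing "coalesce now" event.
 */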
3520 bge_asf_driver_up(sc);
3523 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3527 bge_stats_update_regs(struct bge_softc *sc)
3533 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3534 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3536 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3540 bge_stats_update(struct bge_softc *sc)
3544 uint32_t cnt; /* current register value */
3548 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3550 #define READ_STAT(sc, stats, stat) \
3551 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
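/*
 * Each statistic lives in NIC memory as a 64-bit counter; we read the
 * low 32 bits and accumulate the delta since the previous read so that
 * counter wraps are tolerated.
 */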
3553 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3554 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3555 sc->bge_tx_collisions = cnt;
3557 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3558 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3559 sc->bge_rx_discards = cnt;
3561 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3562 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3563 sc->bge_tx_discards = cnt;
3569 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3570 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3571 * but when such padded frames employ the bge IP/TCP checksum offload,
3572 * the hardware checksum assist gives incorrect results (possibly
3573 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3574 * If we pad such runts with zeros, the onboard checksum comes out correct.
3577 bge_cksum_pad(struct mbuf *m)
3579 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
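/*
 * Example: ETHER_MIN_NOPAD is the 64-byte minimum frame length less the
 * 4-byte CRC, i.e. 60, so a 42-byte runt gets padlen = 18 bytes of zeros
 * appended before the checksum offload sees it.
 */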
3582 /* If there's only the packet-header and we can pad there, use it. */
3583 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3584 M_TRAILINGSPACE(m) >= padlen) {
3588 * Walk packet chain to find last mbuf. We will either
3589 * pad there, or append a new mbuf and pad it.
3591 for (last = m; last->m_next != NULL; last = last->m_next);
3592 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3593 /* Allocate new empty mbuf, pad it. Compact later. */
3596 MGET(n, M_DONTWAIT, MT_DATA);
3605 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3606 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3607 last->m_len += padlen;
3608 m->m_pkthdr.len += padlen;
3614 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3615 * pointers to descriptors.
3618 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3620 bus_dma_segment_t segs[BGE_NSEG_NEW];
3622 struct bge_tx_bd *d;
3623 struct mbuf *m = *m_head;
3624 uint32_t idx = *txidx;
3625 uint16_t csum_flags;
3626 int nsegs, i, error;
3629 if (m->m_pkthdr.csum_flags) {
3630 if (m->m_pkthdr.csum_flags & CSUM_IP)
3631 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3632 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3633 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3634 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3635 (error = bge_cksum_pad(m)) != 0) {
3641 if (m->m_flags & M_LASTFRAG)
3642 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3643 else if (m->m_flags & M_FRAG)
3644 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3647 map = sc->bge_cdata.bge_tx_dmamap[idx];
3648 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3649 &nsegs, BUS_DMA_NOWAIT);
3650 if (error == EFBIG) {
3651 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
3658 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3659 segs, &nsegs, BUS_DMA_NOWAIT);
3665 } else if (error != 0)
3669 * Sanity check: avoid coming within 16 descriptors
3670 * of the end of the ring.
3672 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3673 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3677 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
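/*
 * Fill in one send BD per DMA segment; BGE_INC() advances the ring
 * index with wraparound at BGE_TX_RING_CNT.
 */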
3679 for (i = 0; ; i++) {
3680 d = &sc->bge_ldata.bge_tx_ring[idx];
3681 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3682 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3683 d->bge_len = segs[i].ds_len;
3684 d->bge_flags = csum_flags;
3687 BGE_INC(idx, BGE_TX_RING_CNT);
3690 /* Mark the last segment as end of packet... */
3691 d->bge_flags |= BGE_TXBDFLAG_END;
3693 /* ... and put VLAN tag into first segment. */
3694 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3695 #if __FreeBSD_version > 700022
3696 if (m->m_flags & M_VLANTAG) {
3697 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3698 d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3700 d->bge_vlan_tag = 0;
3705 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
3706 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3707 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3709 d->bge_vlan_tag = 0;
3714 * Ensure that the map for this transmission
3715 * is placed at the array index of the last descriptor in this chain.
3718 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3719 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3720 sc->bge_cdata.bge_tx_chain[idx] = m;
3721 sc->bge_txcnt += nsegs;
3723 BGE_INC(idx, BGE_TX_RING_CNT);
3730 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3731 * to the mbuf data regions directly in the transmit descriptors.
3734 bge_start_locked(struct ifnet *ifp)
3736 struct bge_softc *sc;
3737 struct mbuf *m_head = NULL;
3743 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3746 prodidx = sc->bge_tx_prodidx;
3748 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3749 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3755 * The code inside the if() block is never reached since we
3756 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3757 * requests to checksum TCP/UDP in a fragmented packet.
3760 * safety overkill. If this is a fragmented packet chain
3761 * with delayed TCP/UDP checksums, then only encapsulate
3762 * it if we have enough descriptors to handle the entire chain at once.
3764 * (paranoia -- may not actually be needed)
3766 if (m_head->m_flags & M_FIRSTFRAG &&
3767 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3768 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3769 m_head->m_pkthdr.csum_data + 16) {
3770 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3771 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3777 * Pack the data into the transmit ring. If we
3778 * don't have room, set the OACTIVE flag and wait
3779 * for the NIC to drain the ring.
3781 if (bge_encap(sc, &m_head, &prodidx)) {
3784 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3785 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3791 * If there's a BPF listener, bounce a copy of this frame
3794 #ifdef ETHER_BPF_MTAP
3795 ETHER_BPF_MTAP(ifp, m_head);
3797 BPF_MTAP(ifp, m_head);
3802 /* No packets were dequeued. */
3806 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3807 /* 5700 b2 errata */
3808 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3809 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3811 sc->bge_tx_prodidx = prodidx;
3814 * Set a timeout in case the chip goes out to lunch.
3820 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3821 * to the mbuf data regions directly in the transmit descriptors.
3824 bge_start(struct ifnet *ifp)
3826 struct bge_softc *sc;
3830 bge_start_locked(ifp);
3835 bge_init_locked(struct bge_softc *sc)
3840 BGE_LOCK_ASSERT(sc);
3844 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3847 /* Cancel pending I/O and flush buffers. */
3851 bge_sig_pre_reset(sc, BGE_RESET_START);
3853 bge_sig_legacy(sc, BGE_RESET_START);
3854 bge_sig_post_reset(sc, BGE_RESET_START);
3859 * Init the various state machines, ring
3860 * control blocks and firmware.
3862 if (bge_blockinit(sc)) {
3863 device_printf(sc->bge_dev, "initialization failure\n");
3870 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3871 ETHER_HDR_LEN + ETHER_CRC_LEN +
3872 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
3874 /* Load our MAC address. */
3875 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3876 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3877 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
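/*
 * The station address is split across two registers: the first two bytes
 * go into BGE_MAC_ADDR1_LO and the remaining four into BGE_MAC_ADDR1_HI,
 * with htons() putting each 16-bit word into the byte order the MAC
 * apparently expects.
 */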
3879 /* Program promiscuous mode. */
3882 /* Program multicast filter. */
3885 /* Program VLAN tag stripping. */
3889 bge_init_rx_ring_std(sc);
3892 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3893 * memory to ensure that the chip has in fact read the first
3894 * entry of the ring.
3896 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3898 for (i = 0; i < 10; i++) {
3900 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3901 if (v == (MCLBYTES - ETHER_ALIGN))
3905 device_printf (sc->bge_dev,
3906 "5705 A0 chip failed to load RX ring\n");
3909 /* Init jumbo RX ring. */
3910 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3911 bge_init_rx_ring_jumbo(sc);
3913 /* Init our RX return ring index. */
3914 sc->bge_rx_saved_considx = 0;
3916 /* Init our RX/TX stat counters. */
3917 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3920 bge_init_tx_ring(sc);
3922 /* Turn on transmitter. */
3923 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3925 /* Turn on receiver. */
3926 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3928 /* Tell firmware we're alive. */
3929 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3931 #ifdef DEVICE_POLLING
3932 /* Disable interrupts if we are polling. */
3933 if (ifp->if_capenable & IFCAP_POLLING) {
3934 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3935 BGE_PCIMISCCTL_MASK_PCI_INTR);
3936 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3940 /* Enable host interrupts. */
3942 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3943 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3944 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3947 bge_ifmedia_upd_locked(ifp);
3949 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3950 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3952 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3958 struct bge_softc *sc = xsc;
3961 bge_init_locked(sc);
3966 * Set media options.
3969 bge_ifmedia_upd(struct ifnet *ifp)
3971 struct bge_softc *sc = ifp->if_softc;
3975 res = bge_ifmedia_upd_locked(ifp);
3982 bge_ifmedia_upd_locked(struct ifnet *ifp)
3984 struct bge_softc *sc = ifp->if_softc;
3985 struct mii_data *mii;
3986 struct mii_softc *miisc;
3987 struct ifmedia *ifm;
3989 BGE_LOCK_ASSERT(sc);
3991 ifm = &sc->bge_ifmedia;
3993 /* If this is a 1000baseX NIC, enable the TBI port. */
3994 if (sc->bge_flags & BGE_FLAG_TBI) {
3995 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3997 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4000 * The BCM5704 ASIC appears to have a special
4001 * mechanism for programming the autoneg
4002 * advertisement registers in TBI mode.
4004 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4006 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4007 if (sgdig & BGE_SGDIGSTS_DONE) {
4008 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4009 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4010 sgdig |= BGE_SGDIGCFG_AUTO |
4011 BGE_SGDIGCFG_PAUSE_CAP |
4012 BGE_SGDIGCFG_ASYM_PAUSE;
4013 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4014 sgdig | BGE_SGDIGCFG_SEND);
4016 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4021 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4022 BGE_CLRBIT(sc, BGE_MAC_MODE,
4023 BGE_MACMODE_HALF_DUPLEX);
4025 BGE_SETBIT(sc, BGE_MAC_MODE,
4026 BGE_MACMODE_HALF_DUPLEX);
4036 mii = device_get_softc(sc->bge_miibus);
4037 if (mii->mii_instance)
4038 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4039 mii_phy_reset(miisc);
4043 * Force an interrupt so that we will call bge_link_upd
4044 * if needed and clear any pending link state attention.
4045 * Without this we would not get any further interrupts
4046 * for link state changes and thus would not bring the link UP or
4047 * be able to send in bge_start_locked. The only
4048 * way to get things working was to receive a packet and get an RX intr.
4050 * bge_tick should help for fiber cards, and we might not
4051 * need to do this here if BGE_FLAG_TBI is set, but as
4052 * we poll for fiber anyway it should not harm.
4054 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4055 sc->bge_flags & BGE_FLAG_5788)
4056 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4058 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4064 * Report current media status.
4067 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4069 struct bge_softc *sc = ifp->if_softc;
4070 struct mii_data *mii;
4074 if (sc->bge_flags & BGE_FLAG_TBI) {
4075 ifmr->ifm_status = IFM_AVALID;
4076 ifmr->ifm_active = IFM_ETHER;
4077 if (CSR_READ_4(sc, BGE_MAC_STS) &
4078 BGE_MACSTAT_TBI_PCS_SYNCHED)
4079 ifmr->ifm_status |= IFM_ACTIVE;
4081 ifmr->ifm_active |= IFM_NONE;
4085 ifmr->ifm_active |= IFM_1000_SX;
4086 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4087 ifmr->ifm_active |= IFM_HDX;
4089 ifmr->ifm_active |= IFM_FDX;
4094 mii = device_get_softc(sc->bge_miibus);
4096 ifmr->ifm_active = mii->mii_media_active;
4097 ifmr->ifm_status = mii->mii_media_status;
4103 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4105 struct bge_softc *sc = ifp->if_softc;
4106 struct ifreq *ifr = (struct ifreq *) data;
4107 struct mii_data *mii;
4108 int flags, mask, error = 0;
4112 if (ifr->ifr_mtu < ETHERMIN ||
4113 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4114 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4115 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4116 ifr->ifr_mtu > ETHERMTU))
4118 else if (ifp->if_mtu != ifr->ifr_mtu) {
4119 ifp->if_mtu = ifr->ifr_mtu;
4120 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4126 if (ifp->if_flags & IFF_UP) {
4128 * If only the state of the PROMISC flag changed,
4129 * then just use the 'set promisc mode' command
4130 * instead of reinitializing the entire NIC. Doing
4131 * a full re-init means reloading the firmware and
4132 * waiting for it to start up, which may take a
4133 * second or two. Similarly for ALLMULTI.
4135 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4136 flags = ifp->if_flags ^ sc->bge_if_flags;
4137 if (flags & IFF_PROMISC)
4139 if (flags & IFF_ALLMULTI)
4142 bge_init_locked(sc);
4144 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4148 sc->bge_if_flags = ifp->if_flags;
4154 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4163 if (sc->bge_flags & BGE_FLAG_TBI) {
4164 error = ifmedia_ioctl(ifp, ifr,
4165 &sc->bge_ifmedia, command);
4167 mii = device_get_softc(sc->bge_miibus);
4168 error = ifmedia_ioctl(ifp, ifr,
4169 &mii->mii_media, command);
4173 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4174 #ifdef DEVICE_POLLING
4175 if (mask & IFCAP_POLLING) {
4176 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4177 error = ether_poll_register(bge_poll, ifp);
4181 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4182 BGE_PCIMISCCTL_MASK_PCI_INTR);
4183 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4184 ifp->if_capenable |= IFCAP_POLLING;
4187 error = ether_poll_deregister(ifp);
4188 /* Enable interrupt even in error case */
4190 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4191 BGE_PCIMISCCTL_MASK_PCI_INTR);
4192 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4193 ifp->if_capenable &= ~IFCAP_POLLING;
4198 if (mask & IFCAP_HWCSUM) {
4199 ifp->if_capenable ^= IFCAP_HWCSUM;
4200 if (IFCAP_HWCSUM & ifp->if_capenable &&
4201 IFCAP_HWCSUM & ifp->if_capabilities)
4202 ifp->if_hwassist = BGE_CSUM_FEATURES;
4204 ifp->if_hwassist = 0;
4205 #ifdef VLAN_CAPABILITIES
4206 VLAN_CAPABILITIES(ifp);
4210 if (mask & IFCAP_VLAN_MTU) {
4211 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4212 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4216 if (mask & IFCAP_VLAN_HWTAGGING) {
4217 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4221 #ifdef VLAN_CAPABILITIES
4222 VLAN_CAPABILITIES(ifp);
4228 error = ether_ioctl(ifp, command, data);
4236 bge_watchdog(struct bge_softc *sc)
4240 BGE_LOCK_ASSERT(sc);
4242 if (sc->bge_timer == 0 || --sc->bge_timer)
4247 if_printf(ifp, "watchdog timeout -- resetting\n");
4249 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4250 bge_init_locked(sc);
4256 * Stop the adapter and free any mbufs allocated to the
4257 * RX and TX lists.
4258 */
4259 static void
4260 bge_stop(struct bge_softc *sc)
4261 {
4262 struct ifnet *ifp;
4263 struct ifmedia_entry *ifm;
4264 struct mii_data *mii = NULL;
4265 int mtmp, itmp;
4267 BGE_LOCK_ASSERT(sc);
4269 ifp = sc->bge_ifp;
4271 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
4272 mii = device_get_softc(sc->bge_miibus);
4274 callout_stop(&sc->bge_stat_ch);
4276 /* Disable host interrupts. */
4277 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4278 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4280 /*
4281 * Tell firmware we're shutting down.
4282 */
4283 bge_stop_fw(sc);
4284 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4286 /*
4287 * Disable all of the receiver blocks.
4288 */
4289 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4290 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4291 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4292 if (!(BGE_IS_5705_PLUS(sc)))
4293 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4294 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4295 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4296 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4298 /*
4299 * Disable all of the transmit blocks.
4300 */
4301 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4302 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4303 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4304 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4305 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4306 if (!(BGE_IS_5705_PLUS(sc)))
4307 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4308 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4310 /*
4311 * Shut down all of the memory managers and related
4312 * state machines.
4313 */
4314 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4315 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4316 if (!(BGE_IS_5705_PLUS(sc)))
4317 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4318 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4319 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4320 if (!(BGE_IS_5705_PLUS(sc))) {
4321 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4322 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4323 }
4325 bge_reset(sc);
4326 bge_sig_legacy(sc, BGE_RESET_STOP);
4327 bge_sig_post_reset(sc, BGE_RESET_STOP);
4329 /*
4330 * Keep the ASF firmware running if up.
4331 */
4332 if (sc->bge_asf_mode & ASF_STACKUP)
4333 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4334 else
4335 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4337 /* Free the RX lists. */
4338 bge_free_rx_ring_std(sc);
4340 /* Free jumbo RX list. */
4341 if (BGE_IS_JUMBO_CAPABLE(sc))
4342 bge_free_rx_ring_jumbo(sc);
4344 /* Free TX buffers. */
4345 bge_free_tx_ring(sc);
4347 /*
4348 * Isolate/power down the PHY, but leave the media selection
4349 * unchanged so that things will be put back to normal when
4350 * we bring the interface back up.
4351 */
4352 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4353 itmp = ifp->if_flags;
4354 ifp->if_flags |= IFF_UP;
4355 /*
4356 * If we are called from bge_detach(), mii is already NULL.
4357 */
4358 if (mii != NULL) {
4359 ifm = mii->mii_media.ifm_cur;
4360 mtmp = ifm->ifm_media;
4361 ifm->ifm_media = IFM_ETHER | IFM_NONE;
4362 mii_mediachg(mii);
4363 ifm->ifm_media = mtmp;
4364 }
4365 ifp->if_flags = itmp;
4366 }
4368 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4370 /* Clear MAC's link state (PHY may still have link UP). */
4371 if (bootverbose && sc->bge_link)
4372 if_printf(sc->bge_ifp, "link DOWN\n");
4373 sc->bge_link = 0;
4375 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4376 }
4378 /*
4379 * Stop all chip I/O so that the kernel's probe routines don't
4380 * get confused by errant DMAs when rebooting.
4381 */
4382 static int
4383 bge_shutdown(device_t dev)
4384 {
4385 struct bge_softc *sc;
4387 sc = device_get_softc(dev);
4388 BGE_LOCK(sc);
4389 bge_stop(sc);
4390 bge_reset(sc);
4391 BGE_UNLOCK(sc);
4393 return (0);
4394 }
4396 static int
4397 bge_suspend(device_t dev)
4398 {
4399 struct bge_softc *sc;
4401 sc = device_get_softc(dev);
4402 BGE_LOCK(sc);
4403 bge_stop(sc);
4404 BGE_UNLOCK(sc);
4406 return (0);
4407 }
4409 static int
4410 bge_resume(device_t dev)
4411 {
4412 struct bge_softc *sc;
4413 struct ifnet *ifp;
4415 sc = device_get_softc(dev);
4416 BGE_LOCK(sc);
4417 ifp = sc->bge_ifp;
4418 if (ifp->if_flags & IFF_UP) {
4419 bge_init_locked(sc);
4420 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4421 bge_start_locked(ifp);
4422 }
4423 BGE_UNLOCK(sc);
4425 return (0);
4426 }
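/*
 * Reconcile the driver's link state (sc->bge_link) with what the MAC,
 * the TBI PCS, or the MII-attached PHY currently reports, logging
 * transitions when bootverbose is set.
 */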
4428 static void
4429 bge_link_upd(struct bge_softc *sc)
4430 {
4431 struct mii_data *mii;
4432 uint32_t link, status;
4434 BGE_LOCK_ASSERT(sc);
4436 /* Clear 'pending link event' flag. */
4437 sc->bge_link_evt = 0;
4439 /*
4440 * Process link state changes.
4441 * Grrr. The link status word in the status block does
4442 * not work correctly on the BCM5700 rev AX and BX chips,
4443 * according to all available information. Hence, we have
4444 * to enable MII interrupts in order to properly obtain
4445 * async link changes. Unfortunately, this also means that
4446 * we have to read the MAC status register to detect link
4447 * changes, thereby adding an additional register access to
4448 * the interrupt handler.
4449 *
4450 * XXX: perhaps link state detection procedure used for
4451 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4452 */
4454 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4455 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4456 status = CSR_READ_4(sc, BGE_MAC_STS);
4457 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4458 mii = device_get_softc(sc->bge_miibus);
4459 mii_pollstat(mii);
4460 if (!sc->bge_link &&
4461 mii->mii_media_status & IFM_ACTIVE &&
4462 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4463 sc->bge_link++;
4464 if (bootverbose)
4465 if_printf(sc->bge_ifp, "link UP\n");
4466 } else if (sc->bge_link &&
4467 (!(mii->mii_media_status & IFM_ACTIVE) ||
4468 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4469 sc->bge_link = 0;
4470 if (bootverbose)
4471 if_printf(sc->bge_ifp, "link DOWN\n");
4472 }
4474 /* Clear the interrupt. */
4475 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4476 BGE_EVTENB_MI_INTERRUPT);
4477 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4478 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4479 BRGPHY_INTRS);
4480 }
4481 return;
4482 }
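/*
 * For fiber (TBI) interfaces the PCS "synched" bit in the MAC status
 * register is used as the link indication.
 */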
4484 if (sc->bge_flags & BGE_FLAG_TBI) {
4485 status = CSR_READ_4(sc, BGE_MAC_STS);
4486 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4487 if (!sc->bge_link) {
4488 sc->bge_link++;
4489 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4490 BGE_CLRBIT(sc, BGE_MAC_MODE,
4491 BGE_MACMODE_TBI_SEND_CFGS);
4492 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4493 if (bootverbose)
4494 if_printf(sc->bge_ifp, "link UP\n");
4495 if_link_state_change(sc->bge_ifp,
4496 LINK_STATE_UP);
4497 }
4498 } else if (sc->bge_link) {
4499 sc->bge_link = 0;
4500 if (bootverbose)
4501 if_printf(sc->bge_ifp, "link DOWN\n");
4502 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4503 }
4504 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4505 /*
4506 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
4507 * in status word always set. Work around this bug by reading
4508 * PHY link status directly.
4509 */
4510 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4512 if (link != sc->bge_link ||
4513 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4514 mii = device_get_softc(sc->bge_miibus);
4515 mii_pollstat(mii);
4516 if (!sc->bge_link &&
4517 mii->mii_media_status & IFM_ACTIVE &&
4518 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4519 sc->bge_link++;
4520 if (bootverbose)
4521 if_printf(sc->bge_ifp, "link UP\n");
4522 } else if (sc->bge_link &&
4523 (!(mii->mii_media_status & IFM_ACTIVE) ||
4524 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4525 sc->bge_link = 0;
4526 if (bootverbose)
4527 if_printf(sc->bge_ifp, "link DOWN\n");
4528 }
4529 }
4530 } else {
4531 /*
4532 * Discard link events for MII/GMII controllers
4533 * if MI auto-polling is disabled.
4534 */
4535 }
4537 /* Clear the attention. */
4538 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4539 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4540 BGE_MACSTAT_LINK_CHANGED);
4541 }
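/*
 * BGE_SYSCTL_STAT() exports a single hardware counter as a read-only
 * sysctl node. The offset of the field within struct bge_stats is
 * passed as the handler argument so that bge_sysctl_stats() can fetch
 * the counter from the chip's statistics block.
 */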
4543 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
4544 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
4545 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
4546 desc)
4548 static void
4549 bge_add_sysctls(struct bge_softc *sc)
4550 {
4551 struct sysctl_ctx_list *ctx;
4552 struct sysctl_oid_list *children, *schildren;
4553 struct sysctl_oid *tree;
4555 ctx = device_get_sysctl_ctx(sc->bge_dev);
4556 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4558 #ifdef BGE_REGISTER_DEBUG
4559 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4560 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4561 "Debug Information");
4563 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4564 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4565 "Register Read");
4567 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4568 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4569 "Memory Read");
4571 #endif
4573 if (BGE_IS_5705_PLUS(sc))
4574 return;
4576 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4577 NULL, "BGE Statistics");
4578 schildren = children = SYSCTL_CHILDREN(tree);
4579 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
4580 children, COSFramesDroppedDueToFilters,
4581 "FramesDroppedDueToFilters");
4582 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
4583 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
4584 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
4585 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
4586 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
4587 children, nicNoMoreRxBDs, "NoMoreRxBDs");
4588 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
4589 children, ifInDiscards, "InputDiscards");
4590 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
4591 children, ifInErrors, "InputErrors");
4592 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
4593 children, nicRecvThresholdHit, "RecvThresholdHit");
4594 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
4595 children, nicDmaReadQueueFull, "DmaReadQueueFull");
4596 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
4597 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
4598 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
4599 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
4600 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
4601 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
4602 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
4603 children, nicRingStatusUpdate, "RingStatusUpdate");
4604 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
4605 children, nicInterrupts, "Interrupts");
4606 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
4607 children, nicAvoidedInterrupts, "AvoidedInterrupts");
4608 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
4609 children, nicSendThresholdHit, "SendThresholdHit");
4611 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
4612 NULL, "BGE RX Statistics");
4613 children = SYSCTL_CHILDREN(tree);
4614 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
4615 children, rxstats.ifHCInOctets, "Octets");
4616 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
4617 children, rxstats.etherStatsFragments, "Fragments");
4618 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
4619 children, rxstats.ifHCInUcastPkts, "UcastPkts");
4620 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
4621 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
4622 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
4623 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
4624 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
4625 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
4626 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
4627 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
4628 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
4629 children, rxstats.xoffPauseFramesReceived,
4630 "xoffPauseFramesReceived");
4631 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
4632 children, rxstats.macControlFramesReceived,
4633 "ControlFramesReceived");
4634 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
4635 children, rxstats.xoffStateEntered, "xoffStateEntered");
4636 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
4637 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
4638 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
4639 children, rxstats.etherStatsJabbers, "Jabbers");
4640 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
4641 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
4642 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
4643 children, rxstats.inRangeLengthError, "inRangeLengthError");
4644 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
4645 children, rxstats.outRangeLengthError, "outRangeLengthError");
4647 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
4648 NULL, "BGE TX Statistics");
4649 children = SYSCTL_CHILDREN(tree);
4650 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
4651 children, txstats.ifHCOutOctets, "Octets");
4652 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
4653 children, txstats.etherStatsCollisions, "Collisions");
4654 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
4655 children, txstats.outXonSent, "XonSent");
4656 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
4657 children, txstats.outXoffSent, "XoffSent");
4658 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
4659 children, txstats.flowControlDone, "flowControlDone");
4660 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
4661 children, txstats.dot3StatsInternalMacTransmitErrors,
4662 "InternalMacTransmitErrors");
4663 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
4664 children, txstats.dot3StatsSingleCollisionFrames,
4665 "SingleCollisionFrames");
4666 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
4667 children, txstats.dot3StatsMultipleCollisionFrames,
4668 "MultipleCollisionFrames");
4669 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
4670 children, txstats.dot3StatsDeferredTransmissions,
4671 "DeferredTransmissions");
4672 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
4673 children, txstats.dot3StatsExcessiveCollisions,
4674 "ExcessiveCollisions");
4675 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
4676 children, txstats.dot3StatsLateCollisions,
4677 "LateCollisions");
4678 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
4679 children, txstats.ifHCOutUcastPkts, "UcastPkts");
4680 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
4681 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
4682 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
4683 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
4684 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
4685 children, txstats.dot3StatsCarrierSenseErrors,
4686 "CarrierSenseErrors");
4687 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
4688 children, txstats.ifOutDiscards, "Discards");
4689 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
4690 children, txstats.ifOutErrors, "Errors");
4691 }
4693 static int
4694 bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
4695 {
4696 struct bge_softc *sc;
4697 uint32_t result;
4698 int offset;
4700 sc = (struct bge_softc *)arg1;
4701 offset = arg2;
4702 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
4703 offsetof(bge_hostaddr, bge_addr_lo));
4704 return (sysctl_handle_int(oidp, &result, 0, req));
4705 }
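/*
 * The handlers below are only built with BGE_REGISTER_DEBUG and let the
 * sysctls registered above dump the status block and read arbitrary
 * registers or on-chip memory for debugging.
 */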
4707 #ifdef BGE_REGISTER_DEBUG
4708 static int
4709 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4710 {
4711 struct bge_softc *sc;
4712 uint16_t *sbdata;
4713 int error;
4714 int result;
4715 int i, j;
4717 result = -1;
4718 error = sysctl_handle_int(oidp, &result, 0, req);
4719 if (error || (req->newptr == NULL))
4720 return (error);
4722 if (result == 1) {
4723 sc = (struct bge_softc *)arg1;
4725 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
4726 printf("Status Block:\n");
4727 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
4728 printf("%06x:", i);
4729 for (j = 0; j < 8; j++) {
4730 printf(" %04x", sbdata[i]);
4731 i += 2;
4732 }
4733 printf("\n");
4734 }
4736 printf("Registers:\n");
4737 for (i = 0x800; i < 0xA00; ) {
4738 printf("%06x:", i);
4739 for (j = 0; j < 8; j++) {
4740 printf(" %08x", CSR_READ_4(sc, i));
4741 i += 4;
4742 }
4743 printf("\n");
4744 }
4746 printf("Hardware Flags:\n");
4747 if (BGE_IS_5755_PLUS(sc))
4748 printf(" - 5755 Plus\n");
4749 if (BGE_IS_575X_PLUS(sc))
4750 printf(" - 575X Plus\n");
4751 if (BGE_IS_5705_PLUS(sc))
4752 printf(" - 5705 Plus\n");
4753 if (BGE_IS_5714_FAMILY(sc))
4754 printf(" - 5714 Family\n");
4755 if (BGE_IS_5700_FAMILY(sc))
4756 printf(" - 5700 Family\n");
4757 if (sc->bge_flags & BGE_FLAG_JUMBO)
4758 printf(" - Supports Jumbo Frames\n");
4759 if (sc->bge_flags & BGE_FLAG_PCIX)
4760 printf(" - PCI-X Bus\n");
4761 if (sc->bge_flags & BGE_FLAG_PCIE)
4762 printf(" - PCI Express Bus\n");
4763 if (sc->bge_flags & BGE_FLAG_NO_3LED)
4764 printf(" - No 3 LEDs\n");
4765 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
4766 printf(" - RX Alignment Bug\n");
4767 }
4769 return (error);
4770 }
4772 static int
4773 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
4774 {
4775 struct bge_softc *sc;
4776 int error;
4777 uint16_t result;
4778 uint32_t val;
4780 result = -1;
4781 error = sysctl_handle_int(oidp, &result, 0, req);
4782 if (error || (req->newptr == NULL))
4783 return (error);
4785 if (result < 0x8000) {
4786 sc = (struct bge_softc *)arg1;
4787 val = CSR_READ_4(sc, result);
4788 printf("reg 0x%06X = 0x%08X\n", result, val);
4789 }
4791 return (error);
4792 }
4794 static int
4795 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
4796 {
4797 struct bge_softc *sc;
4798 int error;
4799 uint16_t result;
4800 uint32_t val;
4802 result = -1;
4803 error = sysctl_handle_int(oidp, &result, 0, req);
4804 if (error || (req->newptr == NULL))
4805 return (error);
4807 if (result < 0x8000) {
4808 sc = (struct bge_softc *)arg1;
4809 val = bge_readmem_ind(sc, result);
4810 printf("mem 0x%06X = 0x%08X\n", result, val);
4811 }
4813 return (error);
4814 }
4815 #endif
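/*
 * The bge_get_eaddr_*() helpers each try one source for the station
 * address and return 0 on success; bge_get_eaddr_fw() asks the system
 * firmware (Open Firmware), where the platform provides it.
 */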
4817 static int
4818 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
4819 {
4820 #ifdef __sparc64__
4821 if (sc->bge_flags & BGE_FLAG_EADDR)
4822 return (1);
4825 OF_getetheraddr(sc->bge_dev, ether_addr);
4826 return (0);
4827 #endif
4828 return (1);
4829 }
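/*
 * bge_get_eaddr_mem() looks for an address left in on-chip memory: it
 * is accepted only if the upper half of the word at 0x0c14 contains the
 * 0x484b ("HK") signature.
 */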
4831 static int
4832 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4833 {
4834 uint32_t mac_addr;
4836 mac_addr = bge_readmem_ind(sc, 0x0c14);
4837 if ((mac_addr >> 16) == 0x484b) {
4838 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4839 ether_addr[1] = (uint8_t)mac_addr;
4840 mac_addr = bge_readmem_ind(sc, 0x0c18);
4841 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4842 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4843 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4844 ether_addr[5] = (uint8_t)mac_addr;
4845 return (0);
4846 }
4847 return (1);
4848 }
4850 static int
4851 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
4852 {
4853 int mac_offset = BGE_EE_MAC_OFFSET;
4855 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4856 mac_offset = BGE_EE_MAC_OFFSET_5906;
4858 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
4859 ETHER_ADDR_LEN));
4860 }
4862 static int
4863 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
4864 {
4866 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4867 return (1);
4869 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4870 ETHER_ADDR_LEN));
4871 }
4873 static int
4874 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
4875 {
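/*
 * Probe the address sources in order: firmware/Open Firmware first,
 * then on-chip memory, then NVRAM, and finally the EEPROM. The first
 * helper that returns 0 terminates the search; if none succeeds,
 * ENXIO is returned.
 */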
4876 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
4877 /* NOTE: Order is critical */
4878 bge_get_eaddr_fw,
4879 bge_get_eaddr_mem,
4880 bge_get_eaddr_nvram,
4881 bge_get_eaddr_eeprom,
4882 NULL
4883 };
4884 const bge_eaddr_fcn_t *func;
4886 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
4887 if ((*func)(sc, eaddr) == 0)
4888 break;
4889 }
4890 return (*func == NULL ? ENXIO : 0);
4891 }