2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
86 #include <net/if_arp.h>
87 #include <net/ethernet.h>
88 #include <net/if_dl.h>
89 #include <net/if_media.h>
93 #include <net/if_types.h>
94 #include <net/if_vlan_var.h>
96 #include <netinet/in_systm.h>
97 #include <netinet/in.h>
98 #include <netinet/ip.h>
99 #include <netinet/tcp.h>
101 #include <machine/bus.h>
102 #include <machine/resource.h>
104 #include <sys/rman.h>
106 #include <dev/mii/mii.h>
107 #include <dev/mii/miivar.h>
109 #include <dev/mii/brgphyreg.h>
112 #include <dev/ofw/ofw_bus.h>
113 #include <dev/ofw/openfirm.h>
114 #include <machine/ofw_machdep.h>
115 #include <machine/ver.h>
118 #include <dev/pci/pcireg.h>
119 #include <dev/pci/pcivar.h>
121 #include <dev/bge/if_bgereg.h>
/* Hardware checksum-offload flags this driver handles (IP and TCP). */
123 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
/* Smallest frame payload needing no pad: min frame length minus CRC (60). */
124 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
/* Module dependencies: PCI bus, Ethernet framework, and the MII bus layer. */
126 MODULE_DEPEND(bge, pci, 1, 1, 1);
127 MODULE_DEPEND(bge, ether, 1, 1, 1);
128 MODULE_DEPEND(bge, miibus, 1, 1, 1);
130 /* "device miibus" required. See GENERIC if you get errors here. */
131 #include "miibus_if.h"
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
/*
 * PCI vendor/device ID pairs probed by bge_probe().
 * NOTE(review): the struct member declarations (orig. lines 140-141) and the
 * terminating { 0, 0 } sentinel / closing brace are missing from this extract.
 */
139 static const struct bge_type {
142 } const bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
230 { TC_VENDORID, TC_DEVICEID_3C996 },
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
/*
 * Vendor-ID to human-readable-name table, consumed by bge_lookup_vendor().
 * NOTE(review): struct member declarations (orig. lines 240-241) and the
 * terminating sentinel / closing brace are missing from this extract.
 */
239 static const struct bge_vendor {
242 } const bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
/*
 * Chip-ID to revision-name table, consumed by bge_lookup_rev().
 * NOTE(review): struct member declarations (orig. lines 255-256) and the
 * terminating sentinel / closing brace are missing from this extract.
 */
254 static const struct bge_revision {
257 } const bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
305 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
306 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
307 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
308 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
309 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
310 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
311 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
312 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
313 /* 5754 and 5787 share the same ASIC ID */
314 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
315 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
316 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
317 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
318 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
319 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
320 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
321 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
322 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
328 * Some defaults for major revisions, so that newer steppings
329 * that we don't know about have a shot at working.
/*
 * NOTE(review): the declaration below has a duplicated 'const' qualifier —
 * almost certainly a typo for "static const struct bge_revision
 * bge_majorrevs[]".  Harmless to most compilers but should be cleaned up.
 * The terminating sentinel / closing brace are missing from this extract.
 */
331 static const struct bge_revision const bge_majorrevs[] = {
332 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
333 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
334 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
335 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
336 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
337 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
338 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
339 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
340 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
341 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
342 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
343 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
344 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
345 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
346 /* 5754 and 5787 share the same ASIC ID */
347 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
348 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
349 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
350 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
351 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
352 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
353 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
/* Chip-family predicates: each tests a capability bit in sc->bge_flags. */
358 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
359 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
360 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
361 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
362 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
363 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
364 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
366 const struct bge_revision * bge_lookup_rev(uint32_t);
367 const struct bge_vendor * bge_lookup_vendor(uint16_t);
369 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
371 static int bge_probe(device_t);
372 static int bge_attach(device_t);
373 static int bge_detach(device_t);
374 static int bge_suspend(device_t);
375 static int bge_resume(device_t);
376 static void bge_release_resources(struct bge_softc *);
377 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
378 static int bge_dma_alloc(struct bge_softc *);
379 static void bge_dma_free(struct bge_softc *);
380 static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
381 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
383 static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
384 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
385 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
386 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
387 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
389 static void bge_txeof(struct bge_softc *, uint16_t);
390 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
391 static int bge_rxeof(struct bge_softc *, uint16_t, int);
393 static void bge_asf_driver_up (struct bge_softc *);
394 static void bge_tick(void *);
395 static void bge_stats_clear_regs(struct bge_softc *);
396 static void bge_stats_update(struct bge_softc *);
397 static void bge_stats_update_regs(struct bge_softc *);
398 static struct mbuf *bge_check_short_dma(struct mbuf *);
399 static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
400 uint16_t *, uint16_t *);
401 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
403 static void bge_intr(void *);
404 static int bge_msi_intr(void *);
405 static void bge_intr_task(void *, int);
406 static void bge_start_locked(struct ifnet *);
407 static void bge_start(struct ifnet *);
408 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
409 static void bge_init_locked(struct bge_softc *);
410 static void bge_init(void *);
411 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
412 static void bge_stop(struct bge_softc *);
413 static void bge_watchdog(struct bge_softc *);
414 static int bge_shutdown(device_t);
415 static int bge_ifmedia_upd_locked(struct ifnet *);
416 static int bge_ifmedia_upd(struct ifnet *);
417 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
419 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
420 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
422 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
423 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
425 static void bge_setpromisc(struct bge_softc *);
426 static void bge_setmulti(struct bge_softc *);
427 static void bge_setvlan(struct bge_softc *);
429 static __inline void bge_rxreuse_std(struct bge_softc *, int);
430 static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
431 static int bge_newbuf_std(struct bge_softc *, int);
432 static int bge_newbuf_jumbo(struct bge_softc *, int);
433 static int bge_init_rx_ring_std(struct bge_softc *);
434 static void bge_free_rx_ring_std(struct bge_softc *);
435 static int bge_init_rx_ring_jumbo(struct bge_softc *);
436 static void bge_free_rx_ring_jumbo(struct bge_softc *);
437 static void bge_free_tx_ring(struct bge_softc *);
438 static int bge_init_tx_ring(struct bge_softc *);
440 static int bge_chipinit(struct bge_softc *);
441 static int bge_blockinit(struct bge_softc *);
442 static uint32_t bge_dma_swap_options(struct bge_softc *);
444 static int bge_has_eaddr(struct bge_softc *);
445 static uint32_t bge_readmem_ind(struct bge_softc *, int);
446 static void bge_writemem_ind(struct bge_softc *, int, int);
447 static void bge_writembx(struct bge_softc *, int, int);
449 static uint32_t bge_readreg_ind(struct bge_softc *, int);
451 static void bge_writemem_direct(struct bge_softc *, int, int);
452 static void bge_writereg_ind(struct bge_softc *, int, int);
454 static int bge_miibus_readreg(device_t, int, int);
455 static int bge_miibus_writereg(device_t, int, int, int);
456 static void bge_miibus_statchg(device_t);
457 #ifdef DEVICE_POLLING
458 static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
/* 'type' argument for the bge_sig_* firmware-handshake helpers below. */
461 #define BGE_RESET_START 1
462 #define BGE_RESET_STOP 2
463 static void bge_sig_post_reset(struct bge_softc *, int);
464 static void bge_sig_legacy(struct bge_softc *, int);
465 static void bge_sig_pre_reset(struct bge_softc *, int);
466 static void bge_stop_fw(struct bge_softc *);
467 static int bge_reset(struct bge_softc *);
468 static void bge_link_upd(struct bge_softc *);
471 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
472 * leak information to untrusted users. It is also known to cause alignment
473 * traps on certain architectures.
475 #ifdef BGE_REGISTER_DEBUG
476 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
477 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
478 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
480 static void bge_add_sysctls(struct bge_softc *);
481 static void bge_add_sysctl_stats_regs(struct bge_softc *,
482 struct sysctl_ctx_list *, struct sysctl_oid_list *);
483 static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
484 struct sysctl_oid_list *);
485 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
/*
 * newbus method dispatch table for the bge driver.
 * NOTE(review): the DEVMETHOD_END terminator and closing brace are missing
 * from this extract (orig. lines after 503).
 */
487 static device_method_t bge_methods[] = {
488 /* Device interface */
489 DEVMETHOD(device_probe, bge_probe),
490 DEVMETHOD(device_attach, bge_attach),
491 DEVMETHOD(device_detach, bge_detach),
492 DEVMETHOD(device_shutdown, bge_shutdown),
493 DEVMETHOD(device_suspend, bge_suspend),
494 DEVMETHOD(device_resume, bge_resume),
/* Bus interface */
497 DEVMETHOD(bus_print_child, bus_generic_print_child),
498 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface */
501 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
502 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
503 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
/*
 * Driver declaration registered via DRIVER_MODULE() below.
 * NOTE(review): the name ("bge") and bge_methods initializers (orig. lines
 * 509-510) and the closing brace are missing from this extract.
 */
508 static driver_t bge_driver = {
511 sizeof(struct bge_softc)
/* devclass + module registration: bge attaches to pci, miibus to bge. */
514 static devclass_t bge_devclass;
516 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
517 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
/* ASF (management firmware) support knob, exposed as a loader tunable and
 * read-only sysctl hw.bge.allow_asf; enabled by default. */
519 static int bge_allow_asf = 1;
521 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
523 SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
524 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
525 "Allow ASF mode if available");
/* Open Firmware model strings / device paths used by bge_has_eaddr() to
 * identify sun4u on-board BGEs that lack a MAC-address EEPROM. */
527 #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
528 #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
529 #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
530 #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
531 #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
/*
 * Decide whether this device has a usable MAC-address EEPROM.  On sun4u the
 * on-board BGEs have none, so the address must come from Open Firmware.
 * NOTE(review): fragment — the return type, local declarations (dev,
 * subvendor), return statements and closing brace are missing from this
 * extract; presumably returns 0 for the OFW-only boards, 1 otherwise.
 */
534 bge_has_eaddr(struct bge_softc *sc)
537 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
544 * The on-board BGEs found in sun4u machines aren't fitted with
545 * an EEPROM which means that we have to obtain the MAC address
546 * via OFW and that some tests will always fail. We distinguish
547 * such BGEs by the subvendor ID, which also has to be obtained
548 * from OFW instead of the PCI configuration space as the latter
549 * indicates Broadcom as the subvendor of the netboot interface.
550 * For early Blade 1500 and 2500 we even have to check the OFW
551 * device path as the subvendor ID always defaults to Broadcom
/* Subvendor fetched from OFW identifies Fujitsu/Sun on-board parts. */
554 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
555 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
556 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
558 memset(buf, 0, sizeof(buf));
/* Early Blade 1500/2500: match on the OFW device path instead. */
559 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
560 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
561 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
563 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
564 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
/*
 * Read a 32-bit word from the chip's internal memory via the PCI memory
 * window config registers (set base, read data, reset base to 0).
 * NOTE(review): fragment — return type, local declarations, the body of the
 * BCM5906 guard (orig. 579-582) and the final return are missing; the guard
 * presumably bails out because the 5906 window cannot map that region.
 */
572 bge_readmem_ind(struct bge_softc *sc, int off)
577 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
578 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
583 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
584 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
585 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
/*
 * Write a 32-bit word to the chip's internal memory via the PCI memory
 * window config registers; mirrors bge_readmem_ind() above.
 * NOTE(review): fragment — return type, local declarations and the body of
 * the BCM5906 guard (orig. 596-599) are missing from this extract.
 */
590 bge_writemem_ind(struct bge_softc *sc, int off, int val)
594 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
595 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
600 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
601 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
602 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
/*
 * Indirect register read via the PCI register-window config registers.
 * NOTE(review): fragment — return type, local declarations and closing
 * brace are missing from this extract.
 */
607 bge_readreg_ind(struct bge_softc *sc, int off)
613 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
614 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
/*
 * Indirect register write via the PCI register-window config registers;
 * companion of bge_readreg_ind().
 * NOTE(review): fragment — return type, locals and braces are missing.
 */
619 bge_writereg_ind(struct bge_softc *sc, int off, int val)
625 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
626 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
/* Direct memory-mapped register write (plain CSR access, no PCI window).
 * NOTE(review): fragment — return type and braces are missing. */
630 bge_writemem_direct(struct bge_softc *sc, int off, int val)
632 CSR_WRITE_4(sc, off, val);
/*
 * Write a mailbox register.  The BCM5906 places its mailboxes at a
 * different offset, so the address is rebased for that chip.
 * NOTE(review): fragment — return type and braces are missing.
 */
636 bge_writembx(struct bge_softc *sc, int off, int val)
638 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
639 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
641 CSR_WRITE_4(sc, off, val);
645 * Map a single buffer address.
/*
 * bus_dma load callback: records the single segment's bus address into the
 * caller-supplied bge_dmamap_arg.  KASSERT enforces exactly one segment.
 * NOTE(review): fragment — return type, braces, the ctx assignment and the
 * early-return-on-error path (orig. lines around 652-657) are missing.
 */
649 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
651 struct bge_dmamap_arg *ctx;
656 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg))
659 ctx->bge_busaddr = segs->ds_addr;
/*
 * Read one byte from NVRAM: acquire the software arbitration semaphore,
 * enable NVRAM access, issue a word read, poll for completion, extract the
 * requested byte (byte-swapped), then restore access and release the
 * semaphore.
 * NOTE(review): fragment — return type, braces, DELAY() calls, the
 * arbitration-timeout check and return statements are missing from this
 * extract.
 */
663 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
665 uint32_t access, byte = 0;
/* Request NVRAM arbitration and spin until granted. */
669 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
670 for (i = 0; i < 8000; i++) {
671 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
/* Enable NVRAM access. */
679 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
680 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
/* Issue a word-aligned read and poll for completion. */
682 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
683 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
684 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
686 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
692 if (i == BGE_TIMEOUT * 10) {
693 if_printf(sc->bge_ifp, "nvram read timed out\n");
/* Extract the requested byte from the (big-endian) word. */
698 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
700 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
702 /* Disable access. */
703 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
/* Release arbitration; the read flushes the write. */
706 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
707 CSR_READ_4(sc, BGE_NVRAM_SWARB);
713 * Read a sequence of bytes from NVRAM.
/*
 * Copy 'cnt' bytes starting at NVRAM offset 'off' into 'dest'.  Only
 * supported on the BCM5906 (the only chip using this NVRAM path here).
 * Returns 1 on any per-byte error, 0 on success.
 * NOTE(review): fragment — return type, locals, braces and the body of the
 * non-5906 early return (orig. 722) are missing from this extract.
 */
716 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
721 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
724 for (i = 0; i < cnt; i++) {
725 err = bge_nvram_getbyte(sc, off + i, &byte);
731 return (err ? 1 : 0);
735 * Read a byte of data stored in the EEPROM at address 'addr.' The
736 * BCM570x supports both the traditional bitbang interface and an
737 * auto access interface for reading the EEPROM. We use the auto
/*
 * NOTE(review): fragment — return type, locals, braces, DELAY() calls and
 * return statements are missing from this extract.
 */
741 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
747 * Enable use of auto EEPROM access so we can avoid
748 * having to use the bitbang method.
750 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
752 /* Reset the EEPROM, load the clock period. */
753 CSR_WRITE_4(sc, BGE_EE_ADDR,
754 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
757 /* Issue the read EEPROM command. */
758 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
760 /* Wait for completion */
761 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
763 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
767 if (i == BGE_TIMEOUT * 10) {
768 device_printf(sc->bge_dev, "EEPROM read timed out\n");
/* Extract the requested byte from the 32-bit data word. */
773 byte = CSR_READ_4(sc, BGE_EE_DATA);
775 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
781 * Read a sequence of bytes from the EEPROM.
/*
 * Copy 'cnt' bytes starting at EEPROM offset 'off' into 'dest' one byte at
 * a time via bge_eeprom_getbyte().  Returns 1 on error, 0 on success.
 * NOTE(review): fragment — return type, locals, braces and the per-byte
 * store/break (orig. 791-794) are missing from this extract.
 */
784 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
789 for (i = 0; i < cnt; i++) {
790 error = bge_eeprom_getbyte(sc, off + i, &byte);
796 return (error ? 1 : 0);
/*
 * MII bus read method: temporarily disables PHY autopolling (it can cause
 * PCI errors while a manual access is in flight), issues the read command,
 * polls for BUSY to clear, restores autopolling, and returns the low 16
 * bits (0 on READFAIL).
 * NOTE(review): fragment — return type, locals, braces, DELAY() calls and
 * the timeout/READFAIL return statements are missing from this extract.
 */
800 bge_miibus_readreg(device_t dev, int phy, int reg)
802 struct bge_softc *sc;
806 sc = device_get_softc(dev);
808 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
809 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
810 CSR_WRITE_4(sc, BGE_MI_MODE,
811 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
815 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
816 BGE_MIPHY(phy) | BGE_MIREG(reg));
818 /* Poll for the PHY register access to complete. */
819 for (i = 0; i < BGE_TIMEOUT; i++) {
821 val = CSR_READ_4(sc, BGE_MI_COMM);
822 if ((val & BGE_MICOMM_BUSY) == 0) {
/* Re-read once BUSY clears to get the settled value. */
824 val = CSR_READ_4(sc, BGE_MI_COMM);
829 if (i == BGE_TIMEOUT) {
830 device_printf(sc->bge_dev,
831 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
836 /* Restore the autopoll bit if necessary. */
837 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
838 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
842 if (val & BGE_MICOMM_READFAIL)
845 return (val & 0xFFFF);
/*
 * MII bus write method: skips registers the BCM5906 must not touch,
 * disables autopolling around the access (see readreg above), issues the
 * write, polls for BUSY to clear, and restores autopolling.
 * NOTE(review): fragment — return type, locals, braces, DELAY() calls and
 * return statements are missing from this extract.
 */
849 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
851 struct bge_softc *sc;
854 sc = device_get_softc(dev);
/* The 5906 hangs on writes to these PHY registers; skip them. */
856 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
857 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
860 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
861 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
862 CSR_WRITE_4(sc, BGE_MI_MODE,
863 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
867 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
868 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
870 for (i = 0; i < BGE_TIMEOUT; i++) {
872 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
874 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
879 /* Restore the autopoll bit if necessary. */
880 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
881 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
885 if (i == BGE_TIMEOUT)
886 device_printf(sc->bge_dev,
887 "PHY write timed out (phy %d, reg %d, val %d)\n",
/*
 * MII status-change callback: reprograms MAC port mode (GMII vs. MII),
 * duplex, and TX/RX flow-control enables to match the negotiated media.
 * NOTE(review): fragment — return type, braces, the link-speed switch body
 * (orig. 903-921) and several closing braces are missing from this extract.
 */
894 bge_miibus_statchg(device_t dev)
896 struct bge_softc *sc;
897 struct mii_data *mii;
898 sc = device_get_softc(dev);
899 mii = device_get_softc(sc->bge_miibus);
901 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
902 (IFM_ACTIVE | IFM_AVALID)) {
903 switch (IFM_SUBTYPE(mii->mii_media_active)) {
911 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
922 if (sc->bge_link == 0)
/* Select GMII for gigabit media, MII otherwise. */
924 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
925 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
926 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
927 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
929 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
/*
 * NOTE(review): the parenthesization below looks wrong — IFM_FDX is
 * being ANDed into the argument of IFM_OPTIONS() instead of into its
 * result.  The intended test is almost certainly
 * (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0, matching the
 * TXPAUSE/RXPAUSE tests just below.  Confirm against upstream before
 * changing; the function is too incomplete here to patch safely.
 */
931 if (IFM_OPTIONS(mii->mii_media_active & IFM_FDX) != 0) {
932 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
933 if ((IFM_OPTIONS(mii->mii_media_active) &
934 IFM_ETH_TXPAUSE) != 0)
935 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
937 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
938 if ((IFM_OPTIONS(mii->mii_media_active) &
939 IFM_ETH_RXPAUSE) != 0)
940 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
942 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
/* Half duplex: force the HALF_DUPLEX bit and disable flow control. */
944 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
945 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
946 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
951 * Initialize a standard receive ring descriptor.
/*
 * Allocate and DMA-map a fresh mbuf for slot 'i' of the standard RX ring,
 * then fill in the ring descriptor.  Uses a 9k jumbo cluster when the
 * configured MTU does not fit a normal cluster, otherwise MCLBYTES.
 * The spare dmamap is swapped in so the old map can be recycled.
 * NOTE(review): fragment — return type, some locals, NULL-checks after the
 * allocations, error-path m_freem()/returns and closing brace are missing
 * from this extract.
 */
954 bge_newbuf_std(struct bge_softc *sc, int i)
958 bus_dma_segment_t segs[1];
/* Pick cluster size based on MTU (jumbo-capable standard ring). */
962 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
963 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
964 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
965 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
968 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
970 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
973 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align payload unless the chip has the RX alignment bug. */
975 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
976 m_adj(m, ETHER_ALIGN);
978 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
979 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
/* Unload the previous buffer occupying this slot, if any. */
984 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
985 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
986 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
987 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
988 sc->bge_cdata.bge_rx_std_dmamap[i]);
/* Swap in the spare map and record the new mbuf/segment. */
990 map = sc->bge_cdata.bge_rx_std_dmamap[i];
991 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
992 sc->bge_cdata.bge_rx_std_sparemap = map;
993 sc->bge_cdata.bge_rx_std_chain[i] = m;
994 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
995 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
996 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
997 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
998 r->bge_flags = BGE_RXBDFLAG_END;
999 r->bge_len = segs[0].ds_len;
1002 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1003 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1009 * Initialize a jumbo receive ring descriptor. This allocates
1010 * a jumbo buffer from the pool managed internally by the driver.
/*
 * Allocate a 9k jumbo cluster for slot 'i' of the jumbo RX ring and fill
 * in the extended (multi-segment) RX buffer descriptor; the switch on
 * 'nsegs' falls through so larger counts fill the higher segments first.
 * NOTE(review): fragment — return type, some locals, error paths
 * (m_freem()/return on allocation or load failure), the switch header for
 * nsegs and closing brace are missing from this extract.
 */
1013 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1015 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1017 struct bge_extrx_bd *r;
1021 MGETHDR(m, M_DONTWAIT, MT_DATA);
1025 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1026 if (!(m->m_flags & M_EXT)) {
1030 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
/* Align payload unless the chip has the RX alignment bug. */
1031 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1032 m_adj(m, ETHER_ALIGN);
1034 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1035 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
/* Unload the previous buffer occupying this slot, if any. */
1041 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1042 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1043 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1044 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1045 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
/* Swap in the spare map and reset recorded segment lengths. */
1047 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1048 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1049 sc->bge_cdata.bge_rx_jumbo_sparemap;
1050 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1051 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1052 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1053 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1054 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1055 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1058 * Fill in the extended RX buffer descriptor.
1060 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1061 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1063 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
/* Fallthrough cases: populate segments from highest index down. */
1066 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1067 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1068 r->bge_len3 = segs[3].ds_len;
1069 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1071 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1072 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1073 r->bge_len2 = segs[2].ds_len;
1074 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1076 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1077 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1078 r->bge_len1 = segs[1].ds_len;
1079 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1081 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1082 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1083 r->bge_len0 = segs[0].ds_len;
1084 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1087 panic("%s: %d segments\n", __func__, nsegs);
1090 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1091 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
/*
 * Fill the standard RX producer ring with freshly allocated mbufs
 * and publish the ring to the chip.
 */
1097 bge_init_rx_ring_std(struct bge_softc *sc)
1101 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
/* Allocate one buffer per descriptor; stop on the first failure. */
1103 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1104 if ((error = bge_newbuf_std(sc, i)) != 0)
1106 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
/* Flush the ring to memory before telling the chip about it. */
1109 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1110 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
/* Advance the producer mailbox to the last initialized descriptor. */
1113 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
/*
 * Release every buffer on the standard RX ring: sync and unload each
 * slot's DMA map, free the mbuf, and zero the descriptor.
 */
1119 bge_free_rx_ring_std(struct bge_softc *sc)
1123 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1124 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
/* POSTREAD: device wrote into this buffer via DMA. */
1125 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1126 sc->bge_cdata.bge_rx_std_dmamap[i],
1127 BUS_DMASYNC_POSTREAD);
1128 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1129 sc->bge_cdata.bge_rx_std_dmamap[i]);
1130 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1131 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
/* Clear the descriptor regardless of whether the slot was occupied. */
1133 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1134 sizeof(struct bge_rx_bd));
/*
 * Fill the jumbo RX producer ring with 9K buffers, enable the ring's
 * RCB (with extended RX BDs) and publish it to the chip.
 */
1139 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1141 struct bge_rcb *rcb;
1144 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
/* Allocate one jumbo buffer per descriptor; stop on the first failure. */
1146 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1147 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1149 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT)
/* Flush the ring to memory before telling the chip about it. */
1152 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1153 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1157 /* Enable the jumbo receive producer ring. */
1158 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1159 rcb->bge_maxlen_flags =
1160 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1161 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
/* Advance the producer mailbox to the last initialized descriptor. */
1163 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
/*
 * Release every buffer on the jumbo RX ring: sync and unload each
 * slot's DMA map, free the mbuf, and zero the extended descriptor.
 */
1169 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1173 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1174 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
/* POSTREAD: device wrote into this buffer via DMA. */
1175 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1176 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1177 BUS_DMASYNC_POSTREAD);
1178 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1179 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1180 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1181 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
/* Clear the descriptor regardless of whether the slot was occupied. */
1183 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1184 sizeof(struct bge_extrx_bd));
/*
 * Release every pending transmit buffer: sync and unload each slot's
 * DMA map, free the mbuf chain, and zero the TX descriptor.
 */
1189 bge_free_tx_ring(struct bge_softc *sc)
/* Nothing to do if the TX ring was never allocated. */
1193 if (sc->bge_ldata.bge_tx_ring == NULL)
1196 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1197 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
/* POSTWRITE: device read this buffer via DMA on transmit. */
1198 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1199 sc->bge_cdata.bge_tx_dmamap[i],
1200 BUS_DMASYNC_POSTWRITE);
1201 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1202 sc->bge_cdata.bge_tx_dmamap[i]);
1203 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1204 sc->bge_cdata.bge_tx_chain[i] = NULL;
/* Clear the descriptor regardless of whether the slot was occupied. */
1206 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1207 sizeof(struct bge_tx_bd));
/*
 * Reset the transmit ring to an empty state: zero the descriptors,
 * reset the consumer/producer indices and program the TX mailboxes.
 */
1212 bge_init_tx_ring(struct bge_softc *sc)
1215 sc->bge_tx_saved_considx = 0;
1217 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
/* Flush the zeroed ring to memory before the chip can fetch BDs. */
1218 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1219 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1221 /* Initialize transmit producer index for host-memory send ring. */
1222 sc->bge_tx_prodidx = 0;
1223 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
/* Errata workaround: 5700 BX parts need the mailbox written twice. */
1225 /* 5700 b2 errata */
1226 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1227 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1229 /* NIC-memory send ring not used; initialize to zero. */
1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1231 /* 5700 b2 errata */
1232 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1233 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
/*
 * Propagate the interface's IFF_PROMISC flag into the chip's RX mode
 * register. Caller must hold the softc lock.
 */
1239 bge_setpromisc(struct bge_softc *sc)
1243 BGE_LOCK_ASSERT(sc);
1247 /* Enable or disable promiscuous mode as needed. */
1248 if (ifp->if_flags & IFF_PROMISC)
1249 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1251 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
/*
 * Program the 128-bit multicast hash filter (four 32-bit BGE_MAR
 * registers) from the interface's multicast address list. Caller
 * must hold the softc lock.
 */
1255 bge_setmulti(struct bge_softc *sc)
1258 struct ifmultiaddr *ifma;
1259 uint32_t hashes[4] = { 0, 0, 0, 0 };
1262 BGE_LOCK_ASSERT(sc);
/* ALLMULTI/PROMISC: accept everything by setting all filter bits. */
1266 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1267 for (i = 0; i < 4; i++)
1268 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1272 /* First, zot all the existing filters. */
1273 for (i = 0; i < 4; i++)
1274 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1276 /* Now program new ones. */
1277 if_maddr_rlock(ifp);
1278 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1279 if (ifma->ifma_addr->sa_family != AF_LINK)
/*
 * Hash on the low 7 bits of the little-endian CRC32 of the MAC:
 * bits 6-5 pick the MAR register, bits 4-0 pick the bit within it.
 */
1281 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1282 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1283 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1285 if_maddr_runlock(ifp);
1287 for (i = 0; i < 4; i++)
1288 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
/*
 * Propagate the interface's IFCAP_VLAN_HWTAGGING capability into the
 * chip's RX mode register. Note the register bit is inverted: setting
 * KEEP_VLAN_DIAG disables hardware tag stripping. Caller must hold
 * the softc lock.
 */
1292 bge_setvlan(struct bge_softc *sc)
1296 BGE_LOCK_ASSERT(sc);
1300 /* Enable or disable VLAN tag stripping as needed. */
1301 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1302 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1304 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
/*
 * Notify ASF firmware that a reset is about to happen. Writes the
 * firmware mailbox magic, and for the new-handshake protocol also
 * records whether the driver is starting or unloading.
 */
1308 bge_sig_pre_reset(struct bge_softc *sc, int type)
1312 * Some chips don't like this so only do this if ASF is enabled
1314 if (sc->bge_asf_mode)
1315 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
/* New-handshake firmware wants an explicit driver-state transition. */
1317 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1319 case BGE_RESET_START:
1320 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1321 BGE_FW_DRV_STATE_START);
1323 case BGE_RESET_STOP:
1324 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1325 BGE_FW_DRV_STATE_UNLOAD);
/*
 * Notify new-handshake ASF firmware that the reset has completed,
 * recording whether it was a start or an unload sequence.
 */
1332 bge_sig_post_reset(struct bge_softc *sc, int type)
1335 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1337 case BGE_RESET_START:
1338 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1339 BGE_FW_DRV_STATE_START_DONE);
1342 case BGE_RESET_STOP:
1343 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1344 BGE_FW_DRV_STATE_UNLOAD_DONE);
/*
 * Legacy (pre-new-handshake) ASF notification: record the driver
 * start/unload state in the firmware mailbox when ASF is enabled.
 */
1351 bge_sig_legacy(struct bge_softc *sc, int type)
1354 if (sc->bge_asf_mode) {
1356 case BGE_RESET_START:
1357 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1358 BGE_FW_DRV_STATE_START);
1360 case BGE_RESET_STOP:
1361 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1362 BGE_FW_DRV_STATE_UNLOAD);
/*
 * Ask ASF firmware to pause: post a PAUSE command in the command
 * mailbox, raise the driver event bit, then poll (up to 100
 * iterations) for the firmware to acknowledge by clearing it.
 */
1369 bge_stop_fw(struct bge_softc *sc)
1373 if (sc->bge_asf_mode) {
1374 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1375 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1376 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
/* Wait for the firmware to consume the event. */
1378 for (i = 0; i < 100; i++ ) {
1379 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1380 BGE_RX_CPU_DRV_EVENT))
/*
 * Compute the byte/word swap bits for the mode control register.
 * Big-endian hosts additionally swap non-frame data; the BCM5720
 * needs extra host-to-BD coherency and swap bits.
 */
1388 bge_dma_swap_options(struct bge_softc *sc)
1390 uint32_t dma_options;
1392 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1393 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1394 #if BYTE_ORDER == BIG_ENDIAN
1395 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1397 if ((sc)->bge_asicrev == BGE_ASICREV_BCM5720)
1398 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1399 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1400 BGE_MODECTL_HTX2B_ENABLE;
1402 return (dma_options);
1406 * Do endian, PCI and DMA initialization.
1409 bge_chipinit(struct bge_softc *sc)
1411 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1415 /* Set endianness before we access any non-PCI registers. */
1416 misc_ctl = BGE_INIT;
1417 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1418 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1419 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1421 /* Clear the MAC control register */
1422 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1425 * Clear the MAC statistics block in the NIC's
1428 for (i = BGE_STATS_BLOCK;
1429 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1430 BGE_MEMWIN_WRITE(sc, i, 0)
/* Likewise zero the status block area in NIC memory. */
1432 for (i = BGE_STATUS_BLOCK;
1433 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1434 BGE_MEMWIN_WRITE(sc, i, 0);
1436 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1438 * Fix data corruption caused by non-qword write with WB.
1439 * Fix master abort in PCI mode.
1440 * Fix PCI latency timer.
1442 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1443 val |= (1 << 10) | (1 << 12) | (1 << 13);
1444 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1448 * Set up the PCI DMA control register.
1450 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1451 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1452 if (sc->bge_flags & BGE_FLAG_PCIE) {
1453 /* Read watermark not used, 128 bytes for write. */
1454 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1455 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1456 if (BGE_IS_5714_FAMILY(sc)) {
1457 /* 256 bytes for read and write. */
1458 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1459 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
/* 5780 uses the global one-DMA-at-once bit, other 5714s the local one. */
1460 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1461 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1462 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1463 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1465 * In the BCM5703, the DMA read watermark should
1466 * be set to less than or equal to the maximum
1467 * memory read byte count of the PCI-X command
1470 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1471 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1472 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1473 /* 1536 bytes for read, 384 bytes for write. */
1474 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1475 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1477 /* 384 bytes for read and write. */
1478 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1479 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1482 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1483 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1486 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1487 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1488 if (tmp == 6 || tmp == 7)
1490 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1492 /* Set PCI-X DMA write workaround. */
1493 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1496 /* Conventional PCI bus: 256 bytes for read and write. */
1497 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1498 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1500 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1501 sc->bge_asicrev != BGE_ASICREV_BCM5750)
/* 5700/5701 additionally use memory-read-multiple and byte-enable asserts. */
1504 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1505 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1506 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1507 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1508 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1509 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1510 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1511 if (BGE_IS_5717_PLUS(sc)) {
1512 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1513 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1514 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1516 * Enable HW workaround for controllers that misinterpret
1517 * a status tag update and leave interrupts permanently
1520 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1521 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1522 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1524 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1527 * Set up general mode register.
1529 mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1530 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1533 * BCM5701 B5 have a bug causing data corruption when using
1534 * 64-bit DMA reads, which can be terminated early and then
1535 * completed later as 32-bit accesses, in combination with
1538 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1539 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1540 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1543 * Tell the firmware the driver is running
1545 if (sc->bge_asf_mode & ASF_STACKUP)
1546 mode_ctl |= BGE_MODECTL_STACKUP;
1548 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1551 * Disable memory write invalidate. Apparently it is not supported
1552 * properly by these devices. Also ensure that INTx isn't disabled,
1553 * as these chips need it even when using MSI.
1555 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1556 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1558 /* Set the timer prescaler (always 66MHz) */
1559 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1561 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1562 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1563 DELAY(40); /* XXX */
1565 /* Put PHY into ready state */
1566 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1567 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1575 bge_blockinit(struct bge_softc *sc)
1577 struct bge_rcb *rcb;
1580 uint32_t dmactl, val;
1584 * Initialize the memory window pointer register so that
1585 * we can access the first 32K of internal NIC RAM. This will
1586 * allow us to set up the TX send ring RCBs and the RX return
1587 * ring RCBs, plus other things which live in NIC memory.
1589 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1591 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1593 if (!(BGE_IS_5705_PLUS(sc))) {
1594 /* Configure mbuf memory pool */
1595 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1596 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1597 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1599 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1601 /* Configure DMA resource pool */
1602 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1603 BGE_DMA_DESCRIPTORS);
1604 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1607 /* Configure mbuf pool watermarks */
1608 if (BGE_IS_5717_PLUS(sc)) {
1609 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1610 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1611 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1612 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1614 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1615 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1617 } else if (!BGE_IS_5705_PLUS(sc)) {
1618 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1619 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1620 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1621 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1622 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1623 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1624 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1626 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1627 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1628 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1631 /* Configure DMA resource watermarks */
1632 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1633 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1635 /* Enable buffer manager */
1636 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1638 * Change the arbitration algorithm of TXMBUF read request to
1639 * round-robin instead of priority based for BCM5719. When
1640 * TXFIFO is almost empty, RDMA will hold its request until
1641 * TXFIFO is not almost empty.
1643 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1644 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1645 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1647 /* Poll for buffer manager start indication */
1648 for (i = 0; i < BGE_TIMEOUT; i++) {
1650 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1654 if (i == BGE_TIMEOUT) {
1655 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1659 /* Enable flow-through queues */
1660 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1661 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1663 /* Wait until queue initialization is complete */
1664 for (i = 0; i < BGE_TIMEOUT; i++) {
1666 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1670 if (i == BGE_TIMEOUT) {
1671 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1676 * Summary of rings supported by the controller:
1678 * Standard Receive Producer Ring
1679 * - This ring is used to feed receive buffers for "standard"
1680 * sized frames (typically 1536 bytes) to the controller.
1682 * Jumbo Receive Producer Ring
1683 * - This ring is used to feed receive buffers for jumbo sized
1684 * frames (i.e. anything bigger than the "standard" frames)
1685 * to the controller.
1687 * Mini Receive Producer Ring
1688 * - This ring is used to feed receive buffers for "mini"
1689 * sized frames to the controller.
1690 * - This feature required external memory for the controller
1691 * but was never used in a production system. Should always
1694 * Receive Return Ring
1695 * - After the controller has placed an incoming frame into a
1696 * receive buffer that buffer is moved into a receive return
1697 ring. The driver is then responsible for passing the
1698 * buffer up to the stack. Many versions of the controller
1699 * support multiple RR rings.
1702 * - This ring is used for outgoing frames. Many versions of
1703 * the controller support multiple send rings.
1706 /* Initialize the standard receive producer ring control block. */
1707 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1708 rcb->bge_hostaddr.bge_addr_lo =
1709 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1710 rcb->bge_hostaddr.bge_addr_hi =
1711 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1712 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1713 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1714 if (BGE_IS_5717_PLUS(sc)) {
1716 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1717 * Bits 15-2 : Maximum RX frame size
1718 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1721 rcb->bge_maxlen_flags =
1722 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1723 } else if (BGE_IS_5705_PLUS(sc)) {
1725 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1726 * Bits 15-2 : Reserved (should be 0)
1727 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1730 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1733 * Ring size is always XXX entries
1734 * Bits 31-16: Maximum RX frame size
1735 * Bits 15-2 : Reserved (should be 0)
1736 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1739 rcb->bge_maxlen_flags =
1740 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1742 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1743 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1744 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1745 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1747 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1748 /* Write the standard receive producer ring control block. */
1749 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1750 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1751 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1752 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1754 /* Reset the standard receive producer ring producer index. */
1755 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1758 * Initialize the jumbo RX producer ring control
1759 * block. We set the 'ring disabled' bit in the
1760 * flags field until we're actually ready to start
1761 * using this ring (i.e. once we set the MTU
1762 * high enough to require it).
1764 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1765 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1766 /* Get the jumbo receive producer ring RCB parameters. */
1767 rcb->bge_hostaddr.bge_addr_lo =
1768 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1769 rcb->bge_hostaddr.bge_addr_hi =
1770 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1771 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1772 sc->bge_cdata.bge_rx_jumbo_ring_map,
1773 BUS_DMASYNC_PREREAD);
1774 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1775 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1776 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1777 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1778 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1779 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1781 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1782 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1783 rcb->bge_hostaddr.bge_addr_hi);
1784 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1785 rcb->bge_hostaddr.bge_addr_lo);
1786 /* Program the jumbo receive producer ring RCB parameters. */
1787 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1788 rcb->bge_maxlen_flags);
1789 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1790 /* Reset the jumbo receive producer ring producer index. */
1791 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1794 /* Disable the mini receive producer ring RCB. */
1795 if (BGE_IS_5700_FAMILY(sc)) {
1796 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1797 rcb->bge_maxlen_flags =
1798 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1799 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1800 rcb->bge_maxlen_flags);
1801 /* Reset the mini receive producer ring producer index. */
1802 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1805 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1806 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1807 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1808 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1809 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1810 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1811 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1814 * The BD ring replenish thresholds control how often the
1815 * hardware fetches new BD's from the producer rings in host
1816 * memory. Setting the value too low on a busy system can
1817 * starve the hardware and reduce the throughput.
1819 * Set the BD ring replenish thresholds. The recommended
1820 * values are 1/8th the number of descriptors allocated to
1822 * XXX The 5754 requires a lower threshold, so it might be a
1823 * requirement of all 575x family chips. The Linux driver sets
1824 * the lower threshold for all 5705 family chips as well, but there
1825 * are reports that it might not need to be so strict.
1827 * XXX Linux does some extra fiddling here for the 5906 parts as
1830 if (BGE_IS_5705_PLUS(sc))
1833 val = BGE_STD_RX_RING_CNT / 8;
1834 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1835 if (BGE_IS_JUMBO_CAPABLE(sc))
1836 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1837 BGE_JUMBO_RX_RING_CNT/8);
1838 if (BGE_IS_5717_PLUS(sc)) {
1839 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1840 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1844 * Disable all send rings by setting the 'ring disabled' bit
1845 * in the flags field of all the TX send ring control blocks,
1846 * located in NIC memory.
1848 if (!BGE_IS_5705_PLUS(sc))
1849 /* 5700 to 5704 had 16 send rings. */
1850 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1853 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1854 for (i = 0; i < limit; i++) {
1855 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1856 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1857 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1858 vrcb += sizeof(struct bge_rcb);
1861 /* Configure send ring RCB 0 (we use only the first ring) */
1862 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1863 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1864 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1865 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1866 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1867 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1868 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1869 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1871 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1872 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1873 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1874 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1877 * Disable all receive return rings by setting the
1878 * 'ring disabled' bit in the flags field of all the receive
1879 * return ring control blocks, located in NIC memory.
1881 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1882 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1883 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
1884 /* Should be 17, use 16 until we get an SRAM map. */
1886 } else if (!BGE_IS_5705_PLUS(sc))
1887 limit = BGE_RX_RINGS_MAX;
1888 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1889 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1893 /* Disable all receive return rings. */
1894 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1895 for (i = 0; i < limit; i++) {
1896 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1897 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1898 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1899 BGE_RCB_FLAG_RING_DISABLED);
1900 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1901 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1902 (i * (sizeof(uint64_t))), 0);
1903 vrcb += sizeof(struct bge_rcb);
1907 * Set up receive return ring 0. Note that the NIC address
1908 * for RX return rings is 0x0. The return rings live entirely
1909 * within the host, so the nicaddr field in the RCB isn't used.
1911 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1912 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1913 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1914 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1915 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1916 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1917 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1919 /* Set random backoff seed for TX */
1920 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1921 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1922 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1923 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1924 BGE_TX_BACKOFF_SEED_MASK);
1926 /* Set inter-packet gap */
1928 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1929 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1930 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1931 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1934 * Specify which ring to use for packets that don't match
1937 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1940 * Configure number of RX lists. One interrupt distribution
1941 * list, sixteen active lists, one bad frames class.
1943 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1945 /* Initialize RX list placement stats mask. */
1946 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1947 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1949 /* Disable host coalescing until we get it set up */
1950 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1952 /* Poll to make sure it's shut down. */
1953 for (i = 0; i < BGE_TIMEOUT; i++) {
1955 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1959 if (i == BGE_TIMEOUT) {
1960 device_printf(sc->bge_dev,
1961 "host coalescing engine failed to idle\n");
1965 /* Set up host coalescing defaults */
1966 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1967 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1968 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1969 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1970 if (!(BGE_IS_5705_PLUS(sc))) {
1971 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1972 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1974 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1975 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1977 /* Set up address of statistics block */
1978 if (!(BGE_IS_5705_PLUS(sc))) {
1979 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1980 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1981 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1982 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1983 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1984 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1985 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1988 /* Set up address of status block */
1989 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1990 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1991 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1992 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1994 /* Set up status block size. */
1995 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1996 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
1997 val = BGE_STATBLKSZ_FULL;
1998 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2000 val = BGE_STATBLKSZ_32BYTE;
2001 bzero(sc->bge_ldata.bge_status_block, 32);
2003 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2004 sc->bge_cdata.bge_status_map,
2005 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2007 /* Turn on host coalescing state machine */
2008 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2010 /* Turn on RX BD completion state machine and enable attentions */
2011 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2012 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2014 /* Turn on RX list placement state machine */
2015 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2017 /* Turn on RX list selector state machine. */
2018 if (!(BGE_IS_5705_PLUS(sc)))
2019 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2021 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2022 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2023 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2024 BGE_MACMODE_FRMHDR_DMA_ENB;
2026 if (sc->bge_flags & BGE_FLAG_TBI)
2027 val |= BGE_PORTMODE_TBI;
2028 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2029 val |= BGE_PORTMODE_GMII;
2031 val |= BGE_PORTMODE_MII;
2033 /* Turn on DMA, clear stats */
2034 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2036 /* Set misc. local control, enable interrupts on attentions */
2037 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2040 /* Assert GPIO pins for PHY reset */
2041 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2042 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2043 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2044 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2047 /* Turn on DMA completion state machine */
2048 if (!(BGE_IS_5705_PLUS(sc)))
2049 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2051 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2053 /* Enable host coalescing bug fix. */
2054 if (BGE_IS_5755_PLUS(sc))
2055 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2057 /* Request larger DMA burst size to get better performance. */
2058 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2059 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2061 /* Turn on write DMA state machine */
2062 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2065 /* Turn on read DMA state machine */
2066 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2068 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2069 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2071 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2072 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2073 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2074 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2075 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2076 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2077 if (sc->bge_flags & BGE_FLAG_PCIE)
2078 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2079 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2080 val |= BGE_RDMAMODE_TSO4_ENABLE;
2081 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2082 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2083 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2084 val |= BGE_RDMAMODE_TSO6_ENABLE;
2087 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
2088 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2089 BGE_RDMAMODE_H2BNC_VLAN_DET;
2091 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2092 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2093 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2094 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2095 BGE_IS_5717_PLUS(sc)) {
2096 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2098 * Adjust tx margin to prevent TX data corruption and
2099 * fix internal FIFO overflow.
2101 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2102 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2103 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2104 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2105 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2106 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2107 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2108 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2111 * Enable fix for read DMA FIFO overruns.
2112 * The fix is to limit the number of RX BDs
2113 * the hardware would fetch at a fime.
2115 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2116 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2119 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2120 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2121 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2122 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2123 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2124 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2127 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2130 /* Turn on RX data completion state machine */
2131 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2133 /* Turn on RX BD initiator state machine */
2134 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2136 /* Turn on RX data and RX BD initiator state machine */
2137 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2139 /* Turn on Mbuf cluster free state machine */
2140 if (!(BGE_IS_5705_PLUS(sc)))
2141 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2143 /* Turn on send BD completion state machine */
2144 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2146 /* Turn on send data completion state machine */
2147 val = BGE_SDCMODE_ENABLE;
2148 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2149 val |= BGE_SDCMODE_CDELAY;
2150 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2152 /* Turn on send data initiator state machine */
2153 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2154 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2155 BGE_SDIMODE_HW_LSO_PRE_DMA);
2157 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2159 /* Turn on send BD initiator state machine */
2160 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2162 /* Turn on send BD selector state machine */
2163 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2165 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2166 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2167 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2169 /* ack/clear link change events */
2170 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2171 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2172 BGE_MACSTAT_LINK_CHANGED);
2173 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2176 * Enable attention when the link has changed state for
2177 * devices that use auto polling.
2179 if (sc->bge_flags & BGE_FLAG_TBI) {
2180 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2182 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2183 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2186 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2187 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2188 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2189 BGE_EVTENB_MI_INTERRUPT);
2193 * Clear any pending link state attention.
2194 * Otherwise some link state change events may be lost until attention
2195 * is cleared by bge_intr() -> bge_link_upd() sequence.
2196 * It's not necessary on newer BCM chips - perhaps enabling link
2197 * state change attentions implies clearing pending attention.
2199 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2200 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2201 BGE_MACSTAT_LINK_CHANGED);
2203 /* Enable link state change attentions. */
2204 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2209 const struct bge_revision *
2210 bge_lookup_rev(uint32_t chipid)
2212 const struct bge_revision *br;
2214 for (br = bge_revisions; br->br_name != NULL; br++) {
2215 if (br->br_chipid == chipid)
2219 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2220 if (br->br_chipid == BGE_ASICREV(chipid))
2227 const struct bge_vendor *
2228 bge_lookup_vendor(uint16_t vid)
2230 const struct bge_vendor *v;
2232 for (v = bge_vendors; v->v_name != NULL; v++)
2236 panic("%s: unknown vendor %d", __func__, vid);
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 *
 * Note that since the Broadcom controller contains VPD support, we
 * try to get the device name string from the controller itself instead
 * of the compiled-in string. It guarantees we'll always announce the
 * right product name. We fall back to the compiled-in string when
 * VPD is unavailable or corrupt.
 */
2251 bge_probe(device_t dev)
2255 const struct bge_revision *br;
2257 struct bge_softc *sc = device_get_softc(dev);
2258 const struct bge_type *t = bge_devs;
2259 const struct bge_vendor *v;
2264 vid = pci_get_vendor(dev);
2265 did = pci_get_device(dev);
2266 while(t->bge_vid != 0) {
2267 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2268 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2269 BGE_PCIMISCCTL_ASICREV_SHIFT;
2270 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2272 * Find the ASCI revision. Different chips
2273 * use different registers.
2275 switch (pci_get_device(dev)) {
2276 case BCOM_DEVICEID_BCM5717:
2277 case BCOM_DEVICEID_BCM5718:
2278 case BCOM_DEVICEID_BCM5719:
2279 case BCOM_DEVICEID_BCM5720:
2280 id = pci_read_config(dev,
2281 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2283 case BCOM_DEVICEID_BCM57761:
2284 case BCOM_DEVICEID_BCM57765:
2285 case BCOM_DEVICEID_BCM57781:
2286 case BCOM_DEVICEID_BCM57785:
2287 case BCOM_DEVICEID_BCM57791:
2288 case BCOM_DEVICEID_BCM57795:
2289 id = pci_read_config(dev,
2290 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2293 id = pci_read_config(dev,
2294 BGE_PCI_PRODID_ASICREV, 4);
2297 br = bge_lookup_rev(id);
2298 v = bge_lookup_vendor(vid);
2299 if (bge_has_eaddr(sc) &&
2300 pci_get_vpd_ident(dev, &pname) == 0)
2301 snprintf(model, 64, "%s", pname);
2303 snprintf(model, 64, "%s %s", v->v_name,
2304 br != NULL ? br->br_name :
2305 "NetXtreme Ethernet Controller");
2306 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2307 br != NULL ? "" : "unknown ", id);
2308 device_set_desc_copy(dev, buf);
2318 bge_dma_free(struct bge_softc *sc)
2322 /* Destroy DMA maps for RX buffers. */
2323 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2324 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2325 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2326 sc->bge_cdata.bge_rx_std_dmamap[i]);
2328 if (sc->bge_cdata.bge_rx_std_sparemap)
2329 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2330 sc->bge_cdata.bge_rx_std_sparemap);
2332 /* Destroy DMA maps for jumbo RX buffers. */
2333 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2334 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2335 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2336 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2338 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2339 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2340 sc->bge_cdata.bge_rx_jumbo_sparemap);
2342 /* Destroy DMA maps for TX buffers. */
2343 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2344 if (sc->bge_cdata.bge_tx_dmamap[i])
2345 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2346 sc->bge_cdata.bge_tx_dmamap[i]);
2349 if (sc->bge_cdata.bge_rx_mtag)
2350 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2351 if (sc->bge_cdata.bge_tx_mtag)
2352 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2355 /* Destroy standard RX ring. */
2356 if (sc->bge_cdata.bge_rx_std_ring_map)
2357 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2358 sc->bge_cdata.bge_rx_std_ring_map);
2359 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2360 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2361 sc->bge_ldata.bge_rx_std_ring,
2362 sc->bge_cdata.bge_rx_std_ring_map);
2364 if (sc->bge_cdata.bge_rx_std_ring_tag)
2365 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2367 /* Destroy jumbo RX ring. */
2368 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2369 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2370 sc->bge_cdata.bge_rx_jumbo_ring_map);
2372 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2373 sc->bge_ldata.bge_rx_jumbo_ring)
2374 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2375 sc->bge_ldata.bge_rx_jumbo_ring,
2376 sc->bge_cdata.bge_rx_jumbo_ring_map);
2378 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2379 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2381 /* Destroy RX return ring. */
2382 if (sc->bge_cdata.bge_rx_return_ring_map)
2383 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2384 sc->bge_cdata.bge_rx_return_ring_map);
2386 if (sc->bge_cdata.bge_rx_return_ring_map &&
2387 sc->bge_ldata.bge_rx_return_ring)
2388 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2389 sc->bge_ldata.bge_rx_return_ring,
2390 sc->bge_cdata.bge_rx_return_ring_map);
2392 if (sc->bge_cdata.bge_rx_return_ring_tag)
2393 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2395 /* Destroy TX ring. */
2396 if (sc->bge_cdata.bge_tx_ring_map)
2397 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2398 sc->bge_cdata.bge_tx_ring_map);
2400 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2401 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2402 sc->bge_ldata.bge_tx_ring,
2403 sc->bge_cdata.bge_tx_ring_map);
2405 if (sc->bge_cdata.bge_tx_ring_tag)
2406 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2408 /* Destroy status block. */
2409 if (sc->bge_cdata.bge_status_map)
2410 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2411 sc->bge_cdata.bge_status_map);
2413 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2414 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2415 sc->bge_ldata.bge_status_block,
2416 sc->bge_cdata.bge_status_map);
2418 if (sc->bge_cdata.bge_status_tag)
2419 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2421 /* Destroy statistics block. */
2422 if (sc->bge_cdata.bge_stats_map)
2423 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2424 sc->bge_cdata.bge_stats_map);
2426 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2427 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2428 sc->bge_ldata.bge_stats,
2429 sc->bge_cdata.bge_stats_map);
2431 if (sc->bge_cdata.bge_stats_tag)
2432 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2434 if (sc->bge_cdata.bge_buffer_tag)
2435 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2437 /* Destroy the parent tag. */
2438 if (sc->bge_cdata.bge_parent_tag)
2439 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2443 bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2444 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2445 bus_addr_t *paddr, const char *msg)
2447 struct bge_dmamap_arg ctx;
2449 bus_size_t ring_end;
2452 lowaddr = BUS_SPACE_MAXADDR;
2454 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2455 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2456 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2458 device_printf(sc->bge_dev,
2459 "could not create %s dma tag\n", msg);
2462 /* Allocate DMA'able memory for ring. */
2463 error = bus_dmamem_alloc(*tag, (void **)ring,
2464 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2466 device_printf(sc->bge_dev,
2467 "could not allocate DMA'able memory for %s\n", msg);
2470 /* Load the address of the ring. */
2471 ctx.bge_busaddr = 0;
2472 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2473 &ctx, BUS_DMA_NOWAIT);
2475 device_printf(sc->bge_dev,
2476 "could not load DMA'able memory for %s\n", msg);
2479 *paddr = ctx.bge_busaddr;
2480 ring_end = *paddr + maxsize;
2481 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2482 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2484 * 4GB boundary crossed. Limit maximum allowable DMA
2485 * address space to 32bit and try again.
2487 bus_dmamap_unload(*tag, *map);
2488 bus_dmamem_free(*tag, *ring, *map);
2489 bus_dma_tag_destroy(*tag);
2491 device_printf(sc->bge_dev, "4GB boundary crossed, "
2492 "limit DMA address space to 32bit for %s\n", msg);
2496 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2503 bge_dma_alloc(struct bge_softc *sc)
2506 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2509 lowaddr = BUS_SPACE_MAXADDR;
2510 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2511 lowaddr = BGE_DMA_MAXADDR;
2513 * Allocate the parent bus DMA tag appropriate for PCI.
2515 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2516 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2517 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2518 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2520 device_printf(sc->bge_dev,
2521 "could not allocate parent dma tag\n");
2525 /* Create tag for standard RX ring. */
2526 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2527 &sc->bge_cdata.bge_rx_std_ring_tag,
2528 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2529 &sc->bge_cdata.bge_rx_std_ring_map,
2530 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2534 /* Create tag for RX return ring. */
2535 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2536 &sc->bge_cdata.bge_rx_return_ring_tag,
2537 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2538 &sc->bge_cdata.bge_rx_return_ring_map,
2539 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2543 /* Create tag for TX ring. */
2544 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2545 &sc->bge_cdata.bge_tx_ring_tag,
2546 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2547 &sc->bge_cdata.bge_tx_ring_map,
2548 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2553 * Create tag for status block.
2554 * Because we only use single Tx/Rx/Rx return ring, use
2555 * minimum status block size except BCM5700 AX/BX which
2556 * seems to want to see full status block size regardless
2557 * of configured number of ring.
2559 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2560 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2561 sbsz = BGE_STATUS_BLK_SZ;
2564 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2565 &sc->bge_cdata.bge_status_tag,
2566 (uint8_t **)&sc->bge_ldata.bge_status_block,
2567 &sc->bge_cdata.bge_status_map,
2568 &sc->bge_ldata.bge_status_block_paddr, "status block");
2572 /* Create tag for statistics block. */
2573 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2574 &sc->bge_cdata.bge_stats_tag,
2575 (uint8_t **)&sc->bge_ldata.bge_stats,
2576 &sc->bge_cdata.bge_stats_map,
2577 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2581 /* Create tag for jumbo RX ring. */
2582 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2583 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2584 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2585 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2586 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2587 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2592 /* Create parent tag for buffers. */
2594 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2595 boundary = BGE_DMA_BNDRY;
2598 * watchdog timeout issue was observed on BCM5704 which
2599 * lives behind PCI-X bridge(e.g AMD 8131 PCI-X bridge).
2600 * Limiting DMA address space to 32bits seems to address
2603 if (sc->bge_flags & BGE_FLAG_PCIX)
2604 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2606 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2607 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2608 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2609 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2611 device_printf(sc->bge_dev,
2612 "could not allocate buffer dma tag\n");
2615 /* Create tag for Tx mbufs. */
2616 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2617 txsegsz = BGE_TSOSEG_SZ;
2618 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2621 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2623 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2624 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2625 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2626 &sc->bge_cdata.bge_tx_mtag);
2629 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2633 /* Create tag for Rx mbufs. */
2634 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2635 rxmaxsegsz = MJUM9BYTES;
2637 rxmaxsegsz = MCLBYTES;
2638 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2639 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2640 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2643 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2647 /* Create DMA maps for RX buffers. */
2648 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2649 &sc->bge_cdata.bge_rx_std_sparemap);
2651 device_printf(sc->bge_dev,
2652 "can't create spare DMA map for RX\n");
2655 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2656 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2657 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2659 device_printf(sc->bge_dev,
2660 "can't create DMA map for RX\n");
2665 /* Create DMA maps for TX buffers. */
2666 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2667 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2668 &sc->bge_cdata.bge_tx_dmamap[i]);
2670 device_printf(sc->bge_dev,
2671 "can't create DMA map for TX\n");
2676 /* Create tags for jumbo RX buffers. */
2677 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2678 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2679 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2680 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2681 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2683 device_printf(sc->bge_dev,
2684 "could not allocate jumbo dma tag\n");
2687 /* Create DMA maps for jumbo RX buffers. */
2688 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2689 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2691 device_printf(sc->bge_dev,
2692 "can't create spare DMA map for jumbo RX\n");
2695 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2696 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2697 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2699 device_printf(sc->bge_dev,
2700 "can't create DMA map for jumbo RX\n");
/*
 * Return true if this device has more than one port.
 */
2713 bge_has_multiple_ports(struct bge_softc *sc)
2715 device_t dev = sc->bge_dev;
2716 u_int b, d, f, fscan, s;
2718 d = pci_get_domain(dev);
2719 b = pci_get_bus(dev);
2720 s = pci_get_slot(dev);
2721 f = pci_get_function(dev);
2722 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2723 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
/*
 * Return true if MSI can be used with this device.
 */
2732 bge_can_use_msi(struct bge_softc *sc)
2734 int can_use_msi = 0;
2736 /* Disable MSI for polling(4). */
2737 #ifdef DEVICE_POLLING
2740 switch (sc->bge_asicrev) {
2741 case BGE_ASICREV_BCM5714_A0:
2742 case BGE_ASICREV_BCM5714:
2744 * Apparently, MSI doesn't work when these chips are
2745 * configured in single-port mode.
2747 if (bge_has_multiple_ports(sc))
2750 case BGE_ASICREV_BCM5750:
2751 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2752 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2756 if (BGE_IS_575X_PLUS(sc))
2759 return (can_use_msi);
2763 bge_attach(device_t dev)
2766 struct bge_softc *sc;
2767 uint32_t hwcfg = 0, misccfg;
2768 u_char eaddr[ETHER_ADDR_LEN];
2769 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2771 sc = device_get_softc(dev);
2774 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2777 * Map control/status registers.
2779 pci_enable_busmaster(dev);
2782 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2785 if (sc->bge_res == NULL) {
2786 device_printf (sc->bge_dev, "couldn't map memory\n");
2791 /* Save various chip information. */
2793 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2794 BGE_PCIMISCCTL_ASICREV_SHIFT;
2795 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2797 * Find the ASCI revision. Different chips use different
2800 switch (pci_get_device(dev)) {
2801 case BCOM_DEVICEID_BCM5717:
2802 case BCOM_DEVICEID_BCM5718:
2803 case BCOM_DEVICEID_BCM5719:
2804 case BCOM_DEVICEID_BCM5720:
2805 sc->bge_chipid = pci_read_config(dev,
2806 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2808 case BCOM_DEVICEID_BCM57761:
2809 case BCOM_DEVICEID_BCM57765:
2810 case BCOM_DEVICEID_BCM57781:
2811 case BCOM_DEVICEID_BCM57785:
2812 case BCOM_DEVICEID_BCM57791:
2813 case BCOM_DEVICEID_BCM57795:
2814 sc->bge_chipid = pci_read_config(dev,
2815 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2818 sc->bge_chipid = pci_read_config(dev,
2819 BGE_PCI_PRODID_ASICREV, 4);
2822 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2823 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2825 /* Set default PHY address. */
2828 * PHY address mapping for various devices.
2830 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2831 * ---------+-------+-------+-------+-------+
2832 * BCM57XX | 1 | X | X | X |
2833 * BCM5704 | 1 | X | 1 | X |
2834 * BCM5717 | 1 | 8 | 2 | 9 |
2835 * BCM5719 | 1 | 8 | 2 | 9 |
2836 * BCM5720 | 1 | 8 | 2 | 9 |
2838 * Other addresses may respond but they are not
2839 * IEEE compliant PHYs and should be ignored.
2841 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2842 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2843 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2844 f = pci_get_function(dev);
2845 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2846 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2847 BGE_SGDIGSTS_IS_SERDES)
2852 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2853 BGE_CPMU_PHY_STRAP_IS_SERDES)
2861 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2862 * 5705 A0 and A1 chips.
2864 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2865 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2866 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2867 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2868 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2869 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2871 if (bge_has_eaddr(sc))
2872 sc->bge_flags |= BGE_FLAG_EADDR;
2874 /* Save chipset family. */
2875 switch (sc->bge_asicrev) {
2876 case BGE_ASICREV_BCM5717:
2877 case BGE_ASICREV_BCM5719:
2878 case BGE_ASICREV_BCM5720:
2879 case BGE_ASICREV_BCM57765:
2880 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
2881 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
2882 BGE_FLAG_JUMBO_FRAME;
2883 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
2884 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2885 /* Jumbo frame on BCM5719 A0 does not work. */
2886 sc->bge_flags &= ~BGE_FLAG_JUMBO;
2889 case BGE_ASICREV_BCM5755:
2890 case BGE_ASICREV_BCM5761:
2891 case BGE_ASICREV_BCM5784:
2892 case BGE_ASICREV_BCM5785:
2893 case BGE_ASICREV_BCM5787:
2894 case BGE_ASICREV_BCM57780:
2895 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2898 case BGE_ASICREV_BCM5700:
2899 case BGE_ASICREV_BCM5701:
2900 case BGE_ASICREV_BCM5703:
2901 case BGE_ASICREV_BCM5704:
2902 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2904 case BGE_ASICREV_BCM5714_A0:
2905 case BGE_ASICREV_BCM5780:
2906 case BGE_ASICREV_BCM5714:
2907 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
2909 case BGE_ASICREV_BCM5750:
2910 case BGE_ASICREV_BCM5752:
2911 case BGE_ASICREV_BCM5906:
2912 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2914 case BGE_ASICREV_BCM5705:
2915 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2919 /* Set various PHY bug flags. */
2920 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2921 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2922 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2923 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2924 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2925 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2926 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2927 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2928 if (pci_get_subvendor(dev) == DELL_VENDORID)
2929 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2930 if ((BGE_IS_5705_PLUS(sc)) &&
2931 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2932 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
2933 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
2934 sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
2935 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
2936 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
2937 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
2938 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2939 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2940 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2941 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2942 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2943 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
2944 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2945 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
2946 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2948 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2951 /* Identify the chips that use an CPMU. */
2952 if (BGE_IS_5717_PLUS(sc) ||
2953 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2954 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2955 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2956 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2957 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
2958 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
2959 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
2961 sc->bge_mi_mode = BGE_MIMODE_BASE;
2962 /* Enable auto polling for BCM570[0-5]. */
2963 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
2964 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
2967 * All Broadcom controllers have 4GB boundary DMA bug.
2968 * Whenever an address crosses a multiple of the 4GB boundary
2969 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition
2970 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
2971 * state machine will lockup and cause the device to hang.
2973 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
2975 /* BCM5755 or higher and BCM5906 have short DMA bug. */
2976 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
2977 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
2980 * BCM5719 cannot handle DMA requests for DMA segments that
2981 * have larger than 4KB in size. However the maximum DMA
2982 * segment size created in DMA tag is 4KB for TSO, so we
2983 * wouldn't encounter the issue here.
2985 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2986 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
2988 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2989 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
2990 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2991 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2992 sc->bge_flags |= BGE_FLAG_5788;
2995 capmask = BMSR_DEFCAPMASK;
2996 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
2997 (misccfg == 0x4000 || misccfg == 0x8000)) ||
2998 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2999 pci_get_vendor(dev) == BCOM_VENDORID &&
3000 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3001 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3002 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3003 (pci_get_vendor(dev) == BCOM_VENDORID &&
3004 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3005 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3006 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3007 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3008 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3009 /* These chips are 10/100 only. */
3010 capmask &= ~BMSR_EXTSTAT;
3014 * Some controllers seem to require a special firmware to use
3015 * TSO. But the firmware is not available to FreeBSD and Linux
3016 * claims that the TSO performed by the firmware is slower than
3017 * hardware based TSO. Moreover the firmware based TSO has one
3018 * known bug which can't handle TSO if ethernet header + IP/TCP
3019 * header is greater than 80 bytes. The workaround for the TSO
3020 * bug exist but it seems it's too expensive than not using
3021 * TSO at all. Some hardwares also have the TSO bug so limit
3022 * the TSO to the controllers that are not affected TSO issues
3023 * (e.g. 5755 or higher).
3025 if (BGE_IS_5717_PLUS(sc)) {
3026 /* BCM5717 requires different TSO configuration. */
3027 sc->bge_flags |= BGE_FLAG_TSO3;
3028 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3029 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3030 /* TSO on BCM5719 A0 does not work. */
3031 sc->bge_flags &= ~BGE_FLAG_TSO3;
3033 } else if (BGE_IS_5755_PLUS(sc)) {
3035 * BCM5754 and BCM5787 shares the same ASIC id so
3036 * explicit device id check is required.
3037 * Due to unknown reason TSO does not work on BCM5755M.
3039 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3040 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3041 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3042 sc->bge_flags |= BGE_FLAG_TSO;
3046 * Check if this is a PCI-X or PCI Express device.
3048 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) {
3050 * Found a PCI Express capabilities register, this
3051 * must be a PCI Express device.
3053 sc->bge_flags |= BGE_FLAG_PCIE;
3054 sc->bge_expcap = reg;
3055 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3056 sc->bge_asicrev == BGE_ASICREV_BCM5720)
3057 pci_set_max_read_req(dev, 2048);
3058 else if (pci_get_max_read_req(dev) != 4096)
3059 pci_set_max_read_req(dev, 4096);
3062 * Check if the device is in PCI-X Mode.
3063 * (This bit is not valid on PCI Express controllers.)
3065 if (pci_find_cap(dev, PCIY_PCIX, ®) == 0)
3066 sc->bge_pcixcap = reg;
3067 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3068 BGE_PCISTATE_PCI_BUSMODE) == 0)
3069 sc->bge_flags |= BGE_FLAG_PCIX;
3073 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3074 * not actually a MAC controller bug but an issue with the embedded
3075 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3077 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3078 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3080 * Allocate the interrupt, using MSI if possible. These devices
3081 * support 8 MSI messages, but only the first one is used in
3085 if (pci_find_cap(sc->bge_dev, PCIY_MSI, ®) == 0) {
3086 sc->bge_msicap = reg;
3087 if (bge_can_use_msi(sc)) {
3088 msicount = pci_msi_count(dev);
3093 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3095 sc->bge_flags |= BGE_FLAG_MSI;
3100 * All controllers except BCM5700 supports tagged status but
3101 * we use tagged status only for MSI case on BCM5717. Otherwise
3102 * MSI on BCM5717 does not work.
3104 #ifndef DEVICE_POLLING
3105 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3106 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3109 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3110 RF_SHAREABLE | RF_ACTIVE);
3112 if (sc->bge_irq == NULL) {
3113 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3119 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
3120 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
3121 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
3122 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
3124 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3126 /* Try to reset the chip. */
3127 if (bge_reset(sc)) {
3128 device_printf(sc->bge_dev, "chip reset failed\n");
3133 sc->bge_asf_mode = 0;
3134 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3135 BGE_SRAM_DATA_SIG_MAGIC)) {
3136 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3138 sc->bge_asf_mode |= ASF_ENABLE;
3139 sc->bge_asf_mode |= ASF_STACKUP;
3140 if (BGE_IS_575X_PLUS(sc))
3141 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3145 /* Try to reset the chip again the nice way. */
3147 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3148 if (bge_reset(sc)) {
3149 device_printf(sc->bge_dev, "chip reset failed\n");
3154 bge_sig_legacy(sc, BGE_RESET_STOP);
3155 bge_sig_post_reset(sc, BGE_RESET_STOP);
3157 if (bge_chipinit(sc)) {
3158 device_printf(sc->bge_dev, "chip initialization failed\n");
3163 error = bge_get_eaddr(sc, eaddr);
3165 device_printf(sc->bge_dev,
3166 "failed to read station address\n");
3171 /* 5705 limits RX return ring to 512 entries. */
3172 if (BGE_IS_5717_PLUS(sc))
3173 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3174 else if (BGE_IS_5705_PLUS(sc))
3175 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3177 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3179 if (bge_dma_alloc(sc)) {
3180 device_printf(sc->bge_dev,
3181 "failed to allocate DMA resources\n");
3186 bge_add_sysctls(sc);
3188 /* Set default tuneable values. */
3189 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3190 sc->bge_rx_coal_ticks = 150;
3191 sc->bge_tx_coal_ticks = 150;
3192 sc->bge_rx_max_coal_bds = 10;
3193 sc->bge_tx_max_coal_bds = 10;
3195 /* Initialize checksum features to use. */
3196 sc->bge_csum_features = BGE_CSUM_FEATURES;
3197 if (sc->bge_forced_udpcsum != 0)
3198 sc->bge_csum_features |= CSUM_UDP;
3200 /* Set up ifnet structure */
3201 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3203 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3208 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3209 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3210 ifp->if_ioctl = bge_ioctl;
3211 ifp->if_start = bge_start;
3212 ifp->if_init = bge_init;
3213 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3214 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3215 IFQ_SET_READY(&ifp->if_snd);
3216 ifp->if_hwassist = sc->bge_csum_features;
3217 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3219 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3220 ifp->if_hwassist |= CSUM_TSO;
3221 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3223 #ifdef IFCAP_VLAN_HWCSUM
3224 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3226 ifp->if_capenable = ifp->if_capabilities;
3227 #ifdef DEVICE_POLLING
3228 ifp->if_capabilities |= IFCAP_POLLING;
3232 * 5700 B0 chips do not support checksumming correctly due
3235 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3236 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3237 ifp->if_capenable &= ~IFCAP_HWCSUM;
3238 ifp->if_hwassist = 0;
3242 * Figure out what sort of media we have by checking the
3243 * hardware config word in the first 32k of NIC internal memory,
3244 * or fall back to examining the EEPROM if necessary.
3245 * Note: on some BCM5700 cards, this value appears to be unset.
3246 * If that's the case, we have to rely on identifying the NIC
3247 * by its PCI subsystem ID, as we do below for the SysKonnect
3250 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3251 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3252 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3253 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3254 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3256 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3260 hwcfg = ntohl(hwcfg);
3263 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3264 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3265 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3266 if (BGE_IS_5714_FAMILY(sc))
3267 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3269 sc->bge_flags |= BGE_FLAG_TBI;
3272 if (sc->bge_flags & BGE_FLAG_TBI) {
3273 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3275 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3276 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3278 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3279 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3280 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3283 * Do transceiver setup and tell the firmware the
3284 * driver is down so we can try to get access the
3285 * probe if ASF is running. Retry a couple of times
3286 * if we get a conflict with the ASF firmware accessing
3290 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3292 bge_asf_driver_up(sc);
3294 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3295 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3299 device_printf(sc->bge_dev, "Try again\n");
3300 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3304 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3309 * Now tell the firmware we are going up after probing the PHY
3311 if (sc->bge_asf_mode & ASF_STACKUP)
3312 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3316 * When using the BCM5701 in PCI-X mode, data corruption has
3317 * been observed in the first few bytes of some received packets.
3318 * Aligning the packet buffer in memory eliminates the corruption.
3319 * Unfortunately, this misaligns the packet payloads. On platforms
3320 * which do not support unaligned accesses, we will realign the
3321 * payloads by copying the received packets.
3323 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3324 sc->bge_flags & BGE_FLAG_PCIX)
3325 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3328 * Call MI attach routine.
3330 ether_ifattach(ifp, eaddr);
3331 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3333 /* Tell upper layer we support long frames. */
3334 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3339 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3340 /* Take advantage of single-shot MSI. */
3341 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3342 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3343 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3344 taskqueue_thread_enqueue, &sc->bge_tq);
3345 if (sc->bge_tq == NULL) {
3346 device_printf(dev, "could not create taskqueue.\n");
3347 ether_ifdetach(ifp);
3351 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3352 device_get_nameunit(sc->bge_dev));
3353 error = bus_setup_intr(dev, sc->bge_irq,
3354 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3357 ether_ifdetach(ifp);
3359 error = bus_setup_intr(dev, sc->bge_irq,
3360 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3365 device_printf(sc->bge_dev, "couldn't set up irq\n");
3371 bge_release_resources(sc);
/*
 * Device detach method: undo everything bge_attach() set up.
 * The visible fragment deregisters polling, drains the stats callout
 * and the MSI taskqueue task, detaches the ifnet, removes the TBI
 * media list or the miibus child, and releases bus resources.
 * NOTE(review): this extract is missing lines (the return-type line,
 * braces, 'ifp' initialization, locking/bge_stop calls) — verify
 * against the complete driver source before editing.
 */
3377 bge_detach(device_t dev)
3379 struct bge_softc *sc;
3382 sc = device_get_softc(dev);
3385 #ifdef DEVICE_POLLING
/* Polling must be deregistered before the ifnet disappears. */
3386 if (ifp->if_capenable & IFCAP_POLLING)
3387 ether_poll_deregister(ifp);
/* Wait for any in-flight callout/task to finish before teardown. */
3395 callout_drain(&sc->bge_stat_ch);
3398 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3399 ether_ifdetach(ifp);
/* TBI (fiber) NICs have a driver-owned media list; copper uses miibus. */
3401 if (sc->bge_flags & BGE_FLAG_TBI) {
3402 ifmedia_removeall(&sc->bge_ifmedia);
3404 bus_generic_detach(dev);
3405 device_delete_child(dev, sc->bge_miibus);
3408 bge_release_resources(sc);
/*
 * Free the bus-level resources acquired during attach: taskqueue,
 * interrupt handler, IRQ resource (MSI rid 1, legacy rid 0), the MSI
 * allocation itself, the BAR(0) memory window, the ifnet, and the
 * softc mutex.  Safe to call on a partially attached softc — every
 * release is guarded by a NULL/flag check.
 * NOTE(review): the 'dev' local and some braces are not visible in
 * this extract; presumably dev = sc->bge_dev — confirm in full source.
 */
3414 bge_release_resources(struct bge_softc *sc)
3420 if (sc->bge_tq != NULL)
3421 taskqueue_free(sc->bge_tq);
3423 if (sc->bge_intrhand != NULL)
3424 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3426 if (sc->bge_irq != NULL)
/* MSI interrupts live at rid 1, INTx at rid 0. */
3427 bus_release_resource(dev, SYS_RES_IRQ,
3428 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3430 if (sc->bge_flags & BGE_FLAG_MSI)
3431 pci_release_msi(dev);
3433 if (sc->bge_res != NULL)
3434 bus_release_resource(dev, SYS_RES_MEMORY,
3435 PCIR_BAR(0), sc->bge_res);
3437 if (sc->bge_ifp != NULL)
3438 if_free(sc->bge_ifp);
3442 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3443 BGE_LOCK_DESTROY(sc);
/*
 * Perform a full chip reset and bring the controller back to a sane
 * post-reset state.  Sequence visible here:
 *   1. pick a register-write strategy (direct/indirect) by chip family;
 *   2. save PCI config state that the reset will clobber;
 *   3. hand the bootcode a magic value in SRAM so we can detect when
 *      its re-initialization finishes;
 *   4. issue the core-clock reset (with PCIe/GPHY chip quirks);
 *   5. restore PCI state, fix PCI-X/PCIe ordering and payload settings,
 *      re-enable MSI and the memory arbiter;
 *   6. wait for firmware handshake / VCPU init / PCISTATE to settle;
 *   7. re-apply DMA byte-swap options, ASF stack-up, SERDES and FIFO
 *      workarounds.
 * NOTE(review): this extract drops many lines (locals 'dev', 'devctl',
 * 'i', several braces, DELAY()s and the return statement) — do not
 * treat it as complete; verify ordering against the full source.
 */
3447 bge_reset(struct bge_softc *sc)
3450 uint32_t cachesize, command, pcistate, reset, val;
3451 void (*write_op)(struct bge_softc *, int, int);
/* Newer (575X+) non-5714/5906 chips need memory-window writes. */
3457 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3458 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3459 if (sc->bge_flags & BGE_FLAG_PCIE)
3460 write_op = bge_writemem_direct;
3462 write_op = bge_writemem_ind;
3464 write_op = bge_writereg_ind;
3466 /* Save some important PCI state. */
3467 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3468 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3469 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3471 pci_write_config(dev, BGE_PCI_MISC_CTL,
3472 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3473 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3475 /* Disable fastboot on controllers that support it. */
3476 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3477 BGE_IS_5755_PLUS(sc)) {
3479 device_printf(dev, "Disabling fastboot\n");
3480 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3484 * Write the magic number to SRAM at offset 0xB50.
3485 * When firmware finishes its initialization it will
3486 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3488 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3490 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3492 /* XXX: Broadcom Linux driver. */
3493 if (sc->bge_flags & BGE_FLAG_PCIE) {
3494 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3495 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3496 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3497 /* Prevent PCIE link training during global reset */
3498 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3504 * Set GPHY Power Down Override to leave GPHY
3505 * powered up in D0 uninitialized.
3507 if (BGE_IS_5705_PLUS(sc) &&
3508 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3509 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3511 /* Issue global reset */
3512 write_op(sc, BGE_MISC_CFG, reset);
/* 5906: kick the on-chip VCPU out of reset/halt by hand. */
3514 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3515 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3516 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3517 val | BGE_VCPU_STATUS_DRV_RESET);
3518 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3519 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3520 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3525 /* XXX: Broadcom Linux driver. */
3526 if (sc->bge_flags & BGE_FLAG_PCIE) {
3527 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3528 DELAY(500000); /* wait for link training to complete */
3529 val = pci_read_config(dev, 0xC4, 4);
3530 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3532 devctl = pci_read_config(dev,
3533 sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
3534 /* Clear enable no snoop and disable relaxed ordering. */
3535 devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE |
3536 PCIM_EXP_CTL_NOSNOOP_ENABLE);
3537 /* Set PCIE max payload size to 128. */
3538 devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
3539 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
3541 /* Clear error status. */
3542 pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
3543 PCIM_EXP_STA_CORRECTABLE_ERROR |
3544 PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR |
3545 PCIM_EXP_STA_UNSUPPORTED_REQ, 2);
3548 /* Reset some of the PCI state that got zapped by reset. */
3549 pci_write_config(dev, BGE_PCI_MISC_CTL,
3550 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3551 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3552 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3553 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3554 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3556 * Disable PCI-X relaxed ordering to ensure status block update
3557 * comes first then packet buffer DMA. Otherwise driver may
3558 * read stale status block.
3560 if (sc->bge_flags & BGE_FLAG_PCIX) {
3561 devctl = pci_read_config(dev,
3562 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3563 devctl &= ~PCIXM_COMMAND_ERO;
3564 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3565 devctl &= ~PCIXM_COMMAND_MAX_READ;
3566 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3567 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3568 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3569 PCIXM_COMMAND_MAX_READ);
3570 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3572 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3575 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3576 if (BGE_IS_5714_FAMILY(sc)) {
3577 /* This chip disables MSI on reset. */
3578 if (sc->bge_flags & BGE_FLAG_MSI) {
3579 val = pci_read_config(dev,
3580 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3581 pci_write_config(dev,
3582 sc->bge_msicap + PCIR_MSI_CTRL,
3583 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3584 val = CSR_READ_4(sc, BGE_MSI_MODE);
3585 CSR_WRITE_4(sc, BGE_MSI_MODE,
3586 val | BGE_MSIMODE_ENABLE);
3588 val = CSR_READ_4(sc, BGE_MARB_MODE);
3589 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3591 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
/* 5906: wait for the VCPU to report that its init completed. */
3593 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3594 for (i = 0; i < BGE_TIMEOUT; i++) {
3595 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3596 if (val & BGE_VCPU_STATUS_INIT_DONE)
3600 if (i == BGE_TIMEOUT) {
3601 device_printf(dev, "reset timed out\n");
3606 * Poll until we see the 1's complement of the magic number.
3607 * This indicates that the firmware initialization is complete.
3608 * We expect this to fail if no chip containing the Ethernet
3609 * address is fitted though.
3611 for (i = 0; i < BGE_TIMEOUT; i++) {
3613 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3614 if (val == ~BGE_SRAM_FW_MB_MAGIC)
/* Only warn: NICs without the EEPROM/eaddr chip legitimately time out. */
3618 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3620 "firmware handshake timed out, found 0x%08x\n",
3622 /* BCM57765 A0 needs additional time before accessing. */
3623 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3624 DELAY(10 * 1000); /* XXX */
3628 * XXX Wait for the value of the PCISTATE register to
3629 * return to its original pre-reset state. This is a
3630 * fairly good indicator of reset completion. If we don't
3631 * wait for the reset to fully complete, trying to read
3632 * from the device's non-PCI registers may yield garbage
3635 for (i = 0; i < BGE_TIMEOUT; i++) {
3636 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3641 /* Fix up byte swapping. */
3642 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3644 /* Tell the ASF firmware we are up */
3645 if (sc->bge_asf_mode & ASF_STACKUP)
3646 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3648 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3651 * The 5704 in TBI mode apparently needs some special
3652 * adjustment to insure the SERDES drive level is set
3655 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3656 sc->bge_flags & BGE_FLAG_TBI) {
3657 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3658 val = (val & ~0xFFF) | 0x880;
3659 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3662 /* XXX: Broadcom Linux driver. */
3663 if (sc->bge_flags & BGE_FLAG_PCIE &&
3664 !BGE_IS_5717_PLUS(sc) &&
3665 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3666 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3667 /* Enable Data FIFO protection. */
3668 val = CSR_READ_4(sc, 0x7C00);
3669 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3673 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
3674 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3675 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
/*
 * Requeue standard-ring RX slot 'i' without allocating a new mbuf,
 * used when the received frame had an error or mbuf allocation failed.
 * Re-writes the descriptor at the current producer index and advances
 * sc->bge_std.
 * NOTE(review): the 'r->bge_idx = i;' store and the braces are missing
 * from this extract — confirm against the full source.
 */
static __inline void
bge_rxreuse_std(struct bge_softc *sc, int i)
struct bge_rx_bd *r;
r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
r->bge_flags = BGE_RXBDFLAG_END;
/* Reuse the DMA segment length recorded when the buffer was loaded. */
r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
/*
 * Jumbo-ring counterpart of bge_rxreuse_std(): recycle jumbo RX slot
 * 'i' in place.  Jumbo buffers are described by an extended BD with
 * four scatter segments, so all four recorded segment lengths are
 * restored before advancing the sc->bge_jumbo producer index.
 * NOTE(review): braces and the 'r->bge_idx = i;' store appear to be
 * missing from this extract — confirm against the full source.
 */
static __inline void
bge_rxreuse_jumbo(struct bge_softc *sc, int i)
struct bge_extrx_bd *r;
r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3708 * Frame reception handling. This is called if there's a frame
3709 * on the receive return list.
3711 * Note: we have to be able to handle two possibilities here:
3712 * 1) the frame is from the jumbo receive ring
3713 * 2) the frame is from the standard receive ring
/*
 * Drain the RX return ring from sc->bge_rx_saved_considx up to
 * rx_prod.  For each descriptor: replenish (or recycle on error) the
 * originating std/jumbo ring slot, fix payload alignment on
 * strict-alignment platforms, fill in mbuf metadata (len, rcvif,
 * checksum, VLAN tag) and pass the packet to if_input.  Finally sync
 * the rings and write the updated consumer/producer indices to the
 * chip mailboxes.  Returns the number of packets processed (rx_npkts).
 * 'holdlck' selects whether the driver lock is held across if_input
 * (several lock-juggling lines are not visible in this extract).
 * NOTE(review): many lines (locals, braces, continue/break statements,
 * the lock drop/reacquire around if_input) are missing here — treat
 * this fragment as indicative only.
 */
3717 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3720 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3723 rx_cons = sc->bge_rx_saved_considx;
3725 /* Nothing to do. */
3726 if (rx_cons == rx_prod)
/* Sync descriptor memory before the CPU inspects it. */
3731 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3732 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3733 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3734 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
/* The jumbo ring is only active when the MTU exceeds one cluster. */
3735 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3736 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3737 (MCLBYTES - ETHER_ALIGN))
3738 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3739 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3741 while (rx_cons != rx_prod) {
3742 struct bge_rx_bd *cur_rx;
3744 struct mbuf *m = NULL;
3745 uint16_t vlan_tag = 0;
3748 #ifdef DEVICE_POLLING
/* In polling mode, honor the per-call packet budget. */
3749 if (ifp->if_capenable & IFCAP_POLLING) {
3750 if (sc->rxcycles <= 0)
3756 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3758 rxidx = cur_rx->bge_idx;
3759 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3761 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3762 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3764 vlan_tag = cur_rx->bge_vlan_tag;
/* Dispatch on the source ring: jumbo vs. standard. */
3767 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3769 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3770 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3771 bge_rxreuse_jumbo(sc, rxidx);
/* Allocation failure: recycle the old buffer, drop the frame. */
3774 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3775 bge_rxreuse_jumbo(sc, rxidx);
3779 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3782 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3783 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3784 bge_rxreuse_std(sc, rxidx);
3787 if (bge_newbuf_std(sc, rxidx) != 0) {
3788 bge_rxreuse_std(sc, rxidx);
3792 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3796 #ifndef __NO_STRICT_ALIGNMENT
3798 * For architectures with strict alignment we must make sure
3799 * the payload is aligned.
3801 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3802 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3804 m->m_data += ETHER_ALIGN;
/* Chip includes the CRC in bge_len; strip it from the mbuf length. */
3807 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3808 m->m_pkthdr.rcvif = ifp;
3810 if (ifp->if_capenable & IFCAP_RXCSUM)
3811 bge_rxcsum(sc, cur_rx, m);
3814 * If we received a packet with a vlan tag,
3815 * attach that information to the packet.
3818 m->m_pkthdr.ether_vtag = vlan_tag;
3819 m->m_flags |= M_VLANTAG;
3824 (*ifp->if_input)(ifp, m);
3827 (*ifp->if_input)(ifp, m);
/* Interface may have been stopped by if_input reentry; bail out. */
3830 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3834 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3835 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3837 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3838 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3841 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3842 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
/* Publish new consumer/producer indices to the hardware mailboxes. */
3844 sc->bge_rx_saved_considx = rx_cons;
3845 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3847 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3848 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3850 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3851 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3854 * This register wraps very quickly under heavy packet drops.
3855 * If you need correct statistics, you can enable this check.
3857 if (BGE_IS_5705_PLUS(sc))
3858 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
/*
 * Translate hardware receive-checksum results from the RX descriptor
 * into mbuf csum_flags.  5717+ chips report errors via
 * bge_error_flag; older chips expose the raw IP checksum value
 * (valid when it one's-complements to zero) and the TCP/UDP pseudo
 * checksum, which is only trusted for frames of at least
 * ETHER_MIN_NOPAD bytes.
 * NOTE(review): the CSUM_PSEUDO_HDR flag lines and closing braces are
 * not visible in this extract.
 */
3864 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3867 if (BGE_IS_5717_PLUS(sc)) {
/* 5717+: hardware validates IPv4 only; skip IPv6 descriptors. */
3868 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3869 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3870 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3871 if ((cur_rx->bge_error_flag &
3872 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3873 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3875 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3876 m->m_pkthdr.csum_data =
3877 cur_rx->bge_tcp_udp_csum;
3878 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
/* Pre-5717 path: verify the raw checksum value ourselves. */
3883 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3884 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3885 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3886 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3888 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3889 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3890 m->m_pkthdr.csum_data =
3891 cur_rx->bge_tcp_udp_csum;
3892 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
/*
 * Reclaim completed TX descriptors: walk the TX ring from the saved
 * consumer index up to the hardware's tx_cons, unloading DMA maps and
 * freeing the transmitted mbufs.  Must be called with the driver lock
 * held (asserted below).  Clears IFF_DRV_OACTIVE so bge_start can
 * queue more frames.
 * NOTE(review): the bge_txcnt decrement, the opkts accounting and the
 * watchdog-timer reset lines are not visible in this extract.
 */
3899 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
3901 struct bge_tx_bd *cur_tx;
3904 BGE_LOCK_ASSERT(sc);
3906 /* Nothing to do. */
3907 if (sc->bge_tx_saved_considx == tx_cons)
3912 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3913 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3915 * Go through our tx ring and free mbufs for those
3916 * frames that have been sent.
3918 while (sc->bge_tx_saved_considx != tx_cons) {
3921 idx = sc->bge_tx_saved_considx;
3922 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3923 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
/* Only the first descriptor of a chain owns an mbuf/DMA map. */
3925 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3926 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
3927 sc->bge_cdata.bge_tx_dmamap[idx],
3928 BUS_DMASYNC_POSTWRITE);
3929 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
3930 sc->bge_cdata.bge_tx_dmamap[idx]);
3931 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3932 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3935 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3938 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3939 if (sc->bge_txcnt == 0)
#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point.  Snapshots the status block (producer/
 * consumer indices and the status word), clears it, notes any pending
 * link event, then runs bge_rxeof (bounded by 'count' via
 * sc->rxcycles) and bge_txeof, restarting transmission if the send
 * queue is non-empty.  Returns the number of RX packets handled.
 * NOTE(review): the static/return-type line, locking and the
 * link-update call for POLL_AND_CHECK_STATUS are not visible in this
 * extract.
 */
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
struct bge_softc *sc = ifp->if_softc;
uint16_t rx_prod, tx_cons;
uint32_t statusword;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Snapshot the DMA'd status block coherently. */
bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
sc->bge_cdata.bge_status_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
statusword = sc->bge_ldata.bge_status_block->bge_status;
sc->bge_ldata.bge_status_block->bge_status = 0;
bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
sc->bge_cdata.bge_status_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
/* 5700 (pre-B2), TBI and pending-event cases need a link re-check. */
if (cmd == POLL_AND_CHECK_STATUS)
if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
sc->rxcycles = count;
rx_npkts = bge_rxeof(sc, rx_prod, 1);
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
bge_txeof(sc, tx_cons);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
bge_start_locked(ifp);
#endif /* DEVICE_POLLING */
/*
 * MSI filter (fast interrupt) handler: simply defer all work to
 * bge_intr_task via the driver's fast taskqueue.  Safe because the
 * vector is not shared and, per the comment below, the controller has
 * already masked further interrupts in one-shot MSI mode.
 */
3997 bge_msi_intr(void *arg)
3999 struct bge_softc *sc;
4001 sc = (struct bge_softc *)arg;
4003 * This interrupt is not shared and controller already
4004 * disabled further interrupt.
4006 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4007 return (FILTER_HANDLED);
/*
 * Taskqueue handler behind bge_msi_intr: does the real interrupt work
 * in thread context.  Snapshots and clears the status block, handles
 * link-state changes, re-arms the controller by writing the status
 * tag to the IRQ0 mailbox, then processes RX and TX completions and
 * restarts the transmit path.
 * NOTE(review): locking (BGE_LOCK/BGE_UNLOCK), the tagged-status
 * fallback branch and closing braces are not visible in this extract.
 */
4011 bge_intr_task(void *arg, int pending)
4013 struct bge_softc *sc;
4015 uint32_t status, status_tag;
4016 uint16_t rx_prod, tx_cons;
4018 sc = (struct bge_softc *)arg;
4022 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4027 /* Get updated status block. */
4028 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4029 sc->bge_cdata.bge_status_map,
4030 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4032 /* Save producer/consumer indexess. */
4033 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4034 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4035 status = sc->bge_ldata.bge_status_block->bge_status;
/* Status tag goes into bits 31:24 of the mailbox ack value. */
4036 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4037 sc->bge_ldata.bge_status_block->bge_status = 0;
4038 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4039 sc->bge_cdata.bge_status_map,
4040 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4041 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4044 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4047 /* Let controller work. */
4048 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4050 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4051 sc->bge_rx_saved_considx != rx_prod) {
4052 /* Check RX return ring producer/consumer. */
4054 bge_rxeof(sc, rx_prod, 0);
4057 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4058 /* Check TX ring producer/consumer. */
4059 bge_txeof(sc, tx_cons);
4060 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4061 bge_start_locked(ifp);
/*
 * Legacy (INTx) interrupt handler body.
 * NOTE(review): the function header (return type, name 'bge_intr',
 * parameter and opening brace) plus the lock acquisition and several
 * early-out lines are missing from this extract.  What is visible:
 * ack via the IRQ0 mailbox, PCI-flush/link-status read, status-block
 * snapshot, link handling, then RX/TX completion and transmit restart.
 */
4069 struct bge_softc *sc;
4071 uint32_t statusword;
4072 uint16_t rx_prod, tx_cons;
4080 #ifdef DEVICE_POLLING
/* If polling is active, the interrupt path must not run. */
4081 if (ifp->if_capenable & IFCAP_POLLING) {
4088 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4089 * disable interrupts by writing nonzero like we used to, since with
4090 * our current organization this just gives complications and
4091 * pessimizations for re-enabling interrupts. We used to have races
4092 * instead of the necessary complications. Disabling interrupts
4093 * would just reduce the chance of a status update while we are
4094 * running (by switching to the interrupt-mode coalescence
4095 * parameters), but this chance is already very low so it is more
4096 * efficient to get another interrupt than prevent it.
4098 * We do the ack first to ensure another interrupt if there is a
4099 * status update after the ack. We don't check for the status
4100 * changing later because it is more efficient to get another
4101 * interrupt than prevent it, not quite as above (not checking is
4102 * a smaller optimization than not toggling the interrupt enable,
4103 * since checking doesn't involve PCI accesses and toggling require
4104 * the status check). So toggling would probably be a pessimization
4105 * even with MSI. It would only be needed for using a task queue.
4107 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4110 * Do the mandatory PCI flush as well as get the link status.
4112 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4114 /* Make sure the descriptor ring indexes are coherent. */
4115 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4116 sc->bge_cdata.bge_status_map,
4117 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4118 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4119 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4120 sc->bge_ldata.bge_status_block->bge_status = 0;
4121 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4122 sc->bge_cdata.bge_status_map,
4123 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 5700 pre-B2 chips and pending link events need explicit handling. */
4125 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4126 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4127 statusword || sc->bge_link_evt)
4130 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4131 /* Check RX return ring producer/consumer. */
4132 bge_rxeof(sc, rx_prod, 1);
4135 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4136 /* Check TX ring producer/consumer. */
4137 bge_txeof(sc, tx_cons);
4140 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4141 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4142 bge_start_locked(ifp);
/*
 * Feed the ASF/IPMI firmware watchdog.  Called periodically (from
 * bge_tick); every second call — the counter starts at 2, i.e. about
 * every two seconds — it posts a DRV_ALIVE command with the heartbeat
 * timeout into the firmware mailbox region and raises an RX-CPU event
 * so the firmware notices it.  No-op unless ASF stack-up mode is set.
 */
4148 bge_asf_driver_up(struct bge_softc *sc)
4150 if (sc->bge_asf_mode & ASF_STACKUP) {
4151 /* Send ASF heartbeat aprox. every 2s */
4152 if (sc->bge_asf_count)
4153 sc->bge_asf_count --;
4155 sc->bge_asf_count = 2;
4156 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4157 BGE_FW_CMD_DRV_ALIVE);
4158 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4159 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4160 BGE_FW_HB_TIMEOUT_SEC);
/* Ring the RX CPU doorbell so firmware processes the command. */
4161 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4162 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4163 BGE_RX_CPU_DRV_EVENT);
/*
 * Once-per-second callout (body; the 'static void bge_tick(void *xsc)'
 * header line is not visible in this extract).  Updates statistics
 * (register-based on 5705+, SRAM-based otherwise), ticks the MII state
 * machine for copper PHYs when the link is down, manually triggers a
 * link-status interrupt for TBI/fiber NICs (auto-polling unusable
 * there), sends the ASF heartbeat, and re-arms itself via
 * callout_reset.  Runs with the driver lock held (asserted).
 */
4171 struct bge_softc *sc = xsc;
4172 struct mii_data *mii = NULL;
4174 BGE_LOCK_ASSERT(sc);
4176 /* Synchronize with possible callout reset/stop. */
4177 if (callout_pending(&sc->bge_stat_ch) ||
4178 !callout_active(&sc->bge_stat_ch))
4181 if (BGE_IS_5705_PLUS(sc))
4182 bge_stats_update_regs(sc);
4184 bge_stats_update(sc);
4186 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4187 mii = device_get_softc(sc->bge_miibus);
4189 * Do not touch PHY if we have link up. This could break
4190 * IPMI/ASF mode or produce extra input errors
4191 * (extra errors was reported for bcm5701 & bcm5704).
4197 * Since in TBI mode auto-polling can't be used we should poll
4198 * link status manually. Here we register pending link event
4199 * and trigger interrupt.
4201 #ifdef DEVICE_POLLING
4202 /* In polling mode we poll link state in bge_poll(). */
4203 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
/* 5700/5788 need a local-control kick; others use coalesce-now. */
4207 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4208 sc->bge_flags & BGE_FLAG_5788)
4209 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4211 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4215 bge_asf_driver_up(sc);
4218 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
/*
 * Accumulate MAC statistics for 5705+ controllers, which expose
 * clear-on-read hardware counters instead of a DMA'd stats block.
 * Each CSR_READ_4 both fetches and resets the counter, so the values
 * are added into the software struct bge_mac_stats accumulators.
 * Finally mirrors collision and input-error totals into the ifnet
 * counters for netstat.
 */
4222 bge_stats_update_regs(struct bge_softc *sc)
4225 struct bge_mac_stats *stats;
4228 stats = &sc->bge_mac_stats;
/* --- Transmit-side MAC counters --- */
4230 stats->ifHCOutOctets +=
4231 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4232 stats->etherStatsCollisions +=
4233 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4234 stats->outXonSent +=
4235 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4236 stats->outXoffSent +=
4237 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4238 stats->dot3StatsInternalMacTransmitErrors +=
4239 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4240 stats->dot3StatsSingleCollisionFrames +=
4241 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4242 stats->dot3StatsMultipleCollisionFrames +=
4243 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4244 stats->dot3StatsDeferredTransmissions +=
4245 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4246 stats->dot3StatsExcessiveCollisions +=
4247 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4248 stats->dot3StatsLateCollisions +=
4249 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4250 stats->ifHCOutUcastPkts +=
4251 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4252 stats->ifHCOutMulticastPkts +=
4253 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4254 stats->ifHCOutBroadcastPkts +=
4255 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
/* --- Receive-side MAC counters --- */
4257 stats->ifHCInOctets +=
4258 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4259 stats->etherStatsFragments +=
4260 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4261 stats->ifHCInUcastPkts +=
4262 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4263 stats->ifHCInMulticastPkts +=
4264 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4265 stats->ifHCInBroadcastPkts +=
4266 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4267 stats->dot3StatsFCSErrors +=
4268 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4269 stats->dot3StatsAlignmentErrors +=
4270 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4271 stats->xonPauseFramesReceived +=
4272 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4273 stats->xoffPauseFramesReceived +=
4274 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4275 stats->macControlFramesReceived +=
4276 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4277 stats->xoffStateEntered +=
4278 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4279 stats->dot3StatsFramesTooLong +=
4280 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4281 stats->etherStatsJabbers +=
4282 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4283 stats->etherStatsUndersizePkts +=
4284 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
/* --- Receive list placement (local) statistics --- */
4286 stats->FramesDroppedDueToFilters +=
4287 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4288 stats->DmaWriteQueueFull +=
4289 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4290 stats->DmaWriteHighPriQueueFull +=
4291 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4292 stats->NoMoreRxBDs +=
4293 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4294 stats->InputDiscards +=
4295 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4296 stats->InputErrors +=
4297 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4298 stats->RecvThresholdHit +=
4299 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
/* Export accumulated totals through the ifnet counters. */
4301 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4302 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4303 stats->InputErrors);
/*
 * Zero the hardware MAC statistics on 5705+ controllers.  The
 * counters are clear-on-read, so reading each register (and
 * discarding the value) resets it.  Companion to
 * bge_stats_update_regs(), which reads the same register set.
 */
4307 bge_stats_clear_regs(struct bge_softc *sc)
/* Transmit-side counters. */
4310 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4311 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4312 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4313 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4314 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4315 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4316 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4317 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4318 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4319 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4320 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4321 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4322 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
/* Receive-side counters. */
4324 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4325 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4326 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4327 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4328 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4329 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4330 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4331 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4332 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4333 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4334 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4335 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4336 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4337 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
/* Receive list placement (local) counters. */
4339 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4340 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4341 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4342 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4343 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4344 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4345 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
/*
 * bge_stats_update: update ifnet counters for chips that keep their
 * statistics block in NIC memory, read through the memory window.
 * The hardware counters are free-running, so we accumulate the delta
 * against the values saved from the previous poll.
 */
4349 bge_stats_update(struct bge_softc *sc)
4353 uint32_t cnt; /* current register value */
/* Base offset of the statistics block within the memory window. */
4357 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
/* Read the low word of one 64-bit counter from the stats block. */
4359 #define READ_STAT(sc, stats, stat) \
4360 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
/* Collisions: add the delta since the last poll. */
4362 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4363 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4364 sc->bge_tx_collisions = cnt;
/* RX discards count as input errors. */
4366 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4367 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4368 sc->bge_rx_discards = cnt;
/* TX discards count as output errors. */
4370 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4371 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4372 sc->bge_tx_discards = cnt;
4378 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4379 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4380 * but when such padded frames employ the bge IP/TCP checksum offload,
4381 * the hardware checksum assist gives incorrect results (possibly
4382 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4383 * If we pad such runts with zeros, the onboard checksum comes out correct.
4386 bge_cksum_pad(struct mbuf *m)
/* Bytes of zero padding needed to reach the minimum frame size. */
4388 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4391 /* If there's only the packet-header and we can pad there, use it. */
4392 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4393 M_TRAILINGSPACE(m) >= padlen) {
4397 * Walk packet chain to find last mbuf. We will either
4398 * pad there, or append a new mbuf and pad it.
4400 for (last = m; last->m_next != NULL; last = last->m_next);
4401 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4402 /* Allocate new empty mbuf, pad it. Compact later. */
4405 MGET(n, M_DONTWAIT, MT_DATA);
4414 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4415 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4416 last->m_len += padlen;
/* Keep the packet-header length in sync with the added padding. */
4417 m->m_pkthdr.len += padlen;
/*
 * bge_check_short_dma: work around a silicon bug where two
 * back-to-back send BDs totalling <= 8 bytes within one frame can
 * hang the device.  Returns a chain safe to hand to the chip
 * (possibly a defragmented copy of the input).
 */
4422 static struct mbuf *
4423 bge_check_short_dma(struct mbuf *m)
4429 * If device receive two back-to-back send BDs with less than
4430 * or equal to 8 total bytes then the device may hang. The two
4431 * back-to-back send BDs must in the same frame for this failure
4432 * to occur. Scan mbuf chains and see whether two back-to-back
4433 * send BDs are there. If this is the case, allocate new mbuf
4434 * and copy the frame to workaround the silicon bug.
4436 for (n = m, found = 0; n != NULL; n = n->m_next) {
/* Collapse the chain into fewer, larger mbufs (scan logic elided in this view). */
4447 n = m_defrag(m, M_DONTWAIT);
/*
 * bge_setup_tso: prepare an mbuf chain for hardware TSO.  Makes the
 * chain writable, pulls up the Ethernet/IP/TCP headers, and encodes
 * the MSS and IP/TCP header length into *mss / *flags in the format
 * the particular ASIC generation expects.
 */
4455 static struct mbuf *
4456 bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4465 if (M_WRITABLE(m) == 0) {
4466 /* Get a writable copy. */
4467 n = m_dup(m, M_DONTWAIT);
/* Make the Ethernet + IP headers contiguous in the first mbuf. */
4473 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4476 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4477 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
/* Likewise the TCP header, including options. */
4478 m = m_pullup(m, poff + sizeof(struct tcphdr));
4481 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4482 m = m_pullup(m, poff + (tcp->th_off << 2));
4486 * It seems controller doesn't modify IP length and TCP pseudo
4487 * checksum. These checksum computed by upper stack should be 0.
4489 *mss = m->m_pkthdr.tso_segsz;
/* Re-fetch header pointers; m_pullup may have moved the data. */
4490 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4492 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4493 /* Clear pseudo checksum computed by TCP stack. */
4494 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4497 * Broadcom controllers uses different descriptor format for
4498 * TSO depending on ASIC revision. Due to TSO-capable firmware
4499 * license issue and lower performance of firmware based TSO
4500 * we only support hardware based TSO.
4502 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4503 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4504 if (sc->bge_flags & BGE_FLAG_TSO3) {
4506 * For BCM5717 and newer controllers, hardware based TSO
4507 * uses the 14 lower bits of the bge_mss field to store the
4508 * MSS and the upper 2 bits to store the lowest 2 bits of
4509 * the IP/TCP header length. The upper 6 bits of the header
4510 * length are stored in the bge_flags[14:10,4] field. Jumbo
4511 * frames are supported.
4513 *mss |= ((hlen & 0x3) << 14);
4514 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4517 * For BCM5755 and newer controllers, hardware based TSO uses
4518 * the lower 11 bits to store the MSS and the upper 5 bits to
4519 * store the IP/TCP header length. Jumbo frames are not
4522 *mss |= (hlen << 11);
4528 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4529 * pointers to descriptors.
/* Returns 0 on success; on failure *m_head may have been freed/NULLed. */
4532 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4534 bus_dma_segment_t segs[BGE_NSEG_NEW];
4536 struct bge_tx_bd *d;
4537 struct mbuf *m = *m_head;
4538 uint32_t idx = *txidx;
4539 uint16_t csum_flags, mss, vlan_tag;
4540 int nsegs, i, error;
/* Apply the short-DMA hang workaround to multi-mbuf frames. */
4545 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4546 m->m_next != NULL) {
4547 *m_head = bge_check_short_dma(m);
4548 if (*m_head == NULL)
/* TSO frames: fix up headers and encode the MSS/flags. */
4552 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4553 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4554 if (*m_head == NULL)
4556 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4557 BGE_TXBDFLAG_CPU_POST_DMA;
4558 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4559 if (m->m_pkthdr.csum_flags & CSUM_IP)
4560 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4561 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4562 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
/* Zero-pad runts so the checksum assist is not corrupted. */
4563 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4564 (error = bge_cksum_pad(m)) != 0) {
4570 if (m->m_flags & M_LASTFRAG)
4571 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4572 else if (m->m_flags & M_FRAG)
4573 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4576 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4577 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4578 m->m_pkthdr.len > ETHER_MAX_LEN)
4579 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4580 if (sc->bge_forced_collapse > 0 &&
4581 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4583 * Forcedly collapse mbuf chains to overcome hardware
4584 * limitation which only support a single outstanding
4585 * DMA read operation.
4587 if (sc->bge_forced_collapse == 1)
4588 m = m_defrag(m, M_DONTWAIT);
4590 m = m_collapse(m, M_DONTWAIT,
4591 sc->bge_forced_collapse);
/* Map the frame for DMA; retry once after collapsing on EFBIG. */
4598 map = sc->bge_cdata.bge_tx_dmamap[idx];
4599 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4600 &nsegs, BUS_DMA_NOWAIT);
4601 if (error == EFBIG) {
4602 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4609 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4610 m, segs, &nsegs, BUS_DMA_NOWAIT);
4616 } else if (error != 0)
4619 /* Check if we have enough free send BDs. */
4620 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4621 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4625 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
/* Carry the 802.1Q tag through to the descriptors if present. */
4627 if (m->m_flags & M_VLANTAG) {
4628 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4629 vlan_tag = m->m_pkthdr.ether_vtag;
/* Fill one send BD per DMA segment. */
4631 for (i = 0; ; i++) {
4632 d = &sc->bge_ldata.bge_tx_ring[idx];
4633 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4634 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4635 d->bge_len = segs[i].ds_len;
4636 d->bge_flags = csum_flags;
4637 d->bge_vlan_tag = vlan_tag;
4641 BGE_INC(idx, BGE_TX_RING_CNT);
4644 /* Mark the last segment as end of packet... */
4645 d->bge_flags |= BGE_TXBDFLAG_END;
4648 * Insure that the map for this transmission
4649 * is placed at the array index of the last descriptor
4652 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4653 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4654 sc->bge_cdata.bge_tx_chain[idx] = m;
4655 sc->bge_txcnt += nsegs;
/* Advance past the last descriptor; new producer index for the caller. */
4657 BGE_INC(idx, BGE_TX_RING_CNT);
4664 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4665 * to the mbuf data regions directly in the transmit descriptors.
/* Caller must hold the softc lock (asserted below). */
4668 bge_start_locked(struct ifnet *ifp)
4670 struct bge_softc *sc;
4671 struct mbuf *m_head;
4676 BGE_LOCK_ASSERT(sc);
/* Nothing to do without link, or if the interface is down / backed up. */
4678 if (!sc->bge_link ||
4679 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4683 prodidx = sc->bge_tx_prodidx;
4685 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
/* Keep a small descriptor reserve; stall when the ring is nearly full. */
4686 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4687 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4690 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4696 * The code inside the if() block is never reached since we
4697 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4698 * requests to checksum TCP/UDP in a fragmented packet.
4701 * safety overkill. If this is a fragmented packet chain
4702 * with delayed TCP/UDP checksums, then only encapsulate
4703 * it if we have enough descriptors to handle the entire
4705 * (paranoia -- may not actually be needed)
4707 if (m_head->m_flags & M_FIRSTFRAG &&
4708 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4709 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4710 m_head->m_pkthdr.csum_data + 16) {
4711 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4712 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4718 * Pack the data into the transmit ring. If we
4719 * don't have room, set the OACTIVE flag and wait
4720 * for the NIC to drain the ring.
4722 if (bge_encap(sc, &m_head, &prodidx)) {
4725 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4726 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4732 * If there's a BPF listener, bounce a copy of this frame
4735 #ifdef ETHER_BPF_MTAP
4736 ETHER_BPF_MTAP(ifp, m_head);
4738 BPF_MTAP(ifp, m_head);
/* Flush the descriptor writes before telling the chip about them. */
4743 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4744 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
/* Kick the NIC by writing the new producer index to the mailbox. */
4746 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4747 /* 5700 b2 errata */
4748 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4749 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4751 sc->bge_tx_prodidx = prodidx;
4754 * Set a timeout in case the chip goes out to lunch.
4761 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4762 * to the mbuf data regions directly in the transmit descriptors.
/* Wrapper around bge_start_locked(); locking lines are elided in this view. */
4765 bge_start(struct ifnet *ifp)
4767 struct bge_softc *sc;
4771 bge_start_locked(ifp);
/*
 * bge_init_locked: bring the interface up -- reset the chip, program
 * the rings, MAC address, filters and checksum offload, then enable
 * the transmitter, receiver and interrupts.  Caller must hold the
 * softc lock.
 */
4776 bge_init_locked(struct bge_softc *sc)
4782 BGE_LOCK_ASSERT(sc);
/* Already up: nothing to do. */
4786 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4789 /* Cancel pending I/O and flush buffers. */
4793 bge_sig_pre_reset(sc, BGE_RESET_START);
4795 bge_sig_legacy(sc, BGE_RESET_START);
4796 bge_sig_post_reset(sc, BGE_RESET_START);
4801 * Init the various state machines, ring
4802 * control blocks and firmware.
4804 if (bge_blockinit(sc)) {
4805 device_printf(sc->bge_dev, "initialization failure\n");
/* Program the chip's maximum RX frame size from the current MTU. */
4812 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4813 ETHER_HDR_LEN + ETHER_CRC_LEN +
4814 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4816 /* Load our MAC address. */
4817 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4818 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4819 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4821 /* Program promiscuous mode. */
4824 /* Program multicast filter. */
4827 /* Program VLAN tag stripping. */
4830 /* Override UDP checksum offloading. */
4831 if (sc->bge_forced_udpcsum == 0)
4832 sc->bge_csum_features &= ~CSUM_UDP;
4834 sc->bge_csum_features |= CSUM_UDP;
4835 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4836 ifp->if_capenable & IFCAP_TXCSUM) {
4837 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4838 ifp->if_hwassist |= sc->bge_csum_features;
/* Populate the standard RX ring with mbufs. */
4842 if (bge_init_rx_ring_std(sc) != 0) {
4843 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
4849 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4850 * memory to insure that the chip has in fact read the first
4851 * entry of the ring.
4853 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4855 for (i = 0; i < 10; i++) {
4857 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4858 if (v == (MCLBYTES - ETHER_ALIGN))
4862 device_printf (sc->bge_dev,
4863 "5705 A0 chip failed to load RX ring\n");
4866 /* Init jumbo RX ring. */
4867 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4868 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4869 (MCLBYTES - ETHER_ALIGN)) {
4870 if (bge_init_rx_ring_jumbo(sc) != 0) {
4871 device_printf(sc->bge_dev,
4872 "no memory for jumbo Rx buffers.\n");
4878 /* Init our RX return ring index. */
4879 sc->bge_rx_saved_considx = 0;
4881 /* Init our RX/TX stat counters. */
4882 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
4885 bge_init_tx_ring(sc);
4887 /* Enable TX MAC state machine lockup fix. */
4888 mode = CSR_READ_4(sc, BGE_TX_MODE);
4889 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
4890 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4891 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
/* 5720: preserve the jumbo-frame-length and count-down bits as read. */
4892 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4893 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4894 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4896 /* Turn on transmitter. */
4897 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4899 /* Turn on receiver. */
4900 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4903 * Set the number of good frames to receive after RX MBUF
4904 * Low Watermark has been reached. After the RX MAC receives
4905 * this number of frames, it will drop subsequent incoming
4906 * frames until the MBUF High Watermark is reached.
4908 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
4909 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4911 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4913 /* Clear MAC statistics. */
4914 if (BGE_IS_5705_PLUS(sc))
4915 bge_stats_clear_regs(sc);
4917 /* Tell firmware we're alive. */
4918 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4920 #ifdef DEVICE_POLLING
4921 /* Disable interrupts if we are polling. */
4922 if (ifp->if_capenable & IFCAP_POLLING) {
4923 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4924 BGE_PCIMISCCTL_MASK_PCI_INTR);
4925 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4929 /* Enable host interrupts. */
4931 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4932 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4933 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
/* Select the configured media and mark the interface running. */
4936 bge_ifmedia_upd_locked(ifp);
4938 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4939 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Start the periodic tick timer. */
4941 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
/* Presumably the body of bge_init(void *xsc): lock and delegate to
 * bge_init_locked() (entry/locking lines elided in this view -- TODO confirm). */
4947 struct bge_softc *sc = xsc;
4950 bge_init_locked(sc);
4955 * Set media options.
/* Unlocked entry point; delegates to bge_ifmedia_upd_locked() and
 * returns its result (lock/return lines elided in this view). */
4958 bge_ifmedia_upd(struct ifnet *ifp)
4960 struct bge_softc *sc = ifp->if_softc;
4964 res = bge_ifmedia_upd_locked(ifp);
/*
 * bge_ifmedia_upd_locked: set media options with the softc lock held.
 * TBI (1000baseX fiber) cards are programmed directly through MAC
 * registers; copper cards defer to the MII/PHY layer.
 */
4971 bge_ifmedia_upd_locked(struct ifnet *ifp)
4973 struct bge_softc *sc = ifp->if_softc;
4974 struct mii_data *mii;
4975 struct mii_softc *miisc;
4976 struct ifmedia *ifm;
4978 BGE_LOCK_ASSERT(sc);
4980 ifm = &sc->bge_ifmedia;
4982 /* If this is a 1000baseX NIC, enable the TBI port. */
4983 if (sc->bge_flags & BGE_FLAG_TBI) {
4984 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4986 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4989 * The BCM5704 ASIC appears to have a special
4990 * mechanism for programming the autoneg
4991 * advertisement registers in TBI mode.
4993 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4995 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4996 if (sgdig & BGE_SGDIGSTS_DONE) {
4997 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4998 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4999 sgdig |= BGE_SGDIGCFG_AUTO |
5000 BGE_SGDIGCFG_PAUSE_CAP |
5001 BGE_SGDIGCFG_ASYM_PAUSE;
5002 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5003 sgdig | BGE_SGDIGCFG_SEND);
5005 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
/* Manual duplex selection for the fixed-media case. */
5010 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5011 BGE_CLRBIT(sc, BGE_MAC_MODE,
5012 BGE_MACMODE_HALF_DUPLEX);
5014 BGE_SETBIT(sc, BGE_MAC_MODE,
5015 BGE_MACMODE_HALF_DUPLEX);
/* Copper: walk the PHY list and let the MII layer apply the change. */
5025 mii = device_get_softc(sc->bge_miibus);
5026 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5031 * Force an interrupt so that we will call bge_link_upd
5032 * if needed and clear any pending link state attention.
5033 * Without this we are not getting any further interrupts
5034 * for link state changes and thus will not UP the link and
5035 * not be able to send in bge_start_locked. The only
5036 * way to get things working was to receive a packet and
5038 * bge_tick should help for fiber cards and we might not
5039 * need to do this here if BGE_FLAG_TBI is set but as
5040 * we poll for fiber anyway it should not harm.
5042 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5043 sc->bge_flags & BGE_FLAG_5788)
5044 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5046 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5052 * Report current media status.
5055 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5057 struct bge_softc *sc = ifp->if_softc;
5058 struct mii_data *mii;
/* TBI (fiber) cards: derive status straight from the MAC registers. */
5062 if (sc->bge_flags & BGE_FLAG_TBI) {
5063 ifmr->ifm_status = IFM_AVALID;
5064 ifmr->ifm_active = IFM_ETHER;
5065 if (CSR_READ_4(sc, BGE_MAC_STS) &
5066 BGE_MACSTAT_TBI_PCS_SYNCHED)
5067 ifmr->ifm_status |= IFM_ACTIVE;
5069 ifmr->ifm_active |= IFM_NONE;
5073 ifmr->ifm_active |= IFM_1000_SX;
5074 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5075 ifmr->ifm_active |= IFM_HDX;
5077 ifmr->ifm_active |= IFM_FDX;
/* Copper: report what the MII layer last observed. */
5082 mii = device_get_softc(sc->bge_miibus);
5084 ifmr->ifm_active = mii->mii_media_active;
5085 ifmr->ifm_status = mii->mii_media_status;
/*
 * bge_ioctl: handle interface ioctls -- MTU changes, interface flags,
 * multicast list, media requests and capability toggles.  Anything
 * unrecognized falls through to ether_ioctl().  (The switch/case
 * labels are elided in this view of the file.)
 */
5091 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5093 struct bge_softc *sc = ifp->if_softc;
5094 struct ifreq *ifr = (struct ifreq *) data;
5095 struct mii_data *mii;
5096 int flags, mask, error = 0;
/* MTU change: jumbo-capable chips accept up to BGE_JUMBO_MTU. */
5100 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5101 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5102 if (ifr->ifr_mtu < ETHERMIN ||
5103 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5107 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5112 if (ifp->if_mtu != ifr->ifr_mtu) {
5113 ifp->if_mtu = ifr->ifr_mtu;
5114 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Reinit so the chip's RX MTU register picks up the new size. */
5115 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5116 bge_init_locked(sc);
/* Interface flags (IFF_UP / PROMISC / ALLMULTI) handling. */
5123 if (ifp->if_flags & IFF_UP) {
5125 * If only the state of the PROMISC flag changed,
5126 * then just use the 'set promisc mode' command
5127 * instead of reinitializing the entire NIC. Doing
5128 * a full re-init means reloading the firmware and
5129 * waiting for it to start up, which may take a
5130 * second or two. Similarly for ALLMULTI.
5132 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5133 flags = ifp->if_flags ^ sc->bge_if_flags;
5134 if (flags & IFF_PROMISC)
5136 if (flags & IFF_ALLMULTI)
5139 bge_init_locked(sc);
5141 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5145 sc->bge_if_flags = ifp->if_flags;
/* Multicast list change: refresh the hardware filter when running. */
5151 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Media ioctls: TBI cards are handled locally, copper via MII. */
5160 if (sc->bge_flags & BGE_FLAG_TBI) {
5161 error = ifmedia_ioctl(ifp, ifr,
5162 &sc->bge_ifmedia, command);
5164 mii = device_get_softc(sc->bge_miibus);
5165 error = ifmedia_ioctl(ifp, ifr,
5166 &mii->mii_media, command);
/* Capability toggles: mask holds the bits the caller wants flipped. */
5170 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5171 #ifdef DEVICE_POLLING
5172 if (mask & IFCAP_POLLING) {
5173 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5174 error = ether_poll_register(bge_poll, ifp);
5178 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5179 BGE_PCIMISCCTL_MASK_PCI_INTR);
5180 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5181 ifp->if_capenable |= IFCAP_POLLING;
5184 error = ether_poll_deregister(ifp);
5185 /* Enable interrupt even in error case */
5187 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5188 BGE_PCIMISCCTL_MASK_PCI_INTR);
5189 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5190 ifp->if_capenable &= ~IFCAP_POLLING;
5195 if ((mask & IFCAP_TXCSUM) != 0 &&
5196 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5197 ifp->if_capenable ^= IFCAP_TXCSUM;
5198 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5199 ifp->if_hwassist |= sc->bge_csum_features;
5201 ifp->if_hwassist &= ~sc->bge_csum_features;
5204 if ((mask & IFCAP_RXCSUM) != 0 &&
5205 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5206 ifp->if_capenable ^= IFCAP_RXCSUM;
5208 if ((mask & IFCAP_TSO4) != 0 &&
5209 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5210 ifp->if_capenable ^= IFCAP_TSO4;
5211 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5212 ifp->if_hwassist |= CSUM_TSO;
5214 ifp->if_hwassist &= ~CSUM_TSO;
5217 if (mask & IFCAP_VLAN_MTU) {
5218 ifp->if_capenable ^= IFCAP_VLAN_MTU;
/* VLAN MTU change needs a reinit to reprogram the RX MTU register. */
5219 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5223 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5224 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5225 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5226 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5227 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5228 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
/* HW TSO over VLANs depends on HW tagging; drop them together. */
5229 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5230 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5235 #ifdef VLAN_CAPABILITIES
5236 VLAN_CAPABILITIES(ifp);
/* Anything else goes to the common Ethernet ioctl handler. */
5240 error = ether_ioctl(ifp, command, data);
/*
 * bge_watchdog: per-tick transmit watchdog.  When the timer expires
 * the chip is assumed wedged and the interface is reinitialized.
 */
5248 bge_watchdog(struct bge_softc *sc)
5252 BGE_LOCK_ASSERT(sc);
/* Timer not armed, or armed but not yet expired: nothing to do. */
5254 if (sc->bge_timer == 0 || --sc->bge_timer)
5259 if_printf(ifp, "watchdog timeout -- resetting\n");
/* Clearing RUNNING makes bge_init_locked() perform a full reinit. */
5261 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5262 bge_init_locked(sc);
/*
 * bge_stop_block: clear an enable bit in a state-machine control
 * register and spin (up to BGE_TIMEOUT iterations) until the hardware
 * acknowledges by reading the bit back as zero.
 */
5268 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5272 BGE_CLRBIT(sc, reg, bit);
5274 for (i = 0; i < BGE_TIMEOUT; i++) {
5275 if ((CSR_READ_4(sc, reg) & bit) == 0)
5282 * Stop the adapter and free any mbufs allocated to the
/* Caller must hold the softc lock; leaves the chip fully quiesced. */
5286 bge_stop(struct bge_softc *sc)
5290 BGE_LOCK_ASSERT(sc);
/* Stop the periodic tick callout first. */
5294 callout_stop(&sc->bge_stat_ch);
5296 /* Disable host interrupts. */
5297 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5298 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5301 * Tell firmware we're shutting down.
5304 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5307 * Disable all of the receiver blocks.
5309 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5310 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5311 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5312 if (BGE_IS_5700_FAMILY(sc))
5313 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5314 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5315 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5316 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5319 * Disable all of the transmit blocks.
5321 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5322 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5323 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5324 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5325 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5326 if (BGE_IS_5700_FAMILY(sc))
5327 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5328 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5331 * Shut down all of the memory managers and related
5334 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5335 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5336 if (BGE_IS_5700_FAMILY(sc))
5337 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
/* Pulse the FTQ reset register: all ones, then zero. */
5339 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5340 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5341 if (!(BGE_IS_5705_PLUS(sc))) {
5342 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5343 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5345 /* Update MAC statistics. */
5346 if (BGE_IS_5705_PLUS(sc))
5347 bge_stats_update_regs(sc);
5350 bge_sig_legacy(sc, BGE_RESET_STOP);
5351 bge_sig_post_reset(sc, BGE_RESET_STOP);
5354 * Keep the ASF firmware running if up.
5356 if (sc->bge_asf_mode & ASF_STACKUP)
5357 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5359 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5361 /* Free the RX lists. */
5362 bge_free_rx_ring_std(sc);
5364 /* Free jumbo RX list. */
5365 if (BGE_IS_JUMBO_CAPABLE(sc))
5366 bge_free_rx_ring_jumbo(sc);
5368 /* Free TX buffers. */
5369 bge_free_tx_ring(sc);
5371 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5373 /* Clear MAC's link state (PHY may still have link UP). */
5374 if (bootverbose && sc->bge_link)
5375 if_printf(sc->bge_ifp, "link DOWN\n");
5378 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5382 * Stop all chip I/O so that the kernel's probe routines don't
5383 * get confused by errant DMAs when rebooting.
/* Device shutdown method (stop/reset calls elided in this view). */
5386 bge_shutdown(device_t dev)
5388 struct bge_softc *sc;
5390 sc = device_get_softc(dev);
/* Device suspend method: quiesce the interface before power-down
 * (stop/lock calls elided in this view). */
5400 bge_suspend(device_t dev)
5402 struct bge_softc *sc;
5404 sc = device_get_softc(dev);
/* Device resume method: reinitialize the chip and restart TX if the
 * interface was up before the suspend. */
5413 bge_resume(device_t dev)
5415 struct bge_softc *sc;
5418 sc = device_get_softc(dev);
5421 if (ifp->if_flags & IFF_UP) {
5422 bge_init_locked(sc);
5423 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5424 bge_start_locked(ifp);
/*
 * bge_link_upd: process link state changes.  Three detection
 * strategies are used depending on hardware: MII interrupts
 * (BCM5700 other than rev B2), TBI PCS sync status (fiber cards),
 * or auto-polled PHY status with a workaround for chips that always
 * assert the link-changed status bit.
 */
5432 bge_link_upd(struct bge_softc *sc)
5434 struct mii_data *mii;
5435 uint32_t link, status;
5437 BGE_LOCK_ASSERT(sc);
5439 /* Clear 'pending link event' flag. */
5440 sc->bge_link_evt = 0;
5443 * Process link state changes.
5444 * Grrr. The link status word in the status block does
5445 * not work correctly on the BCM5700 rev AX and BX chips,
5446 * according to all available information. Hence, we have
5447 * to enable MII interrupts in order to properly obtain
5448 * async link changes. Unfortunately, this also means that
5449 * we have to read the MAC status register to detect link
5450 * changes, thereby adding an additional register access to
5451 * the interrupt handler.
5453 * XXX: perhaps link state detection procedure used for
5454 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
5457 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5458 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5459 status = CSR_READ_4(sc, BGE_MAC_STS);
5460 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5461 mii = device_get_softc(sc->bge_miibus);
/* Link transition: compare MII state against our cached bge_link. */
5463 if (!sc->bge_link &&
5464 mii->mii_media_status & IFM_ACTIVE &&
5465 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5468 if_printf(sc->bge_ifp, "link UP\n");
5469 } else if (sc->bge_link &&
5470 (!(mii->mii_media_status & IFM_ACTIVE) ||
5471 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5474 if_printf(sc->bge_ifp, "link DOWN\n");
5477 /* Clear the interrupt. */
5478 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5479 BGE_EVTENB_MI_INTERRUPT);
5480 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5481 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
/* TBI (fiber) cards: link follows the PCS sync bit in MAC status. */
5487 if (sc->bge_flags & BGE_FLAG_TBI) {
5488 status = CSR_READ_4(sc, BGE_MAC_STS);
5489 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5490 if (!sc->bge_link) {
5492 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5493 BGE_CLRBIT(sc, BGE_MAC_MODE,
5494 BGE_MACMODE_TBI_SEND_CFGS);
5495 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5497 if_printf(sc->bge_ifp, "link UP\n");
5498 if_link_state_change(sc->bge_ifp,
5501 } else if (sc->bge_link) {
5504 if_printf(sc->bge_ifp, "link DOWN\n");
5505 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5507 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5509 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
5510 * in status word always set. Workaround this bug by reading
5511 * PHY link status directly.
5513 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5515 if (link != sc->bge_link ||
5516 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5517 mii = device_get_softc(sc->bge_miibus);
5519 if (!sc->bge_link &&
5520 mii->mii_media_status & IFM_ACTIVE &&
5521 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5524 if_printf(sc->bge_ifp, "link UP\n");
5525 } else if (sc->bge_link &&
5526 (!(mii->mii_media_status & IFM_ACTIVE) ||
5527 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5530 if_printf(sc->bge_ifp, "link DOWN\n");
5535 * For controllers that call mii_tick, we have to poll
5538 mii = device_get_softc(sc->bge_miibus);
5540 bge_miibus_statchg(sc->bge_dev);
5543 /* Clear the attention. */
5544 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5545 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5546 BGE_MACSTAT_LINK_CHANGED);
/*
 * bge_add_sysctls: register per-device sysctl nodes -- debug hooks
 * (when BGE_REGISTER_DEBUG), the forced_collapse and forced_udpcsum
 * tunables, and the statistics tree appropriate for this chip family.
 */
5550 bge_add_sysctls(struct bge_softc *sc)
5552 struct sysctl_ctx_list *ctx;
5553 struct sysctl_oid_list *children;
5557 ctx = device_get_sysctl_ctx(sc->bge_dev);
5558 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5560 #ifdef BGE_REGISTER_DEBUG
5561 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5562 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5563 "Debug Information");
5565 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5566 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5569 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5570 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
/* Unit number is used to build the per-device tunable names below. */
5575 unit = device_get_unit(sc->bge_dev);
5577 * A common design characteristic for many Broadcom client controllers
5578 * is that they only support a single outstanding DMA read operation
5579 * on the PCIe bus. This means that it will take twice as long to fetch
5580 * a TX frame that is split into header and payload buffers as it does
5581 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5582 * these controllers, coalescing buffers to reduce the number of memory
5583 * reads is effective way to get maximum performance(about 940Mbps).
5584 * Without collapsing TX buffers the maximum TCP bulk transfer
5585 * performance is about 850Mbps. However forcing coalescing mbufs
5586 * consumes a lot of CPU cycles, so leave it off by default.
5588 sc->bge_forced_collapse = 0;
5589 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5590 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5591 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5592 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5593 "Number of fragmented TX buffers of a frame allowed before "
5594 "forced collapsing");
5597 * It seems all Broadcom controllers have a bug that can generate UDP
5598 * datagrams with checksum value 0 when TX UDP checksum offloading is
5599 * enabled. Generating UDP checksum value 0 is RFC 768 violation.
5600 * Even though the probability of generating such UDP datagrams is
5601 * low, I don't want to see FreeBSD boxes to inject such datagrams
5602 * into network so disable UDP checksum offloading by default. Users
5603 * still override this behavior by setting a sysctl variable,
5604 * dev.bge.0.forced_udpcsum.
5606 sc->bge_forced_udpcsum = 0;
5607 snprintf(tn, sizeof(tn), "dev.bge.%d.bge_forced_udpcsum", unit);
5608 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5609 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5610 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5611 "Enable UDP checksum offloading even if controller can "
5612 "generate UDP checksum value 0");
/* 5705+ chips expose stats via registers; older chips via the stats block. */
5614 if (BGE_IS_5705_PLUS(sc))
5615 bge_add_sysctl_stats_regs(sc, ctx, children);
5617 bge_add_sysctl_stats(sc, ctx, children);
/*
 * Register one read-only statistics sysctl; the handler
 * (bge_sysctl_stats) receives the counter's offset within
 * struct bge_stats as its arg2.
 */
5620 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5621 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5622 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
/*
 * bge_add_sysctl_stats: build the dev.bge.N.stats sysctl tree for
 * controllers whose statistics live in the NIC's statistics block
 * (read back one counter at a time through bge_sysctl_stats()).
 * Three nodes are created: top-level NIC counters under "stats",
 * receive MAC counters under "stats.rx" and transmit MAC counters
 * under "stats.tx".
 * NOTE(review): braces, the "LateCollisions" string argument and a few
 * other structural lines are missing from this excerpt of the listing.
 */
5626 bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5627 struct sysctl_oid_list *parent)
5629 struct sysctl_oid *tree;
5630 struct sysctl_oid_list *children, *schildren;
/* Top-level node; remember its child list so rx/tx can hang off it. */
5632 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5633 NULL, "BGE Statistics");
5634 schildren = children = SYSCTL_CHILDREN(tree);
/* Controller-wide counters. */
5635 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5636 children, COSFramesDroppedDueToFilters,
5637 "FramesDroppedDueToFilters");
5638 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5639 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5640 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5641 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5642 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5643 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5644 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5645 children, ifInDiscards, "InputDiscards");
5646 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5647 children, ifInErrors, "InputErrors");
5648 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5649 children, nicRecvThresholdHit, "RecvThresholdHit");
5650 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5651 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5652 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5653 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5654 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5655 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5656 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5657 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5658 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5659 children, nicRingStatusUpdate, "RingStatusUpdate");
5660 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5661 children, nicInterrupts, "Interrupts");
5662 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5663 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5664 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5665 children, nicSendThresholdHit, "SendThresholdHit");
/* Receive-side MAC statistics ("stats.rx"). */
5667 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5668 NULL, "BGE RX Statistics");
5669 children = SYSCTL_CHILDREN(tree);
5670 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5671 children, rxstats.ifHCInOctets, "ifHCInOctets");
5672 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5673 children, rxstats.etherStatsFragments, "Fragments");
5674 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5675 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5676 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5677 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5678 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5679 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5680 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5681 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5682 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5683 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5684 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5685 children, rxstats.xoffPauseFramesReceived,
5686 "xoffPauseFramesReceived");
5687 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5688 children, rxstats.macControlFramesReceived,
5689 "ControlFramesReceived");
5690 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5691 children, rxstats.xoffStateEntered, "xoffStateEntered");
5692 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5693 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5694 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5695 children, rxstats.etherStatsJabbers, "Jabbers");
5696 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5697 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5698 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5699 children, rxstats.inRangeLengthError, "inRangeLengthError");
5700 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5701 children, rxstats.outRangeLengthError, "outRangeLengthError");
/* Transmit-side MAC statistics ("stats.tx"). */
5703 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5704 NULL, "BGE TX Statistics");
5705 children = SYSCTL_CHILDREN(tree);
5706 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5707 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5708 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5709 children, txstats.etherStatsCollisions, "Collisions");
5710 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5711 children, txstats.outXonSent, "XonSent");
5712 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5713 children, txstats.outXoffSent, "XoffSent");
5714 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5715 children, txstats.flowControlDone, "flowControlDone");
5716 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5717 children, txstats.dot3StatsInternalMacTransmitErrors,
5718 "InternalMacTransmitErrors");
5719 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5720 children, txstats.dot3StatsSingleCollisionFrames,
5721 "SingleCollisionFrames");
5722 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5723 children, txstats.dot3StatsMultipleCollisionFrames,
5724 "MultipleCollisionFrames");
5725 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5726 children, txstats.dot3StatsDeferredTransmissions,
5727 "DeferredTransmissions");
5728 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5729 children, txstats.dot3StatsExcessiveCollisions,
5730 "ExcessiveCollisions");
5731 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5732 children, txstats.dot3StatsLateCollisions,
5734 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5735 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5736 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5737 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5738 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5739 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5740 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5741 children, txstats.dot3StatsCarrierSenseErrors,
5742 "CarrierSenseErrors");
5743 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5744 children, txstats.ifOutDiscards, "Discards");
5745 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5746 children, txstats.ifOutErrors, "Errors");
5749 #undef BGE_SYSCTL_STAT
/*
 * BGE_SYSCTL_STAT_ADD64: register one read-only 64-bit counter sysctl
 * that exports a uint64_t directly (p points into struct bge_mac_stats,
 * which the driver keeps up to date itself), with description d.
 */
5751 #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5752 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
/*
 * bge_add_sysctl_stats_regs: build the dev.bge.N.stats sysctl tree for
 * 5705-plus controllers.  Unlike bge_add_sysctl_stats(), the counters
 * here are the driver-maintained 64-bit accumulators in
 * sc->bge_mac_stats, so each leaf is a plain SYSCTL_ADD_UQUAD export
 * rather than a proc handler.  Layout mirrors the other function:
 * top-level counters under "stats", then "stats.rx" and "stats.tx".
 * NOTE(review): braces and blank lines are missing from this excerpt
 * of the listing.
 */
5755 bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5756 struct sysctl_oid_list *parent)
5758 struct sysctl_oid *tree;
5759 struct sysctl_oid_list *child, *schild;
5760 struct bge_mac_stats *stats;
5762 stats = &sc->bge_mac_stats;
/* Top-level node; remember its child list so rx/tx can hang off it. */
5763 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5764 NULL, "BGE Statistics");
5765 schild = child = SYSCTL_CHILDREN(tree);
5766 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5767 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5768 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5769 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5770 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5771 &stats->DmaWriteHighPriQueueFull,
5772 "NIC DMA Write High Priority Queue Full");
5773 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5774 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5775 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5776 &stats->InputDiscards, "Discarded Input Frames");
5777 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5778 &stats->InputErrors, "Input Errors");
5779 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5780 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
/* Receive-side MAC statistics ("stats.rx"). */
5782 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5783 NULL, "BGE RX Statistics");
5784 child = SYSCTL_CHILDREN(tree);
5785 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5786 &stats->ifHCInOctets, "Inbound Octets");
5787 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5788 &stats->etherStatsFragments, "Fragments");
5789 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5790 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5791 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5792 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5793 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5794 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5795 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5796 &stats->dot3StatsFCSErrors, "FCS Errors");
5797 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5798 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5799 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5800 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5801 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5802 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5803 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5804 &stats->macControlFramesReceived, "MAC Control Frames Received");
5805 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5806 &stats->xoffStateEntered, "XOFF State Entered");
5807 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5808 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5809 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5810 &stats->etherStatsJabbers, "Jabbers");
5811 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5812 &stats->etherStatsUndersizePkts, "Undersized Packets");
/* Transmit-side MAC statistics ("stats.tx"). */
5814 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5815 NULL, "BGE TX Statistics");
5816 child = SYSCTL_CHILDREN(tree);
5817 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5818 &stats->ifHCOutOctets, "Outbound Octets");
5819 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5820 &stats->etherStatsCollisions, "TX Collisions");
5821 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5822 &stats->outXonSent, "XON Sent");
5823 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5824 &stats->outXoffSent, "XOFF Sent");
5825 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5826 &stats->dot3StatsInternalMacTransmitErrors,
5827 "Internal MAC TX Errors");
5828 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5829 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5830 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5831 &stats->dot3StatsMultipleCollisionFrames,
5832 "Multiple Collision Frames");
5833 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5834 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5835 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5836 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
5837 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
5838 &stats->dot3StatsLateCollisions, "Late Collisions");
5839 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5840 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
5841 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5842 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
5843 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5844 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
5847 #undef BGE_SYSCTL_STAT_ADD64
/*
 * bge_sysctl_stats: handler behind BGE_SYSCTL_STAT() leaves.  Reads one
 * statistic out of the NIC statistics block through the PCI memory
 * window and returns it to userland as an unsigned int.
 * NOTE(review): the declarations of 'offset' and 'result' (and the
 * assignment of arg2 to 'offset') are on lines not visible in this
 * excerpt; 'offset' is presumably the offsetof() value passed by the
 * macro — confirm against the full source.
 */
5850 bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
5852 struct bge_softc *sc;
5856 sc = (struct bge_softc *)arg1;
/* Counters are 64-bit bge_hostaddr slots; only the low word is read. */
5858 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
5859 offsetof(bge_hostaddr, bge_addr_lo));
5860 return (sysctl_handle_int(oidp, &result, 0, req));
5863 #ifdef BGE_REGISTER_DEBUG
/*
 * bge_sysctl_debug_info: debug-only sysctl that, when written to, dumps
 * the status block, a window of device registers and a summary of the
 * hardware capability flags to the console.  Compiled only with
 * BGE_REGISTER_DEBUG.
 * NOTE(review): several lines (declarations of 'sbdata', 'i', 'j', the
 * early 'return (error);', the 'else' arm choosing the smaller status
 * block size, and loop bodies' newline printfs) are not visible in
 * this excerpt of the listing.
 */
5865 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5867 struct bge_softc *sc;
5869 int error, result, sbsz;
/* Only act when the user actually wrote a value. */
5873 error = sysctl_handle_int(oidp, &result, 0, req);
5874 if (error || (req->newptr == NULL))
5878 sc = (struct bge_softc *)arg1;
/* 5700 (except C0 silicon) uses the full-size status block. */
5880 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5881 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
5882 sbsz = BGE_STATUS_BLK_SZ;
/* Sync the DMA map before reading, then dump as 16-bit words. */
5885 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
5886 printf("Status Block:\n");
5888 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
5889 sc->bge_cdata.bge_status_map,
5890 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
5891 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
5893 for (j = 0; j < 8; j++)
5894 printf(" %04x", sbdata[i++]);
/* Dump device registers 0x800-0x9FF, eight per row. */
5898 printf("Registers:\n");
5899 for (i = 0x800; i < 0xA00; ) {
5901 for (j = 0; j < 8; j++) {
5902 printf(" %08x", CSR_READ_4(sc, i));
/* Human-readable summary of the probed hardware flags. */
5909 printf("Hardware Flags:\n");
5910 if (BGE_IS_5717_PLUS(sc))
5911 printf(" - 5717 Plus\n");
5912 if (BGE_IS_5755_PLUS(sc))
5913 printf(" - 5755 Plus\n");
5914 if (BGE_IS_575X_PLUS(sc))
5915 printf(" - 575X Plus\n");
5916 if (BGE_IS_5705_PLUS(sc))
5917 printf(" - 5705 Plus\n");
5918 if (BGE_IS_5714_FAMILY(sc))
5919 printf(" - 5714 Family\n");
5920 if (BGE_IS_5700_FAMILY(sc))
5921 printf(" - 5700 Family\n");
5922 if (sc->bge_flags & BGE_FLAG_JUMBO)
5923 printf(" - Supports Jumbo Frames\n");
5924 if (sc->bge_flags & BGE_FLAG_PCIX)
5925 printf(" - PCI-X Bus\n");
5926 if (sc->bge_flags & BGE_FLAG_PCIE)
5927 printf(" - PCI Express Bus\n");
5928 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
5929 printf(" - No 3 LEDs\n");
5930 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
5931 printf(" - RX Alignment Bug\n");
/*
 * bge_sysctl_reg_read: debug sysctl; writing a register offset causes
 * the driver to read that register and print its value to the console.
 * Offsets are limited to the directly-mapped register range (< 0x8000).
 * NOTE(review): the declarations of 'val'/'result'/'error' and the
 * final 'return (error);' are on lines not visible in this excerpt.
 */
5938 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5940 struct bge_softc *sc;
5946 error = sysctl_handle_int(oidp, &result, 0, req);
5947 if (error || (req->newptr == NULL))
/* Guard against offsets beyond the mapped register space. */
5950 if (result < 0x8000) {
5951 sc = (struct bge_softc *)arg1;
5952 val = CSR_READ_4(sc, result);
5953 printf("reg 0x%06X = 0x%08X\n", result, val);
/*
 * bge_sysctl_mem_read: debug sysctl; writing an offset causes the
 * driver to read that word of NIC-internal memory via the indirect
 * memory window (bge_readmem_ind) and print it to the console.
 * NOTE(review): the declarations of 'val'/'result'/'error' and the
 * final 'return (error);' are on lines not visible in this excerpt.
 */
5960 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
5962 struct bge_softc *sc;
5968 error = sysctl_handle_int(oidp, &result, 0, req);
5969 if (error || (req->newptr == NULL))
/* Same bound as the register reader; keep offsets in range. */
5972 if (result < 0x8000) {
5973 sc = (struct bge_softc *)arg1;
5974 val = bge_readmem_ind(sc, result);
5975 printf("mem 0x%06X = 0x%08X\n", result, val);
/*
 * bge_get_eaddr_fw: try to obtain the station address from firmware
 * (Open Firmware on sparc64/powerpc via OF_getetheraddr).
 * NOTE(review): the body between the BGE_FLAG_EADDR test and the
 * OF_getetheraddr() call (including the failure return) is not visible
 * in this excerpt; presumably BGE_FLAG_EADDR set means the address is
 * available elsewhere — confirm against the full source.
 */
5983 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
5986 if (sc->bge_flags & BGE_FLAG_EADDR)
5990 OF_getetheraddr(sc->bge_dev, ether_addr);
/*
 * bge_get_eaddr_mem: try to recover the station address from the
 * chip's SRAM mailbox area, where boot firmware may have stashed it.
 * The high word of BGE_SRAM_MAC_ADDR_HIGH_MB must read 0x484b
 * (ASCII "HK") as a validity signature; the six address bytes follow
 * in the remaining bytes of the high/low mailbox words.
 * NOTE(review): the declaration of 'mac_addr' and the success/failure
 * returns are on lines not visible in this excerpt.
 */
5997 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6001 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
6002 if ((mac_addr >> 16) == 0x484b) {
6003 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6004 ether_addr[1] = (uint8_t)mac_addr;
6005 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6006 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6007 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6008 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6009 ether_addr[5] = (uint8_t)mac_addr;
/*
 * bge_get_eaddr_nvram: read the station address out of NVRAM.  The
 * BCM5906 keeps it at a different offset than the other chips.  The
 * +2 skips the leading pad of the 8-byte NVRAM MAC slot so the six
 * address bytes are returned.  Returns bge_read_nvram()'s status.
 */
6016 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6018 int mac_offset = BGE_EE_MAC_OFFSET;
6020 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6021 mac_offset = BGE_EE_MAC_OFFSET_5906;
6023 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
/*
 * bge_get_eaddr_eeprom: read the station address from the EEPROM.
 * The BCM5906 has no EEPROM interface, so it is rejected up front
 * (the error return is on a line not visible in this excerpt).
 * Like the NVRAM path, +2 skips the pad bytes of the MAC slot.
 */
6028 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6031 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6034 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6039 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6041 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6042 /* NOTE: Order is critical */
6045 bge_get_eaddr_nvram,
6046 bge_get_eaddr_eeprom,
6049 const bge_eaddr_fcn_t *func;
6051 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6052 if ((*func)(sc, eaddr) == 0)
6055 return (*func == NULL ? ENXIO : 0);