2 * Copyright (c) 2006-2009 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * The following controllers are supported by this driver:
43 * The following controllers are not supported by this driver:
44 * BCM5706C A0, A1 (pre-production)
45 * BCM5706S A0, A1 (pre-production)
46 * BCM5708C A0, B0 (pre-production)
47 * BCM5708S A0, B0 (pre-production)
48 * BCM5709C A0 B0, B1, B2 (pre-production)
49 * BCM5709S A0, A1, B0, B1, B2, C0 (pre-production)
54 #include <dev/bce/if_bcereg.h>
55 #include <dev/bce/if_bcefw.h>
57 /****************************************************************************/
58 /* BCE Debug Options */
59 /****************************************************************************/
61 u32 bce_debug = BCE_WARN;
64 /* 1 = 1 in 2,147,483,648 */
65 /* 256 = 1 in 8,388,608 */
66 /* 2048 = 1 in 1,048,576 */
67 /* 65536 = 1 in 32,768 */
68 /* 1048576 = 1 in 2,048 */
69 /* 268435456 = 1 in 8 */
70 /* 536870912 = 1 in 4 */
71 /* 1073741824 = 1 in 2 */
73 /* Controls how often the l2_fhdr frame error check will fail. */
74 int l2fhdr_error_sim_control = 0;
76 /* Controls how often the unexpected attention check will fail. */
77 int unexpected_attention_sim_control = 0;
79 /* Controls how often to simulate an mbuf allocation failure. */
80 int mbuf_alloc_failed_sim_control = 0;
82 /* Controls how often to simulate a DMA mapping failure. */
83 int dma_map_addr_failed_sim_control = 0;
85 /* Controls how often to simulate a bootcode failure. */
86 int bootcode_running_failure_sim_control = 0;
89 /****************************************************************************/
90 /* BCE Build Time Options */
91 /****************************************************************************/
92 /* #define BCE_NVRAM_WRITE_SUPPORT 1 */
95 /****************************************************************************/
96 /* PCI Device ID Table */
98 /* Used by bce_probe() to identify the devices supported by this driver. */
99 /****************************************************************************/
100 #define BCE_DEVDESC_MAX 64
102 static struct bce_type bce_devs[] = {
103 /* BCM5706C Controllers and OEM boards. */
104 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
105 "HP NC370T Multifunction Gigabit Server Adapter" },
106 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
107 "HP NC370i Multifunction Gigabit Server Adapter" },
108 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
109 "HP NC380T PCIe DP Multifunc Gig Server Adapter" },
110 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
111 "HP NC371i Multifunction Gigabit Server Adapter" },
112 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
113 "Broadcom NetXtreme II BCM5706 1000Base-T" },
115 /* BCM5706S controllers and OEM boards. */
116 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
117 "HP NC370F Multifunction Gigabit Server Adapter" },
118 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
119 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
121 /* BCM5708C controllers and OEM boards. */
122 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
123 "HP NC373T PCIe Multifunction Gig Server Adapter" },
124 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
125 "HP NC373i Multifunction Gigabit Server Adapter" },
126 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
127 "HP NC374m PCIe Multifunction Adapter" },
128 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
129 "Broadcom NetXtreme II BCM5708 1000Base-T" },
131 /* BCM5708S controllers and OEM boards. */
132 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
133 "HP NC373m Multifunction Gigabit Server Adapter" },
134 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
135 "HP NC373i Multifunction Gigabit Server Adapter" },
136 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
137 "HP NC373F PCIe Multifunc Giga Server Adapter" },
138 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
139 "Broadcom NetXtreme II BCM5708 1000Base-SX" },
141 /* BCM5709C controllers and OEM boards. */
142 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
143 "HP NC382i DP Multifunction Gigabit Server Adapter" },
144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
145 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
147 "Broadcom NetXtreme II BCM5709 1000Base-T" },
149 /* BCM5709S controllers and OEM boards. */
150 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
151 "HP NC382m DP 1GbE Multifunction BL-c Adapter" },
152 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
153 "HP NC382i DP Multifunction Gigabit Server Adapter" },
154 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
155 "Broadcom NetXtreme II BCM5709 1000Base-SX" },
157 /* BCM5716 controllers and OEM boards. */
158 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
159 "Broadcom NetXtreme II BCM5716 1000Base-T" },
165 /****************************************************************************/
166 /* Supported Flash NVRAM device data. */
167 /****************************************************************************/
168 static struct flash_spec flash_table[] =
170 #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
171 #define NONBUFFERED_FLAGS (BCE_NV_WREN)
174 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
175 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
176 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
178 /* Expansion entry 0001 */
179 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
180 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
183 /* Saifun SA25F010 (non-buffered flash) */
184 /* strap, cfg1, & write1 need updates */
185 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
186 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
187 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
188 "Non-buffered flash (128kB)"},
189 /* Saifun SA25F020 (non-buffered flash) */
190 /* strap, cfg1, & write1 need updates */
191 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
194 "Non-buffered flash (256kB)"},
195 /* Expansion entry 0100 */
196 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
201 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
202 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
203 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
204 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
205 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
206 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
207 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
208 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
209 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
210 /* Saifun SA25F005 (non-buffered flash) */
211 /* strap, cfg1, & write1 need updates */
212 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
213 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
214 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
215 "Non-buffered flash (64kB)"},
217 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
218 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
219 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
221 /* Expansion entry 1001 */
222 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 /* Expansion entry 1010 */
227 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
228 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
229 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
231 /* ATMEL AT45DB011B (buffered flash) */
232 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
235 "Buffered flash (128kB)"},
236 /* Expansion entry 1100 */
237 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
238 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
239 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
241 /* Expansion entry 1101 */
242 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
243 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
244 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
246 /* Ateml Expansion entry 1110 */
247 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
248 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
249 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
250 "Entry 1110 (Atmel)"},
251 /* ATMEL AT45DB021B (buffered flash) */
252 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
253 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
254 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
255 "Buffered flash (256kB)"},
259 * The BCM5709 controllers transparently handle the
260 * differences between Atmel 264 byte pages and all
261 * flash devices which use 256 byte pages, so no
262 * logical-to-physical mapping is required in the
265 static struct flash_spec flash_5709 = {
266 .flags = BCE_NV_BUFFERED,
267 .page_bits = BCM5709_FLASH_PAGE_BITS,
268 .page_size = BCM5709_FLASH_PAGE_SIZE,
269 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
270 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2,
271 .name = "5709/5716 buffered flash (256kB)",
275 /****************************************************************************/
276 /* FreeBSD device entry points. */
277 /****************************************************************************/
278 static int bce_probe (device_t);
279 static int bce_attach (device_t);
280 static int bce_detach (device_t);
281 static int bce_shutdown (device_t);
284 /****************************************************************************/
285 /* BCE Debug Data Structure Dump Routines */
286 /****************************************************************************/
288 static u32 bce_reg_rd (struct bce_softc *, u32);
289 static void bce_reg_wr (struct bce_softc *, u32, u32);
290 static void bce_reg_wr16 (struct bce_softc *, u32, u16);
291 static u32 bce_ctx_rd (struct bce_softc *, u32, u32);
292 static void bce_dump_enet (struct bce_softc *, struct mbuf *);
293 static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
294 static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int);
295 static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int);
296 #ifdef BCE_JUMBO_HDRSPLIT
297 static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int);
299 static void bce_dump_txbd (struct bce_softc *, int, struct tx_bd *);
300 static void bce_dump_rxbd (struct bce_softc *, int, struct rx_bd *);
301 #ifdef BCE_JUMBO_HDRSPLIT
302 static void bce_dump_pgbd (struct bce_softc *, int, struct rx_bd *);
304 static void bce_dump_l2fhdr (struct bce_softc *, int, struct l2_fhdr *);
305 static void bce_dump_ctx (struct bce_softc *, u16);
306 static void bce_dump_ftqs (struct bce_softc *);
307 static void bce_dump_tx_chain (struct bce_softc *, u16, int);
308 static void bce_dump_rx_chain (struct bce_softc *, u16, int);
309 #ifdef BCE_JUMBO_HDRSPLIT
310 static void bce_dump_pg_chain (struct bce_softc *, u16, int);
312 static void bce_dump_status_block (struct bce_softc *);
313 static void bce_dump_stats_block (struct bce_softc *);
314 static void bce_dump_driver_state (struct bce_softc *);
315 static void bce_dump_hw_state (struct bce_softc *);
316 static void bce_dump_mq_regs (struct bce_softc *);
317 static void bce_dump_bc_state (struct bce_softc *);
318 static void bce_dump_txp_state (struct bce_softc *, int);
319 static void bce_dump_rxp_state (struct bce_softc *, int);
320 static void bce_dump_tpat_state (struct bce_softc *, int);
321 static void bce_dump_cp_state (struct bce_softc *, int);
322 static void bce_dump_com_state (struct bce_softc *, int);
323 static void bce_breakpoint (struct bce_softc *);
327 /****************************************************************************/
328 /* BCE Register/Memory Access Routines */
329 /****************************************************************************/
330 static u32 bce_reg_rd_ind (struct bce_softc *, u32);
331 static void bce_reg_wr_ind (struct bce_softc *, u32, u32);
332 static void bce_shmem_wr (struct bce_softc *, u32, u32);
333 static u32 bce_shmem_rd (struct bce_softc *, u32);
334 static void bce_ctx_wr (struct bce_softc *, u32, u32, u32);
335 static int bce_miibus_read_reg (device_t, int, int);
336 static int bce_miibus_write_reg (device_t, int, int, int);
337 static void bce_miibus_statchg (device_t);
340 /****************************************************************************/
341 /* BCE NVRAM Access Routines */
342 /****************************************************************************/
343 static int bce_acquire_nvram_lock (struct bce_softc *);
344 static int bce_release_nvram_lock (struct bce_softc *);
345 static void bce_enable_nvram_access (struct bce_softc *);
346 static void bce_disable_nvram_access(struct bce_softc *);
347 static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
348 static int bce_init_nvram (struct bce_softc *);
349 static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
350 static int bce_nvram_test (struct bce_softc *);
351 #ifdef BCE_NVRAM_WRITE_SUPPORT
352 static int bce_enable_nvram_write (struct bce_softc *);
353 static void bce_disable_nvram_write (struct bce_softc *);
354 static int bce_nvram_erase_page (struct bce_softc *, u32);
355 static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
356 static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
359 /****************************************************************************/
361 /****************************************************************************/
362 static void bce_get_media (struct bce_softc *);
363 static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int);
364 static int bce_dma_alloc (device_t);
365 static void bce_dma_free (struct bce_softc *);
366 static void bce_release_resources (struct bce_softc *);
368 /****************************************************************************/
369 /* BCE Firmware Synchronization and Load */
370 /****************************************************************************/
371 static int bce_fw_sync (struct bce_softc *, u32);
372 static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
373 static void bce_load_cpu_fw (struct bce_softc *, struct cpu_reg *, struct fw_info *);
374 static void bce_start_cpu (struct bce_softc *, struct cpu_reg *);
375 static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *);
376 static void bce_start_rxp_cpu (struct bce_softc *);
377 static void bce_init_rxp_cpu (struct bce_softc *);
378 static void bce_init_txp_cpu (struct bce_softc *);
379 static void bce_init_tpat_cpu (struct bce_softc *);
380 static void bce_init_cp_cpu (struct bce_softc *);
381 static void bce_init_com_cpu (struct bce_softc *);
382 static void bce_init_cpus (struct bce_softc *);
384 static void bce_print_adapter_info (struct bce_softc *);
385 static void bce_probe_pci_caps (device_t, struct bce_softc *);
386 static void bce_stop (struct bce_softc *);
387 static int bce_reset (struct bce_softc *, u32);
388 static int bce_chipinit (struct bce_softc *);
389 static int bce_blockinit (struct bce_softc *);
391 static int bce_init_tx_chain (struct bce_softc *);
392 static void bce_free_tx_chain (struct bce_softc *);
394 static int bce_get_rx_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
395 static int bce_init_rx_chain (struct bce_softc *);
396 static void bce_fill_rx_chain (struct bce_softc *);
397 static void bce_free_rx_chain (struct bce_softc *);
399 #ifdef BCE_JUMBO_HDRSPLIT
400 static int bce_get_pg_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *);
401 static int bce_init_pg_chain (struct bce_softc *);
402 static void bce_fill_pg_chain (struct bce_softc *);
403 static void bce_free_pg_chain (struct bce_softc *);
406 static int bce_tx_encap (struct bce_softc *, struct mbuf **);
407 static void bce_start_locked (struct ifnet *);
408 static void bce_start (struct ifnet *);
409 static int bce_ioctl (struct ifnet *, u_long, caddr_t);
410 static void bce_watchdog (struct bce_softc *);
411 static int bce_ifmedia_upd (struct ifnet *);
412 static void bce_ifmedia_upd_locked (struct ifnet *);
413 static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *);
414 static void bce_init_locked (struct bce_softc *);
415 static void bce_init (void *);
416 static void bce_mgmt_init_locked (struct bce_softc *sc);
418 static void bce_init_ctx (struct bce_softc *);
419 static void bce_get_mac_addr (struct bce_softc *);
420 static void bce_set_mac_addr (struct bce_softc *);
421 static void bce_phy_intr (struct bce_softc *);
422 static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
423 static void bce_rx_intr (struct bce_softc *);
424 static void bce_tx_intr (struct bce_softc *);
425 static void bce_disable_intr (struct bce_softc *);
426 static void bce_enable_intr (struct bce_softc *, int);
428 static void bce_intr (void *);
429 static void bce_set_rx_mode (struct bce_softc *);
430 static void bce_stats_update (struct bce_softc *);
431 static void bce_tick (void *);
432 static void bce_pulse (void *);
433 static void bce_add_sysctls (struct bce_softc *);
436 /****************************************************************************/
437 /* FreeBSD device dispatch table. */
438 /****************************************************************************/
439 static device_method_t bce_methods[] = {
440 /* Device interface (device_if.h) */
441 DEVMETHOD(device_probe, bce_probe),
442 DEVMETHOD(device_attach, bce_attach),
443 DEVMETHOD(device_detach, bce_detach),
444 DEVMETHOD(device_shutdown, bce_shutdown),
445 /* Supported by device interface but not used here. */
446 /* DEVMETHOD(device_identify, bce_identify), */
447 /* DEVMETHOD(device_suspend, bce_suspend), */
448 /* DEVMETHOD(device_resume, bce_resume), */
449 /* DEVMETHOD(device_quiesce, bce_quiesce), */
451 /* Bus interface (bus_if.h) */
452 DEVMETHOD(bus_print_child, bus_generic_print_child),
453 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
455 /* MII interface (miibus_if.h) */
456 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
457 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
458 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
459 /* Supported by MII interface but not used here. */
460 /* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */
461 /* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */
466 static driver_t bce_driver = {
469 sizeof(struct bce_softc)
472 static devclass_t bce_devclass;
474 MODULE_DEPEND(bce, pci, 1, 1, 1);
475 MODULE_DEPEND(bce, ether, 1, 1, 1);
476 MODULE_DEPEND(bce, miibus, 1, 1, 1);
478 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
479 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
482 /****************************************************************************/
483 /* Tunable device values */
484 /****************************************************************************/
485 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
487 /* Allowable values are TRUE or FALSE */
488 static int bce_tso_enable = TRUE;
489 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
490 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
491 "TSO Enable/Disable");
493 /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
494 /* ToDo: Add MSI-X support. */
495 static int bce_msi_enable = 1;
496 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
497 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
498 "MSI-X|MSI|INTx selector");
500 /* ToDo: Add tunable to enable/disable strict MTU handling. */
501 /* Currently allows "loose" RX MTU checking (i.e. sets the */
502 /* H/W RX MTU to the size of the largest receive buffer, or */
503 /* 2048 bytes). This will cause a UNH failure but is more */
504 /* desireable from a functional perspective. */
507 /****************************************************************************/
508 /* Device probe function. */
510 /* Compares the device to the driver's list of supported devices and */
511 /* reports back to the OS whether this is the right driver for the device. */
514 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
515 /****************************************************************************/
517 bce_probe(device_t dev)
520 struct bce_softc *sc;
522 u16 vid = 0, did = 0, svid = 0, sdid = 0;
526 sc = device_get_softc(dev);
527 bzero(sc, sizeof(struct bce_softc));
528 sc->bce_unit = device_get_unit(dev);
531 /* Get the data for the device to be probed. */
532 vid = pci_get_vendor(dev);
533 did = pci_get_device(dev);
534 svid = pci_get_subvendor(dev);
535 sdid = pci_get_subdevice(dev);
537 DBPRINT(sc, BCE_EXTREME_LOAD,
538 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
539 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
541 /* Look through the list of known devices for a match. */
542 while(t->bce_name != NULL) {
544 if ((vid == t->bce_vid) && (did == t->bce_did) &&
545 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
546 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
548 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
553 /* Print out the device identity. */
554 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
556 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
557 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
559 device_set_desc_copy(dev, descbuf);
560 free(descbuf, M_TEMP);
561 return(BUS_PROBE_DEFAULT);
570 /****************************************************************************/
571 /* PCI Capabilities Probe Function. */
573 /* Walks the PCI capabiites list for the device to find what features are */
578 /****************************************************************************/
580 bce_print_adapter_info(struct bce_softc *sc)
584 DBENTER(BCE_VERBOSE_LOAD);
586 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
587 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
588 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
591 if (sc->bce_flags & BCE_PCIE_FLAG) {
592 printf("Bus (PCIe x%d, ", sc->link_width);
593 switch (sc->link_speed) {
594 case 1: printf("2.5Gbps); "); break;
595 case 2: printf("5Gbps); "); break;
596 default: printf("Unknown link speed); ");
599 printf("Bus (PCI%s, %s, %dMHz); ",
600 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
601 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
605 /* Firmware version and device features. */
606 printf("B/C (%s); Flags (", sc->bce_bc_ver);
608 #ifdef BCE_JUMBO_HDRSPLIT
613 if (sc->bce_flags & BCE_USING_MSI_FLAG) {
614 if (i > 0) printf("|");
618 if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
619 if (i > 0) printf("|");
620 printf("MSI-X"); i++;
623 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
624 if (i > 0) printf("|");
628 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
629 if (i > 0) printf("|");
630 printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
635 DBEXIT(BCE_VERBOSE_LOAD);
639 /****************************************************************************/
640 /* PCI Capabilities Probe Function. */
642 /* Walks the PCI capabiites list for the device to find what features are */
647 /****************************************************************************/
649 bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
653 DBENTER(BCE_VERBOSE_LOAD);
655 /* Check if PCI-X capability is enabled. */
656 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) {
658 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
661 /* Check if PCIe capability is enabled. */
662 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) {
664 u16 link_status = pci_read_config(dev, reg + 0x12, 2);
665 DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = 0x%08X\n",
667 sc->link_speed = link_status & 0xf;
668 sc->link_width = (link_status >> 4) & 0x3f;
669 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
670 sc->bce_flags |= BCE_PCIE_FLAG;
674 /* Check if MSI capability is enabled. */
675 if (pci_find_extcap(dev, PCIY_MSI, ®) == 0) {
677 sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
680 /* Check if MSI-X capability is enabled. */
681 if (pci_find_extcap(dev, PCIY_MSIX, ®) == 0) {
683 sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
686 DBEXIT(BCE_VERBOSE_LOAD);
690 /****************************************************************************/
691 /* Device attach function. */
693 /* Allocates device resources, performs secondary chip identification, */
694 /* resets and initializes the hardware, and initializes driver instance */
698 /* 0 on success, positive value on failure. */
699 /****************************************************************************/
701 bce_attach(device_t dev)
703 struct bce_softc *sc;
706 int error, rid, rc = 0;
708 sc = device_get_softc(dev);
711 DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
713 sc->bce_unit = device_get_unit(dev);
715 /* Set initial device and PHY flags */
717 sc->bce_phy_flags = 0;
719 pci_enable_busmaster(dev);
721 /* Allocate PCI memory resources. */
723 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
726 if (sc->bce_res_mem == NULL) {
727 BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
730 goto bce_attach_fail;
733 /* Get various resource handles. */
734 sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
735 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
736 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
738 bce_probe_pci_caps(dev, sc);
742 /* Try allocating MSI-X interrupts. */
743 if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
744 (bce_msi_enable >= 2) &&
745 ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
746 &rid, RF_ACTIVE)) != NULL)) {
748 msi_needed = sc->bce_msi_count = 1;
750 if (((error = pci_alloc_msix(dev, &sc->bce_msi_count)) != 0) ||
751 (sc->bce_msi_count != msi_needed)) {
752 BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
753 "Received = %d, error = %d\n", __FILE__, __LINE__,
754 msi_needed, sc->bce_msi_count, error);
755 sc->bce_msi_count = 0;
756 pci_release_msi(dev);
757 bus_release_resource(dev, SYS_RES_MEMORY, rid,
759 sc->bce_res_irq = NULL;
761 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
763 sc->bce_flags |= BCE_USING_MSIX_FLAG;
764 sc->bce_intr = bce_intr;
769 /* Try allocating a MSI interrupt. */
770 if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
771 (bce_msi_enable >= 1) && (sc->bce_msi_count == 0)) {
772 sc->bce_msi_count = 1;
773 if ((error = pci_alloc_msi(dev, &sc->bce_msi_count)) != 0) {
774 BCE_PRINTF("%s(%d): MSI allocation failed! error = %d\n",
775 __FILE__, __LINE__, error);
776 sc->bce_msi_count = 0;
777 pci_release_msi(dev);
779 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI interrupt.\n",
781 sc->bce_flags |= BCE_USING_MSI_FLAG;
782 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
783 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
784 sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
786 sc->bce_intr = bce_intr;
790 /* Try allocating a legacy interrupt. */
791 if (sc->bce_msi_count == 0) {
792 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
795 sc->bce_intr = bce_intr;
798 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
799 &rid, RF_SHAREABLE | RF_ACTIVE);
801 sc->bce_irq_rid = rid;
803 /* Report any IRQ allocation errors. */
804 if (sc->bce_res_irq == NULL) {
805 BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
808 goto bce_attach_fail;
811 /* Initialize mutex for the current device instance. */
812 BCE_LOCK_INIT(sc, device_get_nameunit(dev));
815 * Configure byte swap and enable indirect register access.
816 * Rely on CPU to do target byte swapping on big endian systems.
817 * Access to registers outside of PCI configurtion space are not
818 * valid until this is done.
820 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
821 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
822 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
824 /* Save ASIC revsion info. */
825 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
827 /* Weed out any non-production controller revisions. */
828 switch(BCE_CHIP_ID(sc)) {
829 case BCE_CHIP_ID_5706_A0:
830 case BCE_CHIP_ID_5706_A1:
831 case BCE_CHIP_ID_5708_A0:
832 case BCE_CHIP_ID_5708_B0:
833 case BCE_CHIP_ID_5709_A0:
834 case BCE_CHIP_ID_5709_B0:
835 case BCE_CHIP_ID_5709_B1:
836 case BCE_CHIP_ID_5709_B2:
837 BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
839 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
840 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
842 goto bce_attach_fail;
846 * The embedded PCIe to PCI-X bridge (EPB)
847 * in the 5708 cannot address memory above
848 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
850 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
851 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
853 sc->max_bus_addr = BUS_SPACE_MAXADDR;
856 * Find the base address for shared memory access.
857 * Newer versions of bootcode use a signature and offset
858 * while older versions use a fixed address.
860 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
861 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
862 /* Multi-port devices use different offsets in shared memory. */
863 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
864 (pci_get_function(sc->bce_dev) << 2));
866 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
868 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
869 __FUNCTION__, sc->bce_shmem_base);
871 /* Fetch the bootcode revision. */
872 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
873 for (int i = 0, j = 0; i < 3; i++) {
876 num = (u8) (val >> (24 - (i * 8)));
877 for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
878 if (num >= k || !skip0 || k == 1) {
879 sc->bce_bc_ver[j++] = (num / k) + '0';
884 sc->bce_bc_ver[j++] = '.';
887 /* Check if any management firwmare is running. */
888 val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
889 if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
890 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
892 /* Allow time for firmware to enter the running state. */
893 for (int i = 0; i < 30; i++) {
894 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
895 if (val & BCE_CONDITION_MFW_RUN_MASK)
901 /* Check the current bootcode state. */
902 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
903 val &= BCE_CONDITION_MFW_RUN_MASK;
904 if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
905 val != BCE_CONDITION_MFW_RUN_NONE) {
906 u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
909 for (int j = 0; j < 3; j++) {
910 val = bce_reg_rd_ind(sc, addr + j * 4);
912 memcpy(&sc->bce_mfw_ver[i], &val, 4);
917 /* Get PCI bus information (speed and type). */
918 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
919 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
922 sc->bce_flags |= BCE_PCIX_FLAG;
924 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
926 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
928 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
929 sc->bus_speed_mhz = 133;
932 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
933 sc->bus_speed_mhz = 100;
936 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
937 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
938 sc->bus_speed_mhz = 66;
941 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
942 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
943 sc->bus_speed_mhz = 50;
946 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
947 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
948 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
949 sc->bus_speed_mhz = 33;
953 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
954 sc->bus_speed_mhz = 66;
956 sc->bus_speed_mhz = 33;
959 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
960 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
962 /* Reset the controller and announce to bootcode that driver is present. */
963 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
964 BCE_PRINTF("%s(%d): Controller reset failed!\n",
967 goto bce_attach_fail;
970 /* Initialize the controller. */
971 if (bce_chipinit(sc)) {
972 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
975 goto bce_attach_fail;
978 /* Perform NVRAM test. */
979 if (bce_nvram_test(sc)) {
980 BCE_PRINTF("%s(%d): NVRAM test failed!\n",
983 goto bce_attach_fail;
986 /* Fetch the permanent Ethernet MAC address. */
987 bce_get_mac_addr(sc);
990 * Trip points control how many BDs
991 * should be ready before generating an
992 * interrupt while ticks control how long
993 * a BD can sit in the chain before
994 * generating an interrupt. Set the default
995 * values for the RX and TX chains.
999 /* Force more frequent interrupts. */
1000 sc->bce_tx_quick_cons_trip_int = 1;
1001 sc->bce_tx_quick_cons_trip = 1;
1002 sc->bce_tx_ticks_int = 0;
1003 sc->bce_tx_ticks = 0;
1005 sc->bce_rx_quick_cons_trip_int = 1;
1006 sc->bce_rx_quick_cons_trip = 1;
1007 sc->bce_rx_ticks_int = 0;
1008 sc->bce_rx_ticks = 0;
1010 /* Improve throughput at the expense of increased latency. */
1011 sc->bce_tx_quick_cons_trip_int = 20;
1012 sc->bce_tx_quick_cons_trip = 20;
1013 sc->bce_tx_ticks_int = 80;
1014 sc->bce_tx_ticks = 80;
1016 sc->bce_rx_quick_cons_trip_int = 6;
1017 sc->bce_rx_quick_cons_trip = 6;
1018 sc->bce_rx_ticks_int = 18;
1019 sc->bce_rx_ticks = 18;
1022 /* Update statistics once every second. */
1023 sc->bce_stats_ticks = 1000000 & 0xffff00;
1025 /* Find the media type for the adapter. */
1028 /* Store data needed by PHY driver for backplane applications */
1029 sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1030 sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);
1032 /* Allocate DMA memory resources. */
1033 if (bce_dma_alloc(dev)) {
1034 BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
1035 __FILE__, __LINE__);
1037 goto bce_attach_fail;
1040 /* Allocate an ifnet structure. */
1041 ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
1043 BCE_PRINTF("%s(%d): Interface allocation failed!\n",
1044 __FILE__, __LINE__);
1046 goto bce_attach_fail;
1049 /* Initialize the ifnet interface. */
1051 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1052 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1053 ifp->if_ioctl = bce_ioctl;
1054 ifp->if_start = bce_start;
1055 ifp->if_init = bce_init;
1056 ifp->if_mtu = ETHERMTU;
1058 if (bce_tso_enable) {
1059 ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
1060 ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
1062 ifp->if_hwassist = BCE_IF_HWASSIST;
1063 ifp->if_capabilities = BCE_IF_CAPABILITIES;
1066 ifp->if_capenable = ifp->if_capabilities;
1069 * Assume standard mbuf sizes for buffer allocation.
1070 * This may change later if the MTU size is set to
1071 * something other than 1500.
1073 #ifdef BCE_JUMBO_HDRSPLIT
1074 sc->rx_bd_mbuf_alloc_size = MHLEN;
1075 /* Make sure offset is 16 byte aligned for hardware. */
1076 sc->rx_bd_mbuf_align_pad = roundup2((MSIZE - MHLEN), 16) -
1078 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
1079 sc->rx_bd_mbuf_align_pad;
1080 sc->pg_bd_mbuf_alloc_size = MCLBYTES;
1082 sc->rx_bd_mbuf_alloc_size = MCLBYTES;
1083 sc->rx_bd_mbuf_align_pad = roundup2(MCLBYTES, 16) - MCLBYTES;
1084 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
1085 sc->rx_bd_mbuf_align_pad;
1088 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
1089 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1090 IFQ_SET_READY(&ifp->if_snd);
1092 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
1093 ifp->if_baudrate = IF_Mbps(2500ULL);
1095 ifp->if_baudrate = IF_Mbps(1000);
1097 /* Check for an MII child bus by probing the PHY. */
1098 if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
1100 BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
1101 __FILE__, __LINE__);
1103 goto bce_attach_fail;
1106 /* Attach to the Ethernet interface list. */
1107 ether_ifattach(ifp, sc->eaddr);
1109 #if __FreeBSD_version < 500000
1110 callout_init(&sc->bce_tick_callout);
1111 callout_init(&sc->bce_pulse_callout);
1113 callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
1114 callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
1117 /* Hookup IRQ last. */
1118 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
1119 NULL, bce_intr, sc, &sc->bce_intrhand);
1122 BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
1123 __FILE__, __LINE__);
1125 goto bce_attach_exit;
1129 * At this point we've acquired all the resources
1130 * we need to run so there's no turning back, we're
1131 * cleared for launch.
1134 /* Print some important debugging info. */
1135 DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
1137 /* Add the supported sysctls to the kernel. */
1138 bce_add_sysctls(sc);
1143 * The chip reset earlier notified the bootcode that
1144 * a driver is present. We now need to start our pulse
1145 * routine so that the bootcode is reminded that we're
1150 bce_mgmt_init_locked(sc);
1153 /* Finally, print some useful adapter info */
1154 bce_print_adapter_info(sc);
1155 DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
1158 goto bce_attach_exit;
1161 bce_release_resources(sc);
1165 DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1171 /****************************************************************************/
1172 /* Device detach function. */
1174 /* Stops the controller, resets the controller, and releases resources. */
1177 /* 0 on success, positive value on failure. */
1178 /****************************************************************************/
1180 bce_detach(device_t dev)
1182 struct bce_softc *sc = device_get_softc(dev);
1186 DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1190 /* Stop and reset the controller. */
1193 /* Stop the pulse so the bootcode can go to driver absent state. */
1194 callout_stop(&sc->bce_pulse_callout);
1197 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1198 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1200 msg = BCE_DRV_MSG_CODE_UNLOAD;
1205 ether_ifdetach(ifp);
1207 /* If we have a child device on the MII bus remove it too. */
1208 bus_generic_detach(dev);
1209 device_delete_child(dev, sc->bce_miibus);
1211 /* Release all remaining resources. */
1212 bce_release_resources(sc);
1214 DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1220 /****************************************************************************/
1221 /* Device shutdown function. */
1223 /* Stops and resets the controller. */
1226 /* 0 on success, positive value on failure. */
1227 /****************************************************************************/
1229 bce_shutdown(device_t dev)
1231 struct bce_softc *sc = device_get_softc(dev);
1234 DBENTER(BCE_VERBOSE);
1238 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1239 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1241 msg = BCE_DRV_MSG_CODE_UNLOAD;
1245 DBEXIT(BCE_VERBOSE);
1252 /****************************************************************************/
1253 /* Register read. */
1256 /* The value of the register. */
1257 /****************************************************************************/
1259 bce_reg_rd(struct bce_softc *sc, u32 offset)
1261 u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
1262 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1263 __FUNCTION__, offset, val);
1268 /****************************************************************************/
1269 /* Register write (16 bit). */
1273 /****************************************************************************/
1275 bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
1277 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
1278 __FUNCTION__, offset, val);
1279 bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
1283 /****************************************************************************/
1284 /* Register write. */
1288 /****************************************************************************/
1290 bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
1292 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1293 __FUNCTION__, offset, val);
1294 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
1298 /****************************************************************************/
1299 /* Indirect register read. */
1301 /* Reads NetXtreme II registers using an index/data register pair in PCI */
1302 /* configuration space. Using this mechanism avoids issues with posted */
1303 /* reads but is much slower than memory-mapped I/O. */
1306 /* The value of the register. */
1307 /****************************************************************************/
1309 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
1314 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1318 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1319 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1320 __FUNCTION__, offset, val);
1324 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1329 /****************************************************************************/
1330 /* Indirect register write. */
1332 /* Writes NetXtreme II registers using an index/data register pair in PCI */
1333 /* configuration space. Using this mechanism avoids issues with posted */
1334 /* writes but is muchh slower than memory-mapped I/O. */
1338 /****************************************************************************/
1340 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1345 DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1346 __FUNCTION__, offset, val);
1348 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1349 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1353 /****************************************************************************/
1354 /* Shared memory write. */
1356 /* Writes NetXtreme II shared memory region. */
1360 /****************************************************************************/
1362 bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val)
1364 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1368 /****************************************************************************/
1369 /* Shared memory read. */
1371 /* Reads NetXtreme II shared memory region. */
1374 /* The 32 bit value read. */
1375 /****************************************************************************/
1377 bce_shmem_rd(struct bce_softc *sc, u32 offset)
1379 return (bce_reg_rd_ind(sc, sc->bce_shmem_base + offset));
1384 /****************************************************************************/
1385 /* Context memory read. */
1387 /* The NetXtreme II controller uses context memory to track connection */
1388 /* information for L2 and higher network protocols. */
1391 /* The requested 32 bit value of context memory. */
1392 /****************************************************************************/
1394 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
1396 u32 idx, offset, retry_cnt = 5, val;
1398 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1399 BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1400 __FUNCTION__, cid_addr));
1402 offset = ctx_offset + cid_addr;
1404 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1405 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1407 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
1409 for (idx = 0; idx < retry_cnt; idx++) {
1410 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1411 if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
1416 if (val & BCE_CTX_CTX_CTRL_READ_REQ)
1417 BCE_PRINTF("%s(%d); Unable to read CTX memory: "
1418 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1419 __FILE__, __LINE__, cid_addr, ctx_offset);
1421 val = REG_RD(sc, BCE_CTX_CTX_DATA);
1423 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1424 val = REG_RD(sc, BCE_CTX_DATA);
1427 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1428 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
1435 /****************************************************************************/
1436 /* Context memory write. */
1438 /* The NetXtreme II controller uses context memory to track connection */
1439 /* information for L2 and higher network protocols. */
1443 /****************************************************************************/
1445 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
1447 u32 idx, offset = ctx_offset + cid_addr;
1448 u32 val, retry_cnt = 5;
1450 DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1451 "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
1453 DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1454 BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1455 __FUNCTION__, cid_addr));
1457 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
1458 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
1460 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1461 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1463 for (idx = 0; idx < retry_cnt; idx++) {
1464 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1465 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1470 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
1471 BCE_PRINTF("%s(%d); Unable to write CTX memory: "
1472 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1473 __FILE__, __LINE__, cid_addr, ctx_offset);
1476 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1477 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1482 /****************************************************************************/
1483 /* PHY register read. */
1485 /* Implements register reads on the MII bus. */
1488 /* The value of the register. */
1489 /****************************************************************************/
1491 bce_miibus_read_reg(device_t dev, int phy, int reg)
1493 struct bce_softc *sc;
1497 sc = device_get_softc(dev);
1499 /* Make sure we are accessing the correct PHY address. */
1500 if (phy != sc->bce_phy_addr) {
1501 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
1505 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1506 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1507 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1509 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1510 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1516 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1517 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1518 BCE_EMAC_MDIO_COMM_START_BUSY;
1519 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1521 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1524 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1525 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1528 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1529 val &= BCE_EMAC_MDIO_COMM_DATA;
1535 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1536 BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1537 __FILE__, __LINE__, phy, reg);
1540 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1544 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1545 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1546 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1548 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1549 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1554 DB_PRINT_PHY_REG(reg, val);
1555 return (val & 0xffff);
1560 /****************************************************************************/
1561 /* PHY register write. */
1563 /* Implements register writes on the MII bus. */
1566 /* The value of the register. */
1567 /****************************************************************************/
1569 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1571 struct bce_softc *sc;
1575 sc = device_get_softc(dev);
1577 /* Make sure we are accessing the correct PHY address. */
1578 if (phy != sc->bce_phy_addr) {
1579 DBPRINT(sc, BCE_INSANE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1583 DB_PRINT_PHY_REG(reg, val);
1585 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1586 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1587 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1589 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1590 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1595 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1596 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1597 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1598 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1600 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1603 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1604 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1610 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1611 BCE_PRINTF("%s(%d): PHY write timeout!\n",
1612 __FILE__, __LINE__);
1614 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1615 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1616 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1618 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1619 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1628 /****************************************************************************/
1629 /* MII bus status change. */
1631 /* Called by the MII bus driver when the PHY establishes link to set the */
1632 /* MAC interface registers. */
1636 /****************************************************************************/
1638 bce_miibus_statchg(device_t dev)
1640 struct bce_softc *sc;
1641 struct mii_data *mii;
1644 sc = device_get_softc(dev);
1646 DBENTER(BCE_VERBOSE_PHY);
1648 mii = device_get_softc(sc->bce_miibus);
1650 val = REG_RD(sc, BCE_EMAC_MODE);
1651 val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1652 BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1655 /* Set MII or GMII interface based on the speed negotiated by the PHY. */
1656 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1658 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1659 DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1660 val |= BCE_EMAC_MODE_PORT_MII_10;
1665 DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1666 val |= BCE_EMAC_MODE_PORT_MII;
1669 DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1670 val |= BCE_EMAC_MODE_25G;
1674 DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1675 val |= BCE_EMAC_MODE_PORT_GMII;
1678 DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
1680 val |= BCE_EMAC_MODE_PORT_GMII;
1683 /* Set half or full duplex based on the duplicity negotiated by the PHY. */
1684 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1685 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1686 val |= BCE_EMAC_MODE_HALF_DUPLEX;
1688 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1690 REG_WR(sc, BCE_EMAC_MODE, val);
1693 /* ToDo: Enable flow control support in brgphy and bge. */
1694 /* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1695 if (mii->mii_media_active & IFM_FLAG0)
1696 BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1697 if (mii->mii_media_active & IFM_FLAG1)
1698 BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1701 DBEXIT(BCE_VERBOSE_PHY);
1705 /****************************************************************************/
1706 /* Acquire NVRAM lock. */
1708 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1709 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1710 /* for use by the driver. */
1713 /* 0 on success, positive value on failure. */
1714 /****************************************************************************/
1716 bce_acquire_nvram_lock(struct bce_softc *sc)
1721 DBENTER(BCE_VERBOSE_NVRAM);
1723 /* Request access to the flash interface. */
1724 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1725 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1726 val = REG_RD(sc, BCE_NVM_SW_ARB);
1727 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1733 if (j >= NVRAM_TIMEOUT_COUNT) {
1734 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1738 DBEXIT(BCE_VERBOSE_NVRAM);
1743 /****************************************************************************/
1744 /* Release NVRAM lock. */
1746 /* When the caller is finished accessing NVRAM the lock must be released. */
1747 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1748 /* for use by the driver. */
1751 /* 0 on success, positive value on failure. */
1752 /****************************************************************************/
1754 bce_release_nvram_lock(struct bce_softc *sc)
1759 DBENTER(BCE_VERBOSE_NVRAM);
1762 * Relinquish nvram interface.
1764 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1766 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1767 val = REG_RD(sc, BCE_NVM_SW_ARB);
1768 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1774 if (j >= NVRAM_TIMEOUT_COUNT) {
1775 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1779 DBEXIT(BCE_VERBOSE_NVRAM);
1784 #ifdef BCE_NVRAM_WRITE_SUPPORT
1785 /****************************************************************************/
1786 /* Enable NVRAM write access. */
1788 /* Before writing to NVRAM the caller must enable NVRAM writes. */
1791 /* 0 on success, positive value on failure. */
1792 /****************************************************************************/
1794 bce_enable_nvram_write(struct bce_softc *sc)
1799 DBENTER(BCE_VERBOSE_NVRAM);
1801 val = REG_RD(sc, BCE_MISC_CFG);
1802 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1804 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
1807 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1808 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1810 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1813 val = REG_RD(sc, BCE_NVM_COMMAND);
1814 if (val & BCE_NVM_COMMAND_DONE)
1818 if (j >= NVRAM_TIMEOUT_COUNT) {
1819 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1824 DBENTER(BCE_VERBOSE_NVRAM);
1829 /****************************************************************************/
1830 /* Disable NVRAM write access. */
1832 /* When the caller is finished writing to NVRAM write access must be */
1837 /****************************************************************************/
1839 bce_disable_nvram_write(struct bce_softc *sc)
1843 DBENTER(BCE_VERBOSE_NVRAM);
1845 val = REG_RD(sc, BCE_MISC_CFG);
1846 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1848 DBEXIT(BCE_VERBOSE_NVRAM);
1854 /****************************************************************************/
1855 /* Enable NVRAM access. */
1857 /* Before accessing NVRAM for read or write operations the caller must */
1858 /* enabled NVRAM access. */
1862 /****************************************************************************/
1864 bce_enable_nvram_access(struct bce_softc *sc)
1868 DBENTER(BCE_VERBOSE_NVRAM);
1870 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1871 /* Enable both bits, even on read. */
1872 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1873 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1875 DBEXIT(BCE_VERBOSE_NVRAM);
1879 /****************************************************************************/
1880 /* Disable NVRAM access. */
1882 /* When the caller is finished accessing NVRAM access must be disabled. */
1886 /****************************************************************************/
1888 bce_disable_nvram_access(struct bce_softc *sc)
1892 DBENTER(BCE_VERBOSE_NVRAM);
1894 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1896 /* Disable both bits, even after read. */
1897 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1898 val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1899 BCE_NVM_ACCESS_ENABLE_WR_EN));
1901 DBEXIT(BCE_VERBOSE_NVRAM);
1905 #ifdef BCE_NVRAM_WRITE_SUPPORT
1906 /****************************************************************************/
1907 /* Erase NVRAM page before writing. */
1909 /* Non-buffered flash parts require that a page be erased before it is */
1913 /* 0 on success, positive value on failure. */
1914 /****************************************************************************/
1916 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1921 DBENTER(BCE_VERBOSE_NVRAM);
1923 /* Buffered flash doesn't require an erase. */
1924 if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
1925 goto bce_nvram_erase_page_exit;
1927 /* Build an erase command. */
1928 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1929 BCE_NVM_COMMAND_DOIT;
1932 * Clear the DONE bit separately, set the NVRAM adress to erase,
1933 * and issue the erase command.
1935 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1936 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1937 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1939 /* Wait for completion. */
1940 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1945 val = REG_RD(sc, BCE_NVM_COMMAND);
1946 if (val & BCE_NVM_COMMAND_DONE)
1950 if (j >= NVRAM_TIMEOUT_COUNT) {
1951 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1955 bce_nvram_erase_page_exit:
1956 DBEXIT(BCE_VERBOSE_NVRAM);
1959 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1962 /****************************************************************************/
1963 /* Read a dword (32 bits) from NVRAM. */
1965 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1966 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1969 /* 0 on success and the 32 bit value read, positive value on failure. */
1970 /****************************************************************************/
1972 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1978 DBENTER(BCE_EXTREME_NVRAM);
1980 /* Build the command word. */
1981 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1983 /* Calculate the offset for buffered flash if translation is used. */
1984 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1985 offset = ((offset / sc->bce_flash_info->page_size) <<
1986 sc->bce_flash_info->page_bits) +
1987 (offset % sc->bce_flash_info->page_size);
1991 * Clear the DONE bit separately, set the address to read,
1992 * and issue the read.
1994 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1995 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1996 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1998 /* Wait for completion. */
1999 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
2004 val = REG_RD(sc, BCE_NVM_COMMAND);
2005 if (val & BCE_NVM_COMMAND_DONE) {
2006 val = REG_RD(sc, BCE_NVM_READ);
2008 val = bce_be32toh(val);
2009 memcpy(ret_val, &val, 4);
2014 /* Check for errors. */
2015 if (i >= NVRAM_TIMEOUT_COUNT) {
2016 BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
2017 __FILE__, __LINE__, offset);
2021 DBEXIT(BCE_EXTREME_NVRAM);
2026 #ifdef BCE_NVRAM_WRITE_SUPPORT
2027 /****************************************************************************/
2028 /* Write a dword (32 bits) to NVRAM. */
2030 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
2031 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
2032 /* enabled NVRAM write access. */
2035 /* 0 on success, positive value on failure. */
2036 /****************************************************************************/
2038 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
2044 DBENTER(BCE_VERBOSE_NVRAM);
2046 /* Build the command word. */
2047 cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
2049 /* Calculate the offset for buffered flash if translation is used. */
2050 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2051 offset = ((offset / sc->bce_flash_info->page_size) <<
2052 sc->bce_flash_info->page_bits) +
2053 (offset % sc->bce_flash_info->page_size);
2057 * Clear the DONE bit separately, convert NVRAM data to big-endian,
2058 * set the NVRAM address to write, and issue the write command
2060 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2061 memcpy(&val32, val, 4);
2062 val32 = htobe32(val32);
2063 REG_WR(sc, BCE_NVM_WRITE, val32);
2064 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2065 REG_WR(sc, BCE_NVM_COMMAND, cmd);
2067 /* Wait for completion. */
2068 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2071 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
2074 if (j >= NVRAM_TIMEOUT_COUNT) {
2075 BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
2076 __FILE__, __LINE__, offset);
2080 DBEXIT(BCE_VERBOSE_NVRAM);
2083 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2086 /****************************************************************************/
2087 /* Initialize NVRAM access. */
2089 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
2090 /* access that device. */
2093 /* 0 on success, positive value on failure. */
2094 /****************************************************************************/
2096 bce_init_nvram(struct bce_softc *sc)
2099 int j, entry_count, rc = 0;
2100 struct flash_spec *flash;
2102 DBENTER(BCE_VERBOSE_NVRAM);
2104 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2105 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2106 sc->bce_flash_info = &flash_5709;
2107 goto bce_init_nvram_get_flash_size;
2110 /* Determine the selected interface. */
2111 val = REG_RD(sc, BCE_NVM_CFG1);
2113 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2116 * Flash reconfiguration is required to support additional
2117 * NVRAM devices not directly supported in hardware.
2118 * Check if the flash interface was reconfigured
2122 if (val & 0x40000000) {
2123 /* Flash interface reconfigured by bootcode. */
2125 DBPRINT(sc,BCE_INFO_LOAD,
2126 "bce_init_nvram(): Flash WAS reconfigured.\n");
2128 for (j = 0, flash = &flash_table[0]; j < entry_count;
2130 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2131 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2132 sc->bce_flash_info = flash;
2137 /* Flash interface not yet reconfigured. */
2140 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
2143 if (val & (1 << 23))
2144 mask = FLASH_BACKUP_STRAP_MASK;
2146 mask = FLASH_STRAP_MASK;
2148 /* Look for the matching NVRAM device configuration data. */
2149 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
2151 /* Check if the device matches any of the known devices. */
2152 if ((val & mask) == (flash->strapping & mask)) {
2153 /* Found a device match. */
2154 sc->bce_flash_info = flash;
2156 /* Request access to the flash interface. */
2157 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2160 /* Reconfigure the flash interface. */
2161 bce_enable_nvram_access(sc);
2162 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
2163 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
2164 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
2165 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
2166 bce_disable_nvram_access(sc);
2167 bce_release_nvram_lock(sc);
2174 /* Check if a matching device was found. */
2175 if (j == entry_count) {
2176 sc->bce_flash_info = NULL;
2177 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
2178 __FILE__, __LINE__);
2182 bce_init_nvram_get_flash_size:
2183 /* Write the flash config data to the shared memory interface. */
2184 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
2185 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
2187 sc->bce_flash_size = val;
2189 sc->bce_flash_size = sc->bce_flash_info->total_size;
2191 DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
2192 __FUNCTION__, sc->bce_flash_info->name,
2193 sc->bce_flash_info->total_size);
2195 DBEXIT(BCE_VERBOSE_NVRAM);
2200 /****************************************************************************/
2201 /* Read an arbitrary range of data from NVRAM. */
2203 /* Prepares the NVRAM interface for access and reads the requested data */
2204 /* into the supplied buffer. */
2207 /* 0 on success and the data read, positive value on failure. */
2208 /****************************************************************************/
2210 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
2214 u32 cmd_flags, offset32, len32, extra;
2216 DBENTER(BCE_VERBOSE_NVRAM);
2219 goto bce_nvram_read_exit;
2221 /* Request access to the flash interface. */
2222 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2223 goto bce_nvram_read_exit;
2225 /* Enable access to flash interface */
2226 bce_enable_nvram_access(sc);
2239 pre_len = 4 - (offset & 3);
2241 if (pre_len >= len32) {
2243 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
2246 cmd_flags = BCE_NVM_COMMAND_FIRST;
2249 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2254 memcpy(ret_buf, buf + (offset & 3), pre_len);
2262 extra = 4 - (len32 & 3);
2263 len32 = (len32 + 4) & ~3;
2270 cmd_flags = BCE_NVM_COMMAND_LAST;
2272 cmd_flags = BCE_NVM_COMMAND_FIRST |
2273 BCE_NVM_COMMAND_LAST;
2275 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2277 memcpy(ret_buf, buf, 4 - extra);
2279 else if (len32 > 0) {
2282 /* Read the first word. */
2286 cmd_flags = BCE_NVM_COMMAND_FIRST;
2288 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
2290 /* Advance to the next dword. */
2295 while (len32 > 4 && rc == 0) {
2296 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
2298 /* Advance to the next dword. */
2305 goto bce_nvram_read_locked_exit;
2307 cmd_flags = BCE_NVM_COMMAND_LAST;
2308 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2310 memcpy(ret_buf, buf, 4 - extra);
2313 bce_nvram_read_locked_exit:
2314 /* Disable access to flash interface and release the lock. */
2315 bce_disable_nvram_access(sc);
2316 bce_release_nvram_lock(sc);
2318 bce_nvram_read_exit:
2319 DBEXIT(BCE_VERBOSE_NVRAM);
2324 #ifdef BCE_NVRAM_WRITE_SUPPORT
2325 /****************************************************************************/
2326 /* Write an arbitrary range of data from NVRAM. */
2328 /* Prepares the NVRAM interface for write access and writes the requested */
2329 /* data from the supplied buffer. The caller is responsible for */
2330 /* calculating any appropriate CRCs. */
2333 /* 0 on success, positive value on failure. */
2334 /****************************************************************************/
2336 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
2339 u32 written, offset32, len32;
2340 u8 *buf, start[4], end[4];
2342 int align_start, align_end;
2344 DBENTER(BCE_VERBOSE_NVRAM);
2349 align_start = align_end = 0;
2351 if ((align_start = (offset32 & 3))) {
2353 len32 += align_start;
2354 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
2355 goto bce_nvram_write_exit;
2359 if ((len32 > 4) || !align_start) {
2360 align_end = 4 - (len32 & 3);
2362 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
2364 goto bce_nvram_write_exit;
2369 if (align_start || align_end) {
2370 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
2373 goto bce_nvram_write_exit;
2377 memcpy(buf, start, 4);
2381 memcpy(buf + len32 - 4, end, 4);
2383 memcpy(buf + align_start, data_buf, buf_size);
2387 while ((written < len32) && (rc == 0)) {
2388 u32 page_start, page_end, data_start, data_end;
2389 u32 addr, cmd_flags;
2391 u8 flash_buffer[264];
2393 /* Find the page_start addr */
2394 page_start = offset32 + written;
2395 page_start -= (page_start % sc->bce_flash_info->page_size);
2396 /* Find the page_end addr */
2397 page_end = page_start + sc->bce_flash_info->page_size;
2398 /* Find the data_start addr */
2399 data_start = (written == 0) ? offset32 : page_start;
2400 /* Find the data_end addr */
2401 data_end = (page_end > offset32 + len32) ?
2402 (offset32 + len32) : page_end;
2404 /* Request access to the flash interface. */
2405 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2406 goto bce_nvram_write_exit;
2408 /* Enable access to flash interface */
2409 bce_enable_nvram_access(sc);
2411 cmd_flags = BCE_NVM_COMMAND_FIRST;
2412 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2415 /* Read the whole page into the buffer
2416 * (non-buffer flash only) */
2417 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
2418 if (j == (sc->bce_flash_info->page_size - 4)) {
2419 cmd_flags |= BCE_NVM_COMMAND_LAST;
2421 rc = bce_nvram_read_dword(sc,
2427 goto bce_nvram_write_locked_exit;
2433 /* Enable writes to flash interface (unlock write-protect) */
2434 if ((rc = bce_enable_nvram_write(sc)) != 0)
2435 goto bce_nvram_write_locked_exit;
2437 /* Erase the page */
2438 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
2439 goto bce_nvram_write_locked_exit;
2441 /* Re-enable the write again for the actual write */
2442 bce_enable_nvram_write(sc);
2444 /* Loop to write back the buffer data from page_start to
2447 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2448 for (addr = page_start; addr < data_start;
2449 addr += 4, i += 4) {
2451 rc = bce_nvram_write_dword(sc, addr,
2452 &flash_buffer[i], cmd_flags);
2455 goto bce_nvram_write_locked_exit;
2461 /* Loop to write the new data from data_start to data_end */
2462 for (addr = data_start; addr < data_end; addr += 4, i++) {
2463 if ((addr == page_end - 4) ||
2464 ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
2465 (addr == data_end - 4))) {
2467 cmd_flags |= BCE_NVM_COMMAND_LAST;
2469 rc = bce_nvram_write_dword(sc, addr, buf,
2473 goto bce_nvram_write_locked_exit;
2479 /* Loop to write back the buffer data from data_end
2481 if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2482 for (addr = data_end; addr < page_end;
2483 addr += 4, i += 4) {
2485 if (addr == page_end-4) {
2486 cmd_flags = BCE_NVM_COMMAND_LAST;
2488 rc = bce_nvram_write_dword(sc, addr,
2489 &flash_buffer[i], cmd_flags);
2492 goto bce_nvram_write_locked_exit;
2498 /* Disable writes to flash interface (lock write-protect) */
2499 bce_disable_nvram_write(sc);
2501 /* Disable access to flash interface */
2502 bce_disable_nvram_access(sc);
2503 bce_release_nvram_lock(sc);
2505 /* Increment written */
2506 written += data_end - data_start;
2509 goto bce_nvram_write_exit;
2511 bce_nvram_write_locked_exit:
2512 bce_disable_nvram_write(sc);
2513 bce_disable_nvram_access(sc);
2514 bce_release_nvram_lock(sc);
2516 bce_nvram_write_exit:
2517 if (align_start || align_end)
2518 free(buf, M_DEVBUF);
2520 DBEXIT(BCE_VERBOSE_NVRAM);
2523 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2526 /****************************************************************************/
2527 /* Verifies that NVRAM is accessible and contains valid data. */
2529 /* Reads the configuration data from NVRAM and verifies that the CRC is */
2533 /* 0 on success, positive value on failure. */
2534 /****************************************************************************/
2536 bce_nvram_test(struct bce_softc *sc)
2538 u32 buf[BCE_NVRAM_SIZE / 4];
2539 u8 *data = (u8 *) buf;
2543 DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2546 * Check that the device NVRAM is valid by reading
2547 * the magic value at offset 0.
2549 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
2550 BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", __FILE__, __LINE__);
2551 goto bce_nvram_test_exit;
2555 * Verify that offset 0 of the NVRAM contains
2556 * a valid magic number.
2558 magic = bce_be32toh(buf[0]);
2559 if (magic != BCE_NVRAM_MAGIC) {
2561 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2563 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2564 goto bce_nvram_test_exit;
2568 * Verify that the device NVRAM includes valid
2569 * configuration data.
2571 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
2572 BCE_PRINTF("%s(%d): Unable to read Manufacturing Information from "
2573 "NVRAM!\n", __FILE__, __LINE__);
2574 goto bce_nvram_test_exit;
2577 csum = ether_crc32_le(data, 0x100);
2578 if (csum != BCE_CRC32_RESIDUAL) {
2580 BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2581 "Expected: 0x%08X, Found: 0x%08X\n",
2582 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2583 goto bce_nvram_test_exit;
2586 csum = ether_crc32_le(data + 0x100, 0x100);
2587 if (csum != BCE_CRC32_RESIDUAL) {
2589 BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2590 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2591 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2594 bce_nvram_test_exit:
2595 DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2600 /****************************************************************************/
2601 /* Identifies the current media type of the controller and sets the PHY */
2606 /****************************************************************************/
2608 bce_get_media(struct bce_softc *sc)
2612 DBENTER(BCE_VERBOSE);
2614 /* Assume PHY address for copper controllers. */
2615 sc->bce_phy_addr = 1;
2617 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
2618 u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
2619 u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2623 * The BCM5709S is software configurable
2624 * for Copper or SerDes operation.
2626 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2627 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for copper.\n");
2628 goto bce_get_media_exit;
2629 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2630 DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded for dual media.\n");
2631 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2632 goto bce_get_media_exit;
2635 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2636 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2638 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
2640 if (pci_get_function(sc->bce_dev) == 0) {
2645 DBPRINT(sc, BCE_INFO_LOAD,
2646 "BCM5709 s/w configured for SerDes.\n");
2647 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2649 DBPRINT(sc, BCE_INFO_LOAD,
2650 "BCM5709 s/w configured for Copper.\n");
2657 DBPRINT(sc, BCE_INFO_LOAD,
2658 "BCM5709 s/w configured for SerDes.\n");
2659 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2661 DBPRINT(sc, BCE_INFO_LOAD,
2662 "BCM5709 s/w configured for Copper.\n");
2666 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
2667 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
2669 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
2670 sc->bce_flags |= BCE_NO_WOL_FLAG;
2671 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
2672 sc->bce_phy_addr = 2;
2673 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
2674 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
2675 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
2676 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
2679 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
2680 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
2681 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
2684 DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
2685 "Using PHY address %d.\n", sc->bce_phy_addr);
2687 DBEXIT(BCE_VERBOSE);
2691 /****************************************************************************/
2692 /* Free any DMA memory owned by the driver. */
2694 /* Scans through each data structre that requires DMA memory and frees */
2695 /* the memory if allocated. */
2699 /****************************************************************************/
2701 bce_dma_free(struct bce_softc *sc)
2705 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2707 /* Free, unmap, and destroy the status block. */
2708 if (sc->status_block != NULL) {
2713 sc->status_block = NULL;
2716 if (sc->status_map != NULL) {
2720 bus_dmamap_destroy(sc->status_tag,
2722 sc->status_map = NULL;
2725 if (sc->status_tag != NULL) {
2726 bus_dma_tag_destroy(sc->status_tag);
2727 sc->status_tag = NULL;
2731 /* Free, unmap, and destroy the statistics block. */
2732 if (sc->stats_block != NULL) {
2737 sc->stats_block = NULL;
2740 if (sc->stats_map != NULL) {
2744 bus_dmamap_destroy(sc->stats_tag,
2746 sc->stats_map = NULL;
2749 if (sc->stats_tag != NULL) {
2750 bus_dma_tag_destroy(sc->stats_tag);
2751 sc->stats_tag = NULL;
2755 /* Free, unmap and destroy all context memory pages. */
2756 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
2757 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
2758 for (i = 0; i < sc->ctx_pages; i++ ) {
2759 if (sc->ctx_block[i] != NULL) {
2764 sc->ctx_block[i] = NULL;
2767 if (sc->ctx_map[i] != NULL) {
2774 sc->ctx_map[i] = NULL;
2778 /* Destroy the context memory tag. */
2779 if (sc->ctx_tag != NULL) {
2780 bus_dma_tag_destroy(sc->ctx_tag);
2786 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2787 for (i = 0; i < TX_PAGES; i++ ) {
2788 if (sc->tx_bd_chain[i] != NULL) {
2790 sc->tx_bd_chain_tag,
2792 sc->tx_bd_chain_map[i]);
2793 sc->tx_bd_chain[i] = NULL;
2796 if (sc->tx_bd_chain_map[i] != NULL) {
2798 sc->tx_bd_chain_tag,
2799 sc->tx_bd_chain_map[i]);
2801 sc->tx_bd_chain_tag,
2802 sc->tx_bd_chain_map[i]);
2803 sc->tx_bd_chain_map[i] = NULL;
2807 /* Destroy the TX buffer descriptor tag. */
2808 if (sc->tx_bd_chain_tag != NULL) {
2809 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2810 sc->tx_bd_chain_tag = NULL;
2814 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2815 for (i = 0; i < RX_PAGES; i++ ) {
2816 if (sc->rx_bd_chain[i] != NULL) {
2818 sc->rx_bd_chain_tag,
2820 sc->rx_bd_chain_map[i]);
2821 sc->rx_bd_chain[i] = NULL;
2824 if (sc->rx_bd_chain_map[i] != NULL) {
2826 sc->rx_bd_chain_tag,
2827 sc->rx_bd_chain_map[i]);
2829 sc->rx_bd_chain_tag,
2830 sc->rx_bd_chain_map[i]);
2831 sc->rx_bd_chain_map[i] = NULL;
2835 /* Destroy the RX buffer descriptor tag. */
2836 if (sc->rx_bd_chain_tag != NULL) {
2837 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2838 sc->rx_bd_chain_tag = NULL;
2842 #ifdef BCE_JUMBO_HDRSPLIT
2843 /* Free, unmap and destroy all page buffer descriptor chain pages. */
2844 for (i = 0; i < PG_PAGES; i++ ) {
2845 if (sc->pg_bd_chain[i] != NULL) {
2847 sc->pg_bd_chain_tag,
2849 sc->pg_bd_chain_map[i]);
2850 sc->pg_bd_chain[i] = NULL;
2853 if (sc->pg_bd_chain_map[i] != NULL) {
2855 sc->pg_bd_chain_tag,
2856 sc->pg_bd_chain_map[i]);
2858 sc->pg_bd_chain_tag,
2859 sc->pg_bd_chain_map[i]);
2860 sc->pg_bd_chain_map[i] = NULL;
2864 /* Destroy the page buffer descriptor tag. */
2865 if (sc->pg_bd_chain_tag != NULL) {
2866 bus_dma_tag_destroy(sc->pg_bd_chain_tag);
2867 sc->pg_bd_chain_tag = NULL;
2872 /* Unload and destroy the TX mbuf maps. */
2873 for (i = 0; i < TOTAL_TX_BD; i++) {
2874 if (sc->tx_mbuf_map[i] != NULL) {
2875 bus_dmamap_unload(sc->tx_mbuf_tag,
2876 sc->tx_mbuf_map[i]);
2877 bus_dmamap_destroy(sc->tx_mbuf_tag,
2878 sc->tx_mbuf_map[i]);
2879 sc->tx_mbuf_map[i] = NULL;
2883 /* Destroy the TX mbuf tag. */
2884 if (sc->tx_mbuf_tag != NULL) {
2885 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2886 sc->tx_mbuf_tag = NULL;
2889 /* Unload and destroy the RX mbuf maps. */
2890 for (i = 0; i < TOTAL_RX_BD; i++) {
2891 if (sc->rx_mbuf_map[i] != NULL) {
2892 bus_dmamap_unload(sc->rx_mbuf_tag,
2893 sc->rx_mbuf_map[i]);
2894 bus_dmamap_destroy(sc->rx_mbuf_tag,
2895 sc->rx_mbuf_map[i]);
2896 sc->rx_mbuf_map[i] = NULL;
2900 /* Destroy the RX mbuf tag. */
2901 if (sc->rx_mbuf_tag != NULL) {
2902 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2903 sc->rx_mbuf_tag = NULL;
2906 #ifdef BCE_JUMBO_HDRSPLIT
2907 /* Unload and destroy the page mbuf maps. */
2908 for (i = 0; i < TOTAL_PG_BD; i++) {
2909 if (sc->pg_mbuf_map[i] != NULL) {
2910 bus_dmamap_unload(sc->pg_mbuf_tag,
2911 sc->pg_mbuf_map[i]);
2912 bus_dmamap_destroy(sc->pg_mbuf_tag,
2913 sc->pg_mbuf_map[i]);
2914 sc->pg_mbuf_map[i] = NULL;
2918 /* Destroy the page mbuf tag. */
2919 if (sc->pg_mbuf_tag != NULL) {
2920 bus_dma_tag_destroy(sc->pg_mbuf_tag);
2921 sc->pg_mbuf_tag = NULL;
2925 /* Destroy the parent tag */
2926 if (sc->parent_tag != NULL) {
2927 bus_dma_tag_destroy(sc->parent_tag);
2928 sc->parent_tag = NULL;
2931 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
2935 /****************************************************************************/
2936 /* Get DMA memory from the OS. */
2938 /* Validates that the OS has provided DMA buffers in response to a */
2939 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2940 /* When the callback is used the OS will return 0 for the mapping function */
2941 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
2942 /* failures back to the caller. */
2946 /****************************************************************************/
2948 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2950 bus_addr_t *busaddr = arg;
2952 /* Simulate a mapping failure. */
2953 DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control),
2956 /* Check for an error and signal the caller that an error occurred. */
2960 *busaddr = segs->ds_addr;
2967 /****************************************************************************/
2968 /* Allocate any DMA memory needed by the driver. */
2970 /* Allocates DMA memory needed for the various global structures needed by */
2973 /* Memory alignment requirements: */
2974 /* +-----------------+----------+----------+----------+----------+ */
2975 /* | | 5706 | 5708 | 5709 | 5716 | */
2976 /* +-----------------+----------+----------+----------+----------+ */
2977 /* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2978 /* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2979 /* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
2980 /* |PG Buffers | none | none | none | none | */
2981 /* |TX Buffers | none | none | none | none | */
2982 /* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
2983 /* |Context Memory | | | | | */
2984 /* +-----------------+----------+----------+----------+----------+ */
2986 /* (1) Must align with CPU page size (BCM_PAGE_SZIE). */
2989 /* 0 for success, positive value for failure. */
2990 /****************************************************************************/
2992 bce_dma_alloc(device_t dev)
2994 struct bce_softc *sc;
2995 int i, error, rc = 0;
2996 bus_size_t max_size, max_seg_size;
2999 sc = device_get_softc(dev);
3001 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3004 * Allocate the parent bus DMA tag appropriate for PCI.
3006 if (bus_dma_tag_create(NULL,
3013 BUS_SPACE_UNRESTRICTED,
3014 BUS_SPACE_MAXSIZE_32BIT,
3018 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
3019 __FILE__, __LINE__);
3021 goto bce_dma_alloc_exit;
3025 * Create a DMA tag for the status block, allocate and clear the
3026 * memory, map the memory into DMA space, and fetch the physical
3027 * address of the block.
3029 if (bus_dma_tag_create(sc->parent_tag,
3041 BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
3042 __FILE__, __LINE__);
3044 goto bce_dma_alloc_exit;
3047 if(bus_dmamem_alloc(sc->status_tag,
3048 (void **)&sc->status_block,
3051 BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
3052 __FILE__, __LINE__);
3054 goto bce_dma_alloc_exit;
3057 bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
3059 error = bus_dmamap_load(sc->status_tag,
3064 &sc->status_block_paddr,
3068 BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
3069 __FILE__, __LINE__);
3071 goto bce_dma_alloc_exit;
3074 DBPRINT(sc, BCE_INFO, "%s(): status_block_paddr = 0x%jX\n",
3075 __FUNCTION__, (uintmax_t) sc->status_block_paddr);
3078 * Create a DMA tag for the statistics block, allocate and clear the
3079 * memory, map the memory into DMA space, and fetch the physical
3080 * address of the block.
3082 if (bus_dma_tag_create(sc->parent_tag,
3094 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
3095 __FILE__, __LINE__);
3097 goto bce_dma_alloc_exit;
3100 if (bus_dmamem_alloc(sc->stats_tag,
3101 (void **)&sc->stats_block,
3104 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
3105 __FILE__, __LINE__);
3107 goto bce_dma_alloc_exit;
3110 bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
3112 error = bus_dmamap_load(sc->stats_tag,
3117 &sc->stats_block_paddr,
3121 BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
3122 __FILE__, __LINE__);
3124 goto bce_dma_alloc_exit;
3127 DBPRINT(sc, BCE_INFO, "%s(): stats_block_paddr = 0x%jX\n",
3128 __FUNCTION__, (uintmax_t) sc->stats_block_paddr);
3130 /* BCM5709 uses host memory as cache for context memory. */
3131 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3132 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3133 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
3134 if (sc->ctx_pages == 0)
3137 DBRUNIF((sc->ctx_pages > 512),
3138 BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
3139 __FILE__, __LINE__, sc->ctx_pages));
3142 * Create a DMA tag for the context pages,
3143 * allocate and clear the memory, map the
3144 * memory into DMA space, and fetch the
3145 * physical address of the block.
3147 if(bus_dma_tag_create(sc->parent_tag,
3159 BCE_PRINTF("%s(%d): Could not allocate CTX DMA tag!\n",
3160 __FILE__, __LINE__);
3162 goto bce_dma_alloc_exit;
3165 for (i = 0; i < sc->ctx_pages; i++) {
3167 if(bus_dmamem_alloc(sc->ctx_tag,
3168 (void **)&sc->ctx_block[i],
3171 BCE_PRINTF("%s(%d): Could not allocate CTX "
3172 "DMA memory!\n", __FILE__, __LINE__);
3174 goto bce_dma_alloc_exit;
3177 bzero((char *)sc->ctx_block[i], BCM_PAGE_SIZE);
3179 error = bus_dmamap_load(sc->ctx_tag,
3188 BCE_PRINTF("%s(%d): Could not map CTX DMA memory!\n",
3189 __FILE__, __LINE__);
3191 goto bce_dma_alloc_exit;
3194 DBPRINT(sc, BCE_INFO, "%s(): ctx_paddr[%d] = 0x%jX\n",
3195 __FUNCTION__, i, (uintmax_t) sc->ctx_paddr[i]);
3200 * Create a DMA tag for the TX buffer descriptor chain,
3201 * allocate and clear the memory, and fetch the
3202 * physical address of the block.
3204 if(bus_dma_tag_create(sc->parent_tag,
3210 BCE_TX_CHAIN_PAGE_SZ,
3212 BCE_TX_CHAIN_PAGE_SZ,
3215 &sc->tx_bd_chain_tag)) {
3216 BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
3217 __FILE__, __LINE__);
3219 goto bce_dma_alloc_exit;
3222 for (i = 0; i < TX_PAGES; i++) {
3224 if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
3225 (void **)&sc->tx_bd_chain[i],
3227 &sc->tx_bd_chain_map[i])) {
3228 BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3229 "chain DMA memory!\n", __FILE__, __LINE__);
3231 goto bce_dma_alloc_exit;
3234 error = bus_dmamap_load(sc->tx_bd_chain_tag,
3235 sc->tx_bd_chain_map[i],
3237 BCE_TX_CHAIN_PAGE_SZ,
3239 &sc->tx_bd_chain_paddr[i],
3243 BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
3244 __FILE__, __LINE__);
3246 goto bce_dma_alloc_exit;
3249 DBPRINT(sc, BCE_INFO, "%s(): tx_bd_chain_paddr[%d] = 0x%jX\n",
3250 __FUNCTION__, i, (uintmax_t) sc->tx_bd_chain_paddr[i]);
3253 /* Check the required size before mapping to conserve resources. */
3254 if (bce_tso_enable) {
3255 max_size = BCE_TSO_MAX_SIZE;
3256 max_segments = BCE_MAX_SEGMENTS;
3257 max_seg_size = BCE_TSO_MAX_SEG_SIZE;
3259 max_size = MCLBYTES * BCE_MAX_SEGMENTS;
3260 max_segments = BCE_MAX_SEGMENTS;
3261 max_seg_size = MCLBYTES;
3264 /* Create a DMA tag for TX mbufs. */
3265 if (bus_dma_tag_create(sc->parent_tag,
3276 &sc->tx_mbuf_tag)) {
3277 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
3278 __FILE__, __LINE__);
3280 goto bce_dma_alloc_exit;
3283 /* Create DMA maps for the TX mbufs clusters. */
3284 for (i = 0; i < TOTAL_TX_BD; i++) {
3285 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
3286 &sc->tx_mbuf_map[i])) {
3287 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
3288 __FILE__, __LINE__);
3290 goto bce_dma_alloc_exit;
3295 * Create a DMA tag for the RX buffer descriptor chain,
3296 * allocate and clear the memory, and fetch the physical
3297 * address of the blocks.
3299 if (bus_dma_tag_create(sc->parent_tag,
3305 BCE_RX_CHAIN_PAGE_SZ,
3307 BCE_RX_CHAIN_PAGE_SZ,
3310 &sc->rx_bd_chain_tag)) {
3311 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
3312 __FILE__, __LINE__);
3314 goto bce_dma_alloc_exit;
3317 for (i = 0; i < RX_PAGES; i++) {
3319 if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
3320 (void **)&sc->rx_bd_chain[i],
3322 &sc->rx_bd_chain_map[i])) {
3323 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
3324 "DMA memory!\n", __FILE__, __LINE__);
3326 goto bce_dma_alloc_exit;
3329 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3331 error = bus_dmamap_load(sc->rx_bd_chain_tag,
3332 sc->rx_bd_chain_map[i],
3334 BCE_RX_CHAIN_PAGE_SZ,
3336 &sc->rx_bd_chain_paddr[i],
3340 BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
3341 __FILE__, __LINE__);
3343 goto bce_dma_alloc_exit;
3346 DBPRINT(sc, BCE_INFO, "%s(): rx_bd_chain_paddr[%d] = 0x%jX\n",
3347 __FUNCTION__, i, (uintmax_t) sc->rx_bd_chain_paddr[i]);
3351 * Create a DMA tag for RX mbufs.
3353 #ifdef BCE_JUMBO_HDRSPLIT
3354 max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
3355 MCLBYTES : sc->rx_bd_mbuf_alloc_size);
3357 max_size = max_seg_size = MJUM9BYTES;
3361 DBPRINT(sc, BCE_INFO, "%s(): Creating rx_mbuf_tag (max size = 0x%jX "
3362 "max segments = %d, max segment size = 0x%jX)\n", __FUNCTION__,
3363 (uintmax_t) max_size, max_segments, (uintmax_t) max_seg_size);
3365 if (bus_dma_tag_create(sc->parent_tag,
3376 &sc->rx_mbuf_tag)) {
3377 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
3378 __FILE__, __LINE__);
3380 goto bce_dma_alloc_exit;
3383 /* Create DMA maps for the RX mbuf clusters. */
3384 for (i = 0; i < TOTAL_RX_BD; i++) {
3385 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
3386 &sc->rx_mbuf_map[i])) {
3387 BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
3388 __FILE__, __LINE__);
3390 goto bce_dma_alloc_exit;
3394 #ifdef BCE_JUMBO_HDRSPLIT
3396 * Create a DMA tag for the page buffer descriptor chain,
3397 * allocate and clear the memory, and fetch the physical
3398 * address of the blocks.
3400 if (bus_dma_tag_create(sc->parent_tag,
3406 BCE_PG_CHAIN_PAGE_SZ,
3408 BCE_PG_CHAIN_PAGE_SZ,
3411 &sc->pg_bd_chain_tag)) {
3412 BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
3413 __FILE__, __LINE__);
3415 goto bce_dma_alloc_exit;
3418 for (i = 0; i < PG_PAGES; i++) {
3420 if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
3421 (void **)&sc->pg_bd_chain[i],
3423 &sc->pg_bd_chain_map[i])) {
3424 BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
3425 "DMA memory!\n", __FILE__, __LINE__);
3427 goto bce_dma_alloc_exit;
3430 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
3432 error = bus_dmamap_load(sc->pg_bd_chain_tag,
3433 sc->pg_bd_chain_map[i],
3435 BCE_PG_CHAIN_PAGE_SZ,
3437 &sc->pg_bd_chain_paddr[i],
3441 BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
3442 __FILE__, __LINE__);
3444 goto bce_dma_alloc_exit;
3447 DBPRINT(sc, BCE_INFO, "%s(): pg_bd_chain_paddr[%d] = 0x%jX\n",
3448 __FUNCTION__, i, (uintmax_t) sc->pg_bd_chain_paddr[i]);
3452 * Create a DMA tag for page mbufs.
3454 max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
3455 MCLBYTES : sc->pg_bd_mbuf_alloc_size);
3457 if (bus_dma_tag_create(sc->parent_tag,
3468 &sc->pg_mbuf_tag)) {
3469 BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
3470 __FILE__, __LINE__);
3472 goto bce_dma_alloc_exit;
3475 /* Create DMA maps for the page mbuf clusters. */
3476 for (i = 0; i < TOTAL_PG_BD; i++) {
3477 if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
3478 &sc->pg_mbuf_map[i])) {
3479 BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
3480 __FILE__, __LINE__);
3482 goto bce_dma_alloc_exit;
3488 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3493 /****************************************************************************/
3494 /* Release all resources used by the driver. */
3496 /* Releases all resources acquired by the driver including interrupts, */
3497 /* interrupt handler, interfaces, mutexes, and DMA memory. */
3501 /****************************************************************************/
3503 bce_release_resources(struct bce_softc *sc)
3507 DBENTER(BCE_VERBOSE_RESET);
3513 if (sc->bce_intrhand != NULL) {
3514 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
3515 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
3518 if (sc->bce_res_irq != NULL) {
3519 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
3520 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
3524 if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
3525 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
3526 pci_release_msi(dev);
3529 if (sc->bce_res_mem != NULL) {
3530 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
3531 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
3534 if (sc->bce_ifp != NULL) {
3535 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
3536 if_free(sc->bce_ifp);
3539 if (mtx_initialized(&sc->bce_mtx))
3540 BCE_LOCK_DESTROY(sc);
3542 DBEXIT(BCE_VERBOSE_RESET);
3546 /****************************************************************************/
3547 /* Firmware synchronization. */
3549 /* Before performing certain events such as a chip reset, synchronize with */
3550 /* the firmware first. */
3553 /* 0 for success, positive value for failure. */
3554 /****************************************************************************/
3556 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
3561 DBENTER(BCE_VERBOSE_RESET);
3563 /* Don't waste any time if we've timed out before. */
3564 if (sc->bce_fw_timed_out) {
3566 goto bce_fw_sync_exit;
3569 /* Increment the message sequence number. */
3570 sc->bce_fw_wr_seq++;
3571 msg_data |= sc->bce_fw_wr_seq;
3573 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n",
3576 /* Send the message to the bootcode driver mailbox. */
3577 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3579 /* Wait for the bootcode to acknowledge the message. */
3580 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
3581 /* Check for a response in the bootcode firmware mailbox. */
3582 val = bce_shmem_rd(sc, BCE_FW_MB);
3583 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
3588 /* If we've timed out, tell the bootcode that we've stopped waiting. */
3589 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
3590 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
3592 BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
3593 "msg_data = 0x%08X\n",
3594 __FILE__, __LINE__, msg_data);
3596 msg_data &= ~BCE_DRV_MSG_CODE;
3597 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
3599 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3601 sc->bce_fw_timed_out = 1;
3606 DBEXIT(BCE_VERBOSE_RESET);
3611 /****************************************************************************/
3612 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
3616 /****************************************************************************/
3618 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
3619 u32 rv2p_code_len, u32 rv2p_proc)
3624 DBENTER(BCE_VERBOSE_RESET);
3626 /* Set the page size used by RV2P. */
3627 if (rv2p_proc == RV2P_PROC2) {
3628 BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
3631 for (i = 0; i < rv2p_code_len; i += 8) {
3632 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
3634 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
3637 if (rv2p_proc == RV2P_PROC1) {
3638 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
3639 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
3642 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
3643 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
3647 /* Reset the processor, un-stall is done later. */
3648 if (rv2p_proc == RV2P_PROC1) {
3649 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
3652 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
3655 DBEXIT(BCE_VERBOSE_RESET);
3659 /****************************************************************************/
3660 /* Load RISC processor firmware. */
3662 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
3663 /* associated with a particular processor. */
3667 /****************************************************************************/
3669 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
3674 DBENTER(BCE_VERBOSE_RESET);
3676 bce_halt_cpu(sc, cpu_reg);
3678 /* Load the Text area. */
3679 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3683 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3684 REG_WR_IND(sc, offset, fw->text[j]);
3688 /* Load the Data area. */
3689 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3693 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3694 REG_WR_IND(sc, offset, fw->data[j]);
3698 /* Load the SBSS area. */
3699 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3703 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3704 REG_WR_IND(sc, offset, fw->sbss[j]);
3708 /* Load the BSS area. */
3709 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3713 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3714 REG_WR_IND(sc, offset, fw->bss[j]);
3718 /* Load the Read-Only area. */
3719 offset = cpu_reg->spad_base +
3720 (fw->rodata_addr - cpu_reg->mips_view_base);
3724 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3725 REG_WR_IND(sc, offset, fw->rodata[j]);
3729 /* Clear the pre-fetch instruction and set the FW start address. */
3730 REG_WR_IND(sc, cpu_reg->inst, 0);
3731 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3733 DBEXIT(BCE_VERBOSE_RESET);
3737 /****************************************************************************/
3738 /* Starts the RISC processor. */
3740 /* Assumes the CPU starting address has already been set. */
3744 /****************************************************************************/
3746 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
3750 DBENTER(BCE_VERBOSE_RESET);
3752 /* Start the CPU. */
3753 val = REG_RD_IND(sc, cpu_reg->mode);
3754 val &= ~cpu_reg->mode_value_halt;
3755 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3756 REG_WR_IND(sc, cpu_reg->mode, val);
3758 DBEXIT(BCE_VERBOSE_RESET);
3762 /****************************************************************************/
3763 /* Halts the RISC processor. */
3767 /****************************************************************************/
3769 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
3773 DBENTER(BCE_VERBOSE_RESET);
3776 val = REG_RD_IND(sc, cpu_reg->mode);
3777 val |= cpu_reg->mode_value_halt;
3778 REG_WR_IND(sc, cpu_reg->mode, val);
3779 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3781 DBEXIT(BCE_VERBOSE_RESET);
3785 /****************************************************************************/
3786 /* Initialize the RX CPU. */
3790 /****************************************************************************/
3792 bce_start_rxp_cpu(struct bce_softc *sc)
3794 struct cpu_reg cpu_reg;
3796 DBENTER(BCE_VERBOSE_RESET);
3798 cpu_reg.mode = BCE_RXP_CPU_MODE;
3799 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3800 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3801 cpu_reg.state = BCE_RXP_CPU_STATE;
3802 cpu_reg.state_value_clear = 0xffffff;
3803 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3804 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3805 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3806 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3807 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3808 cpu_reg.spad_base = BCE_RXP_SCRATCH;
3809 cpu_reg.mips_view_base = 0x8000000;
3811 DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n");
3812 bce_start_cpu(sc, &cpu_reg);
3814 DBEXIT(BCE_VERBOSE_RESET);
3818 /****************************************************************************/
3819 /* Initialize the RX CPU. */
3823 /****************************************************************************/
3825 bce_init_rxp_cpu(struct bce_softc *sc)
3827 struct cpu_reg cpu_reg;
3830 DBENTER(BCE_VERBOSE_RESET);
3832 cpu_reg.mode = BCE_RXP_CPU_MODE;
3833 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3834 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3835 cpu_reg.state = BCE_RXP_CPU_STATE;
3836 cpu_reg.state_value_clear = 0xffffff;
3837 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3838 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3839 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3840 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3841 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3842 cpu_reg.spad_base = BCE_RXP_SCRATCH;
3843 cpu_reg.mips_view_base = 0x8000000;
3845 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3846 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3847 fw.ver_major = bce_RXP_b09FwReleaseMajor;
3848 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
3849 fw.ver_fix = bce_RXP_b09FwReleaseFix;
3850 fw.start_addr = bce_RXP_b09FwStartAddr;
3852 fw.text_addr = bce_RXP_b09FwTextAddr;
3853 fw.text_len = bce_RXP_b09FwTextLen;
3855 fw.text = bce_RXP_b09FwText;
3857 fw.data_addr = bce_RXP_b09FwDataAddr;
3858 fw.data_len = bce_RXP_b09FwDataLen;
3860 fw.data = bce_RXP_b09FwData;
3862 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
3863 fw.sbss_len = bce_RXP_b09FwSbssLen;
3865 fw.sbss = bce_RXP_b09FwSbss;
3867 fw.bss_addr = bce_RXP_b09FwBssAddr;
3868 fw.bss_len = bce_RXP_b09FwBssLen;
3870 fw.bss = bce_RXP_b09FwBss;
3872 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
3873 fw.rodata_len = bce_RXP_b09FwRodataLen;
3874 fw.rodata_index = 0;
3875 fw.rodata = bce_RXP_b09FwRodata;
3877 fw.ver_major = bce_RXP_b06FwReleaseMajor;
3878 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3879 fw.ver_fix = bce_RXP_b06FwReleaseFix;
3880 fw.start_addr = bce_RXP_b06FwStartAddr;
3882 fw.text_addr = bce_RXP_b06FwTextAddr;
3883 fw.text_len = bce_RXP_b06FwTextLen;
3885 fw.text = bce_RXP_b06FwText;
3887 fw.data_addr = bce_RXP_b06FwDataAddr;
3888 fw.data_len = bce_RXP_b06FwDataLen;
3890 fw.data = bce_RXP_b06FwData;
3892 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3893 fw.sbss_len = bce_RXP_b06FwSbssLen;
3895 fw.sbss = bce_RXP_b06FwSbss;
3897 fw.bss_addr = bce_RXP_b06FwBssAddr;
3898 fw.bss_len = bce_RXP_b06FwBssLen;
3900 fw.bss = bce_RXP_b06FwBss;
3902 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3903 fw.rodata_len = bce_RXP_b06FwRodataLen;
3904 fw.rodata_index = 0;
3905 fw.rodata = bce_RXP_b06FwRodata;
3908 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3909 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3911 /* Delay RXP start until initialization is complete. */
3913 DBEXIT(BCE_VERBOSE_RESET);
3917 /****************************************************************************/
3918 /* Initialize the TX CPU. */
3922 /****************************************************************************/
3924 bce_init_txp_cpu(struct bce_softc *sc)
3926 struct cpu_reg cpu_reg;
3929 DBENTER(BCE_VERBOSE_RESET);
3931 cpu_reg.mode = BCE_TXP_CPU_MODE;
3932 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3933 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3934 cpu_reg.state = BCE_TXP_CPU_STATE;
3935 cpu_reg.state_value_clear = 0xffffff;
3936 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3937 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3938 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3939 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3940 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3941 cpu_reg.spad_base = BCE_TXP_SCRATCH;
3942 cpu_reg.mips_view_base = 0x8000000;
3944 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
3945 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
3946 fw.ver_major = bce_TXP_b09FwReleaseMajor;
3947 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
3948 fw.ver_fix = bce_TXP_b09FwReleaseFix;
3949 fw.start_addr = bce_TXP_b09FwStartAddr;
3951 fw.text_addr = bce_TXP_b09FwTextAddr;
3952 fw.text_len = bce_TXP_b09FwTextLen;
3954 fw.text = bce_TXP_b09FwText;
3956 fw.data_addr = bce_TXP_b09FwDataAddr;
3957 fw.data_len = bce_TXP_b09FwDataLen;
3959 fw.data = bce_TXP_b09FwData;
3961 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
3962 fw.sbss_len = bce_TXP_b09FwSbssLen;
3964 fw.sbss = bce_TXP_b09FwSbss;
3966 fw.bss_addr = bce_TXP_b09FwBssAddr;
3967 fw.bss_len = bce_TXP_b09FwBssLen;
3969 fw.bss = bce_TXP_b09FwBss;
3971 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
3972 fw.rodata_len = bce_TXP_b09FwRodataLen;
3973 fw.rodata_index = 0;
3974 fw.rodata = bce_TXP_b09FwRodata;
3976 fw.ver_major = bce_TXP_b06FwReleaseMajor;
3977 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
3978 fw.ver_fix = bce_TXP_b06FwReleaseFix;
3979 fw.start_addr = bce_TXP_b06FwStartAddr;
3981 fw.text_addr = bce_TXP_b06FwTextAddr;
3982 fw.text_len = bce_TXP_b06FwTextLen;
3984 fw.text = bce_TXP_b06FwText;
3986 fw.data_addr = bce_TXP_b06FwDataAddr;
3987 fw.data_len = bce_TXP_b06FwDataLen;
3989 fw.data = bce_TXP_b06FwData;
3991 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
3992 fw.sbss_len = bce_TXP_b06FwSbssLen;
3994 fw.sbss = bce_TXP_b06FwSbss;
3996 fw.bss_addr = bce_TXP_b06FwBssAddr;
3997 fw.bss_len = bce_TXP_b06FwBssLen;
3999 fw.bss = bce_TXP_b06FwBss;
4001 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
4002 fw.rodata_len = bce_TXP_b06FwRodataLen;
4003 fw.rodata_index = 0;
4004 fw.rodata = bce_TXP_b06FwRodata;
4007 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
4008 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4009 bce_start_cpu(sc, &cpu_reg);
4011 DBEXIT(BCE_VERBOSE_RESET);
4015 /****************************************************************************/
4016 /* Initialize the TPAT CPU. */
4020 /****************************************************************************/
4022 bce_init_tpat_cpu(struct bce_softc *sc)
4024 struct cpu_reg cpu_reg;
4027 DBENTER(BCE_VERBOSE_RESET);
4029 cpu_reg.mode = BCE_TPAT_CPU_MODE;
4030 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
4031 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
4032 cpu_reg.state = BCE_TPAT_CPU_STATE;
4033 cpu_reg.state_value_clear = 0xffffff;
4034 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
4035 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
4036 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
4037 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
4038 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
4039 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
4040 cpu_reg.mips_view_base = 0x8000000;
4042 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4043 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4044 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
4045 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
4046 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
4047 fw.start_addr = bce_TPAT_b09FwStartAddr;
4049 fw.text_addr = bce_TPAT_b09FwTextAddr;
4050 fw.text_len = bce_TPAT_b09FwTextLen;
4052 fw.text = bce_TPAT_b09FwText;
4054 fw.data_addr = bce_TPAT_b09FwDataAddr;
4055 fw.data_len = bce_TPAT_b09FwDataLen;
4057 fw.data = bce_TPAT_b09FwData;
4059 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
4060 fw.sbss_len = bce_TPAT_b09FwSbssLen;
4062 fw.sbss = bce_TPAT_b09FwSbss;
4064 fw.bss_addr = bce_TPAT_b09FwBssAddr;
4065 fw.bss_len = bce_TPAT_b09FwBssLen;
4067 fw.bss = bce_TPAT_b09FwBss;
4069 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
4070 fw.rodata_len = bce_TPAT_b09FwRodataLen;
4071 fw.rodata_index = 0;
4072 fw.rodata = bce_TPAT_b09FwRodata;
4074 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
4075 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
4076 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
4077 fw.start_addr = bce_TPAT_b06FwStartAddr;
4079 fw.text_addr = bce_TPAT_b06FwTextAddr;
4080 fw.text_len = bce_TPAT_b06FwTextLen;
4082 fw.text = bce_TPAT_b06FwText;
4084 fw.data_addr = bce_TPAT_b06FwDataAddr;
4085 fw.data_len = bce_TPAT_b06FwDataLen;
4087 fw.data = bce_TPAT_b06FwData;
4089 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
4090 fw.sbss_len = bce_TPAT_b06FwSbssLen;
4092 fw.sbss = bce_TPAT_b06FwSbss;
4094 fw.bss_addr = bce_TPAT_b06FwBssAddr;
4095 fw.bss_len = bce_TPAT_b06FwBssLen;
4097 fw.bss = bce_TPAT_b06FwBss;
4099 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
4100 fw.rodata_len = bce_TPAT_b06FwRodataLen;
4101 fw.rodata_index = 0;
4102 fw.rodata = bce_TPAT_b06FwRodata;
4105 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
4106 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4107 bce_start_cpu(sc, &cpu_reg);
4109 DBEXIT(BCE_VERBOSE_RESET);
4113 /****************************************************************************/
4114 /* Initialize the CP CPU. */
4118 /****************************************************************************/
4120 bce_init_cp_cpu(struct bce_softc *sc)
4122 struct cpu_reg cpu_reg;
4125 DBENTER(BCE_VERBOSE_RESET);
4127 cpu_reg.mode = BCE_CP_CPU_MODE;
4128 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
4129 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
4130 cpu_reg.state = BCE_CP_CPU_STATE;
4131 cpu_reg.state_value_clear = 0xffffff;
4132 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
4133 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
4134 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
4135 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
4136 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
4137 cpu_reg.spad_base = BCE_CP_SCRATCH;
4138 cpu_reg.mips_view_base = 0x8000000;
4140 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4141 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4142 fw.ver_major = bce_CP_b09FwReleaseMajor;
4143 fw.ver_minor = bce_CP_b09FwReleaseMinor;
4144 fw.ver_fix = bce_CP_b09FwReleaseFix;
4145 fw.start_addr = bce_CP_b09FwStartAddr;
4147 fw.text_addr = bce_CP_b09FwTextAddr;
4148 fw.text_len = bce_CP_b09FwTextLen;
4150 fw.text = bce_CP_b09FwText;
4152 fw.data_addr = bce_CP_b09FwDataAddr;
4153 fw.data_len = bce_CP_b09FwDataLen;
4155 fw.data = bce_CP_b09FwData;
4157 fw.sbss_addr = bce_CP_b09FwSbssAddr;
4158 fw.sbss_len = bce_CP_b09FwSbssLen;
4160 fw.sbss = bce_CP_b09FwSbss;
4162 fw.bss_addr = bce_CP_b09FwBssAddr;
4163 fw.bss_len = bce_CP_b09FwBssLen;
4165 fw.bss = bce_CP_b09FwBss;
4167 fw.rodata_addr = bce_CP_b09FwRodataAddr;
4168 fw.rodata_len = bce_CP_b09FwRodataLen;
4169 fw.rodata_index = 0;
4170 fw.rodata = bce_CP_b09FwRodata;
4172 fw.ver_major = bce_CP_b06FwReleaseMajor;
4173 fw.ver_minor = bce_CP_b06FwReleaseMinor;
4174 fw.ver_fix = bce_CP_b06FwReleaseFix;
4175 fw.start_addr = bce_CP_b06FwStartAddr;
4177 fw.text_addr = bce_CP_b06FwTextAddr;
4178 fw.text_len = bce_CP_b06FwTextLen;
4180 fw.text = bce_CP_b06FwText;
4182 fw.data_addr = bce_CP_b06FwDataAddr;
4183 fw.data_len = bce_CP_b06FwDataLen;
4185 fw.data = bce_CP_b06FwData;
4187 fw.sbss_addr = bce_CP_b06FwSbssAddr;
4188 fw.sbss_len = bce_CP_b06FwSbssLen;
4190 fw.sbss = bce_CP_b06FwSbss;
4192 fw.bss_addr = bce_CP_b06FwBssAddr;
4193 fw.bss_len = bce_CP_b06FwBssLen;
4195 fw.bss = bce_CP_b06FwBss;
4197 fw.rodata_addr = bce_CP_b06FwRodataAddr;
4198 fw.rodata_len = bce_CP_b06FwRodataLen;
4199 fw.rodata_index = 0;
4200 fw.rodata = bce_CP_b06FwRodata;
4203 DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
4204 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4205 bce_start_cpu(sc, &cpu_reg);
4207 DBEXIT(BCE_VERBOSE_RESET);
4211 /****************************************************************************/
4212 /* Initialize the COM CPU. */
4216 /****************************************************************************/
4218 bce_init_com_cpu(struct bce_softc *sc)
4220 struct cpu_reg cpu_reg;
4223 DBENTER(BCE_VERBOSE_RESET);
4225 cpu_reg.mode = BCE_COM_CPU_MODE;
4226 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
4227 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
4228 cpu_reg.state = BCE_COM_CPU_STATE;
4229 cpu_reg.state_value_clear = 0xffffff;
4230 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
4231 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
4232 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
4233 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
4234 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
4235 cpu_reg.spad_base = BCE_COM_SCRATCH;
4236 cpu_reg.mips_view_base = 0x8000000;
4238 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4239 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4240 fw.ver_major = bce_COM_b09FwReleaseMajor;
4241 fw.ver_minor = bce_COM_b09FwReleaseMinor;
4242 fw.ver_fix = bce_COM_b09FwReleaseFix;
4243 fw.start_addr = bce_COM_b09FwStartAddr;
4245 fw.text_addr = bce_COM_b09FwTextAddr;
4246 fw.text_len = bce_COM_b09FwTextLen;
4248 fw.text = bce_COM_b09FwText;
4250 fw.data_addr = bce_COM_b09FwDataAddr;
4251 fw.data_len = bce_COM_b09FwDataLen;
4253 fw.data = bce_COM_b09FwData;
4255 fw.sbss_addr = bce_COM_b09FwSbssAddr;
4256 fw.sbss_len = bce_COM_b09FwSbssLen;
4258 fw.sbss = bce_COM_b09FwSbss;
4260 fw.bss_addr = bce_COM_b09FwBssAddr;
4261 fw.bss_len = bce_COM_b09FwBssLen;
4263 fw.bss = bce_COM_b09FwBss;
4265 fw.rodata_addr = bce_COM_b09FwRodataAddr;
4266 fw.rodata_len = bce_COM_b09FwRodataLen;
4267 fw.rodata_index = 0;
4268 fw.rodata = bce_COM_b09FwRodata;
4270 fw.ver_major = bce_COM_b06FwReleaseMajor;
4271 fw.ver_minor = bce_COM_b06FwReleaseMinor;
4272 fw.ver_fix = bce_COM_b06FwReleaseFix;
4273 fw.start_addr = bce_COM_b06FwStartAddr;
4275 fw.text_addr = bce_COM_b06FwTextAddr;
4276 fw.text_len = bce_COM_b06FwTextLen;
4278 fw.text = bce_COM_b06FwText;
4280 fw.data_addr = bce_COM_b06FwDataAddr;
4281 fw.data_len = bce_COM_b06FwDataLen;
4283 fw.data = bce_COM_b06FwData;
4285 fw.sbss_addr = bce_COM_b06FwSbssAddr;
4286 fw.sbss_len = bce_COM_b06FwSbssLen;
4288 fw.sbss = bce_COM_b06FwSbss;
4290 fw.bss_addr = bce_COM_b06FwBssAddr;
4291 fw.bss_len = bce_COM_b06FwBssLen;
4293 fw.bss = bce_COM_b06FwBss;
4295 fw.rodata_addr = bce_COM_b06FwRodataAddr;
4296 fw.rodata_len = bce_COM_b06FwRodataLen;
4297 fw.rodata_index = 0;
4298 fw.rodata = bce_COM_b06FwRodata;
4301 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
4302 bce_load_cpu_fw(sc, &cpu_reg, &fw);
4303 bce_start_cpu(sc, &cpu_reg);
4305 DBEXIT(BCE_VERBOSE_RESET);
4309 /****************************************************************************/
4310 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
4312 /* Loads the firmware for each CPU and starts the CPU. */
4316 /****************************************************************************/
4318 bce_init_cpus(struct bce_softc *sc)
4320 DBENTER(BCE_VERBOSE_RESET);
4322 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4323 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4325 if ((BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax)) {
4326 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
4327 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
4328 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
4329 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
4331 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
4332 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
4333 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
4334 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
4338 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
4339 sizeof(bce_rv2p_proc1), RV2P_PROC1);
4340 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
4341 sizeof(bce_rv2p_proc2), RV2P_PROC2);
4344 bce_init_rxp_cpu(sc);
4345 bce_init_txp_cpu(sc);
4346 bce_init_tpat_cpu(sc);
4347 bce_init_com_cpu(sc);
4348 bce_init_cp_cpu(sc);
4350 DBEXIT(BCE_VERBOSE_RESET);
4354 /****************************************************************************/
4355 /* Initialize context memory. */
4357 /* Clears the memory associated with each Context ID (CID). */
4361 /****************************************************************************/
4363 bce_init_ctx(struct bce_softc *sc)
4366 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4368 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4369 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4370 int i, retry_cnt = CTX_INIT_RETRY_COUNT;
4373 DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");
4376 * BCM5709 context memory may be cached
4377 * in host memory so prepare the host memory
4380 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
4381 val |= (BCM_PAGE_BITS - 8) << 16;
4382 REG_WR(sc, BCE_CTX_COMMAND, val);
4384 /* Wait for mem init command to complete. */
4385 for (i = 0; i < retry_cnt; i++) {
4386 val = REG_RD(sc, BCE_CTX_COMMAND);
4387 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
4392 /* ToDo: Consider returning an error here. */
4393 DBRUNIF((val & BCE_CTX_COMMAND_MEM_INIT),
4394 BCE_PRINTF("%s(): Context memory initialization failed!\n",
4397 for (i = 0; i < sc->ctx_pages; i++) {
4400 /* Set the physical address of the context memory cache. */
4401 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
4402 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
4403 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
4404 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
4405 BCE_ADDR_HI(sc->ctx_paddr[i]));
4406 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
4407 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4409 /* Verify that the context memory write was successful. */
4410 for (j = 0; j < retry_cnt; j++) {
4411 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
4412 if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
4417 /* ToDo: Consider returning an error here. */
4418 DBRUNIF((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ),
4419 BCE_PRINTF("%s(): Failed to initialize context page %d!\n",
4423 u32 vcid_addr, offset;
4425 DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");
4428 * For the 5706/5708, context memory is local to
4429 * the controller, so initialize the controller
4433 vcid_addr = GET_CID_ADDR(96);
4436 vcid_addr -= PHY_CTX_SIZE;
4438 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
4439 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4441 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
4442 CTX_WR(sc, 0x00, offset, 0);
4445 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
4446 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4450 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4454 /****************************************************************************/
4455 /* Fetch the permanent MAC address of the controller. */
4459 /****************************************************************************/
4461 bce_get_mac_addr(struct bce_softc *sc)
4463 u32 mac_lo = 0, mac_hi = 0;
4465 DBENTER(BCE_VERBOSE_RESET);
4467 * The NetXtreme II bootcode populates various NIC
4468 * power-on and runtime configuration items in a
4469 * shared memory area. The factory configured MAC
4470 * address is available from both NVRAM and the
4471 * shared memory area so we'll read the value from
4472 * shared memory for speed.
4475 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
4476 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
4478 if ((mac_lo == 0) && (mac_hi == 0)) {
4479 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
4480 __FILE__, __LINE__);
4482 sc->eaddr[0] = (u_char)(mac_hi >> 8);
4483 sc->eaddr[1] = (u_char)(mac_hi >> 0);
4484 sc->eaddr[2] = (u_char)(mac_lo >> 24);
4485 sc->eaddr[3] = (u_char)(mac_lo >> 16);
4486 sc->eaddr[4] = (u_char)(mac_lo >> 8);
4487 sc->eaddr[5] = (u_char)(mac_lo >> 0);
4490 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
4491 DBEXIT(BCE_VERBOSE_RESET);
4495 /****************************************************************************/
4496 /* Program the MAC address. */
4500 /****************************************************************************/
4502 bce_set_mac_addr(struct bce_softc *sc)
4505 u8 *mac_addr = sc->eaddr;
4507 /* ToDo: Add support for setting multiple MAC addresses. */
4509 DBENTER(BCE_VERBOSE_RESET);
4510 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
4512 val = (mac_addr[0] << 8) | mac_addr[1];
4514 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
4516 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4517 (mac_addr[4] << 8) | mac_addr[5];
4519 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
4521 DBEXIT(BCE_VERBOSE_RESET);
4525 /****************************************************************************/
4526 /* Stop the controller. */
4530 /****************************************************************************/
4532 bce_stop(struct bce_softc *sc)
4535 struct ifmedia_entry *ifm;
4536 struct mii_data *mii = NULL;
4539 DBENTER(BCE_VERBOSE_RESET);
4541 BCE_LOCK_ASSERT(sc);
4545 mii = device_get_softc(sc->bce_miibus);
4547 callout_stop(&sc->bce_tick_callout);
4549 /* Disable the transmit/receive blocks. */
4550 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
4551 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4554 bce_disable_intr(sc);
4556 /* Free RX buffers. */
4557 #ifdef BCE_JUMBO_HDRSPLIT
4558 bce_free_pg_chain(sc);
4560 bce_free_rx_chain(sc);
4562 /* Free TX buffers. */
4563 bce_free_tx_chain(sc);
4566 * Isolate/power down the PHY, but leave the media selection
4567 * unchanged so that things will be put back to normal when
4568 * we bring the interface back up.
4571 itmp = ifp->if_flags;
4572 ifp->if_flags |= IFF_UP;
4574 /* If we are called from bce_detach(), mii is already NULL. */
4576 ifm = mii->mii_media.ifm_cur;
4577 mtmp = ifm->ifm_media;
4578 ifm->ifm_media = IFM_ETHER | IFM_NONE;
4580 ifm->ifm_media = mtmp;
4583 ifp->if_flags = itmp;
4584 sc->watchdog_timer = 0;
4588 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4590 DBEXIT(BCE_VERBOSE_RESET);
4595 bce_reset(struct bce_softc *sc, u32 reset_code)
4600 DBENTER(BCE_VERBOSE_RESET);
4602 DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
4603 __FUNCTION__, reset_code);
4605 /* Wait for pending PCI transactions to complete. */
4606 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
4607 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4608 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4609 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4610 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4611 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4615 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4616 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4617 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4618 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4619 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4622 /* Assume bootcode is running. */
4623 sc->bce_fw_timed_out = 0;
4625 /* Give the firmware a chance to prepare for the reset. */
4626 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
4628 goto bce_reset_exit;
4630 /* Set a firmware reminder that this is a soft reset. */
4631 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC);
4633 /* Dummy read to force the chip to complete all current transactions. */
4634 val = REG_RD(sc, BCE_MISC_ID);
4637 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4638 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4639 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
4640 REG_RD(sc, BCE_MISC_COMMAND);
4643 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4644 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4646 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
4648 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4649 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4650 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4651 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
4653 /* Allow up to 30us for reset to complete. */
4654 for (i = 0; i < 10; i++) {
4655 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
4656 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4657 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
4663 /* Check that reset completed successfully. */
4664 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4665 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4666 BCE_PRINTF("%s(%d): Reset failed!\n",
4667 __FILE__, __LINE__);
4669 goto bce_reset_exit;
4673 /* Make sure byte swapping is properly configured. */
4674 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
4675 if (val != 0x01020304) {
4676 BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
4677 __FILE__, __LINE__);
4679 goto bce_reset_exit;
4682 /* Just completed a reset, assume that firmware is running again. */
4683 sc->bce_fw_timed_out = 0;
4685 /* Wait for the firmware to finish its initialization. */
4686 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
4688 BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
4689 __FILE__, __LINE__);
4692 DBEXIT(BCE_VERBOSE_RESET);
4698 bce_chipinit(struct bce_softc *sc)
4703 DBENTER(BCE_VERBOSE_RESET);
4705 bce_disable_intr(sc);
4708 * Initialize DMA byte/word swapping, configure the number of DMA
4709 * channels and PCI clock compensation delay.
4711 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
4712 BCE_DMA_CONFIG_DATA_WORD_SWAP |
4713 #if BYTE_ORDER == BIG_ENDIAN
4714 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
4716 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
4717 DMA_READ_CHANS << 12 |
4718 DMA_WRITE_CHANS << 16;
4720 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
4722 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
4723 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
4726 * This setting resolves a problem observed on certain Intel PCI
4727 * chipsets that cannot handle multiple outstanding DMA operations.
4728 * See errata E9_5706A1_65.
4730 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
4731 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
4732 !(sc->bce_flags & BCE_PCIX_FLAG))
4733 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
4735 REG_WR(sc, BCE_DMA_CONFIG, val);
4737 /* Enable the RX_V2P and Context state machines before access. */
4738 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4739 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4740 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4741 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4743 /* Initialize context mapping and zero out the quick contexts. */
4746 /* Initialize the on-boards CPUs */
4749 /* Enable management frames (NC-SI) to flow to the MCP. */
4750 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4751 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
4752 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
4755 /* Prepare NVRAM for access. */
4756 if (bce_init_nvram(sc)) {
4758 goto bce_chipinit_exit;
4761 /* Set the kernel bypass block size */
4762 val = REG_RD(sc, BCE_MQ_CONFIG);
4763 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4764 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4766 /* Enable bins used on the 5709. */
4767 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4768 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4769 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
4770 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
4771 val |= BCE_MQ_CONFIG_HALT_DIS;
4774 REG_WR(sc, BCE_MQ_CONFIG, val);
4776 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4777 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
4778 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
4780 /* Set the page size and clear the RV2P processor stall bits. */
4781 val = (BCM_PAGE_BITS - 8) << 24;
4782 REG_WR(sc, BCE_RV2P_CONFIG, val);
4784 /* Configure page size. */
4785 val = REG_RD(sc, BCE_TBDR_CONFIG);
4786 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
4787 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4788 REG_WR(sc, BCE_TBDR_CONFIG, val);
4790 /* Set the perfect match control register to default. */
4791 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
4794 DBEXIT(BCE_VERBOSE_RESET);
4800 /****************************************************************************/
4801 /* Initialize the controller in preparation to send/receive traffic. */
4804 /* 0 for success, positive value for failure. */
4805 /****************************************************************************/
4807 bce_blockinit(struct bce_softc *sc)
4812 DBENTER(BCE_VERBOSE_RESET);
4814 /* Load the hardware default MAC address. */
4815 bce_set_mac_addr(sc);
4817 /* Set the Ethernet backoff seed value */
4818 val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
4819 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) +
4820 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
4821 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
4823 sc->last_status_idx = 0;
4824 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
4826 /* Set up link change interrupt generation. */
4827 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
4829 /* Program the physical address of the status block. */
4830 REG_WR(sc, BCE_HC_STATUS_ADDR_L,
4831 BCE_ADDR_LO(sc->status_block_paddr));
4832 REG_WR(sc, BCE_HC_STATUS_ADDR_H,
4833 BCE_ADDR_HI(sc->status_block_paddr));
4835 /* Program the physical address of the statistics block. */
4836 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
4837 BCE_ADDR_LO(sc->stats_block_paddr));
4838 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
4839 BCE_ADDR_HI(sc->stats_block_paddr));
4841 /* Program various host coalescing parameters. */
4842 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4843 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
4844 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4845 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
4846 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
4847 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
4848 REG_WR(sc, BCE_HC_TX_TICKS,
4849 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
4850 REG_WR(sc, BCE_HC_RX_TICKS,
4851 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
4852 REG_WR(sc, BCE_HC_COM_TICKS,
4853 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
4854 REG_WR(sc, BCE_HC_CMD_TICKS,
4855 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
4856 REG_WR(sc, BCE_HC_STATS_TICKS,
4857 (sc->bce_stats_ticks & 0xffff00));
4858 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4860 /* Configure the Host Coalescing block. */
4861 val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
4862 BCE_HC_CONFIG_COLLECT_STATS;
4865 /* ToDo: Add MSI-X support. */
4866 if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
4867 u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
4870 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
4872 REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
4873 BCE_HC_SB_CONFIG_1_ONE_SHOT);
4875 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
4876 (sc->tx_quick_cons_trip_int << 16) |
4877 sc->tx_quick_cons_trip);
4879 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
4880 (sc->tx_ticks_int << 16) | sc->tx_ticks);
4882 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4886 * Tell the HC block to automatically set the
4887 * INT_MASK bit after an MSI/MSI-X interrupt
4888 * is generated so the driver doesn't have to.
4890 if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
4891 val |= BCE_HC_CONFIG_ONE_SHOT;
4893 /* Set the MSI-X status blocks to 128 byte boundaries. */
4894 if (sc->bce_flags & BCE_USING_MSIX_FLAG)
4895 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
4898 REG_WR(sc, BCE_HC_CONFIG, val);
4900 /* Clear the internal statistics counters. */
4901 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
4903 /* Verify that bootcode is running. */
4904 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
4906 DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control),
4907 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
4908 __FILE__, __LINE__);
4911 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
4912 BCE_DEV_INFO_SIGNATURE_MAGIC) {
4913 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
4914 "Expected: 08%08X\n", __FILE__, __LINE__,
4915 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
4916 BCE_DEV_INFO_SIGNATURE_MAGIC);
4918 goto bce_blockinit_exit;
4922 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4923 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
4924 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4925 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4926 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4929 /* Allow bootcode to apply any additional fixes before enabling MAC. */
4930 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
4932 /* Enable link state change interrupt generation. */
4933 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
4935 /* Enable the RXP. */
4936 bce_start_rxp_cpu(sc);
4938 /* Disable management frames (NC-SI) from flowing to the MCP. */
4939 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4940 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
4941 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
4944 /* Enable all remaining blocks in the MAC. */
4945 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
4946 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
4947 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT_XI);
4949 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4951 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4954 /* Save the current host coalescing block settings. */
4955 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
4958 DBEXIT(BCE_VERBOSE_RESET);
4964 /****************************************************************************/
4965 /* Encapsulate an mbuf into the rx_bd chain. */
4968 /* 0 for success, positive value for failure. */
4969 /****************************************************************************/
4971 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
4972 u16 *chain_prod, u32 *prod_bseq)
4975 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4976 struct mbuf *m_new = NULL;
4978 int nsegs, error, rc = 0;
4980 u16 debug_chain_prod = *chain_prod;
4983 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
4985 /* Make sure the inputs are valid. */
4986 DBRUNIF((*chain_prod > MAX_RX_BD),
4987 BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
4988 __FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
4990 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
4991 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
4993 /* Update some debug statistic counters */
4994 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4995 sc->rx_low_watermark = sc->free_rx_bd);
4996 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
4998 /* Check whether this is a new mbuf allocation. */
5001 /* Simulate an mbuf allocation failure. */
5002 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5003 sc->mbuf_alloc_failed_count++;
5004 sc->mbuf_alloc_failed_sim_count++;
5006 goto bce_get_rx_buf_exit);
5008 /* This is a new mbuf allocation. */
5009 #ifdef BCE_JUMBO_HDRSPLIT
5010 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
5012 if (sc->rx_bd_mbuf_alloc_size <= MCLBYTES)
5013 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
5015 m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->rx_bd_mbuf_alloc_size);
5018 if (m_new == NULL) {
5019 sc->mbuf_alloc_failed_count++;
5021 goto bce_get_rx_buf_exit;
5024 DBRUN(sc->debug_rx_mbuf_alloc++);
5026 /* Reuse an existing mbuf. */
5030 /* Make sure we have a valid packet header. */
5031 M_ASSERTPKTHDR(m_new);
5033 /* Initialize the mbuf size and pad if necessary for alignment. */
5034 m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
5035 m_adj(m_new, sc->rx_bd_mbuf_align_pad);
5037 /* ToDo: Consider calling m_fragment() to test error handling. */
5039 /* Map the mbuf cluster into device memory. */
5040 map = sc->rx_mbuf_map[*chain_prod];
5041 error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
5042 segs, &nsegs, BUS_DMA_NOWAIT);
5044 /* Handle any mapping errors. */
5046 BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain (%d)!\n",
5047 __FILE__, __LINE__, error);
5049 sc->dma_map_addr_rx_failed_count++;
5052 DBRUN(sc->debug_rx_mbuf_alloc--);
5055 goto bce_get_rx_buf_exit;
5058 /* All mbufs must map to a single segment. */
5059 KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
5060 __FUNCTION__, nsegs));
5062 /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
5064 /* Setup the rx_bd for the segment. */
5065 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
5067 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
5068 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
5069 rxbd->rx_bd_len = htole32(segs[0].ds_len);
5070 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5071 *prod_bseq += segs[0].ds_len;
5073 /* Save the mbuf and update our counter. */
5074 sc->rx_mbuf_ptr[*chain_prod] = m_new;
5075 sc->free_rx_bd -= nsegs;
5077 DBRUNMSG(BCE_INSANE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
5080 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
5081 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
5083 bce_get_rx_buf_exit:
5084 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5090 #ifdef BCE_JUMBO_HDRSPLIT
5091 /****************************************************************************/
5092 /* Encapsulate an mbuf cluster into the page chain. */
5095 /* 0 for success, positive value for failure. */
5096 /****************************************************************************/
5098 bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
5103 struct mbuf *m_new = NULL;
5107 u16 debug_prod_idx = *prod_idx;
5110 DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5112 /* Make sure the inputs are valid. */
5113 DBRUNIF((*prod_idx > MAX_PG_BD),
5114 BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
5115 __FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
5117 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5118 "chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5120 /* Update counters if we've hit a new low or run out of pages. */
5121 DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
5122 sc->pg_low_watermark = sc->free_pg_bd);
5123 DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
5125 /* Check whether this is a new mbuf allocation. */
5128 /* Simulate an mbuf allocation failure. */
5129 DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5130 sc->mbuf_alloc_failed_count++;
5131 sc->mbuf_alloc_failed_sim_count++;
5133 goto bce_get_pg_buf_exit);
5135 /* This is a new mbuf allocation. */
5136 m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
5137 if (m_new == NULL) {
5138 sc->mbuf_alloc_failed_count++;
5140 goto bce_get_pg_buf_exit;
5143 DBRUN(sc->debug_pg_mbuf_alloc++);
5145 /* Reuse an existing mbuf. */
5147 m_new->m_data = m_new->m_ext.ext_buf;
5150 m_new->m_len = sc->pg_bd_mbuf_alloc_size;
5152 /* ToDo: Consider calling m_fragment() to test error handling. */
5154 /* Map the mbuf cluster into device memory. */
5155 map = sc->pg_mbuf_map[*prod_idx];
5156 error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
5157 sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
5159 /* Handle any mapping errors. */
5161 BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
5162 __FILE__, __LINE__);
5165 DBRUN(sc->debug_pg_mbuf_alloc--);
5168 goto bce_get_pg_buf_exit;
5171 /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
5174 * The page chain uses the same rx_bd data structure
5175 * as the receive chain but doesn't require a byte sequence (bseq).
5177 pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
5179 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr));
5180 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr));
5181 pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size);
5182 pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5184 /* Save the mbuf and update our counter. */
5185 sc->pg_mbuf_ptr[*prod_idx] = m_new;
5188 DBRUNMSG(BCE_INSANE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
5191 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5192 "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
5194 bce_get_pg_buf_exit:
5195 DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5199 #endif /* BCE_JUMBO_HDRSPLIT */
5201 /****************************************************************************/
5202 /* Initialize the TX context memory. */
5206 /****************************************************************************/
5208 bce_init_tx_context(struct bce_softc *sc)
5212 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5214 /* Initialize the context ID for an L2 TX chain. */
5215 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5216 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5217 /* Set the CID type to support an L2 connection. */
5218 val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
5219 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
5220 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
5221 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val);
5223 /* Point the hardware to the first page in the chain. */
5224 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5225 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
5226 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5227 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
5229 /* Set the CID type to support an L2 connection. */
5230 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
5231 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
5232 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
5233 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
5235 /* Point the hardware to the first page in the chain. */
5236 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5237 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
5238 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5239 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
5242 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5246 /****************************************************************************/
5247 /* Allocate memory and initialize the TX data structures. */
5250 /* 0 for success, positive value for failure. */
5251 /****************************************************************************/
5253 bce_init_tx_chain(struct bce_softc *sc)
5258 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5260 /* Set the initial TX producer/consumer indices. */
5263 sc->tx_prod_bseq = 0;
5265 sc->max_tx_bd = USABLE_TX_BD;
5266 DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
5267 DBRUN(sc->tx_full_count = 0);
5270 * The NetXtreme II supports a linked-list structre called
5271 * a Buffer Descriptor Chain (or BD chain). A BD chain
5272 * consists of a series of 1 or more chain pages, each of which
5273 * consists of a fixed number of BD entries.
5274 * The last BD entry on each page is a pointer to the next page
5275 * in the chain, and the last pointer in the BD chain
5276 * points back to the beginning of the chain.
5279 /* Set the TX next pointer chain entries. */
5280 for (i = 0; i < TX_PAGES; i++) {
5283 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
5285 /* Check if we've reached the last page. */
5286 if (i == (TX_PAGES - 1))
5291 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
5292 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
5295 bce_init_tx_context(sc);
5297 DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
5298 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5304 /****************************************************************************/
5305 /* Free memory and clear the TX data structures. */
5309 /****************************************************************************/
5311 bce_free_tx_chain(struct bce_softc *sc)
5315 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5317 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
5318 for (i = 0; i < TOTAL_TX_BD; i++) {
5319 if (sc->tx_mbuf_ptr[i] != NULL) {
5320 if (sc->tx_mbuf_map[i] != NULL)
5321 bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
5322 BUS_DMASYNC_POSTWRITE);
5323 m_freem(sc->tx_mbuf_ptr[i]);
5324 sc->tx_mbuf_ptr[i] = NULL;
5325 DBRUN(sc->debug_tx_mbuf_alloc--);
5329 /* Clear each TX chain page. */
5330 for (i = 0; i < TX_PAGES; i++)
5331 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
5335 /* Check if we lost any mbufs in the process. */
5336 DBRUNIF((sc->debug_tx_mbuf_alloc),
5337 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
5339 __FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
5341 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5345 /****************************************************************************/
5346 /* Initialize the RX context memory. */
5350 /****************************************************************************/
5352 bce_init_rx_context(struct bce_softc *sc)
5356 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5358 /* Initialize the type, size, and BD cache levels for the RX context. */
5359 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
5360 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
5361 (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
5364 * Set the level for generating pause frames
5365 * when the number of available rx_bd's gets
5366 * too low (the low watermark) and the level
5367 * when pause frames can be stopped (the high
5370 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5371 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5372 u32 lo_water, hi_water;
5374 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
5375 hi_water = USABLE_RX_BD / 4;
5377 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
5378 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
5382 else if (hi_water == 0)
5384 val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
5385 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
5388 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
5390 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
5391 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5392 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
5393 val = REG_RD(sc, BCE_MQ_MAP_L2_5);
5394 REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
5397 /* Point the hardware to the first page in the chain. */
5398 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
5399 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
5400 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
5401 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
5403 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5407 /****************************************************************************/
5408 /* Allocate memory and initialize the RX data structures. */
5411 /* 0 for success, positive value for failure. */
5412 /****************************************************************************/
5414 bce_init_rx_chain(struct bce_softc *sc)
5419 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5422 /* Initialize the RX producer and consumer indices. */
5425 sc->rx_prod_bseq = 0;
5426 sc->free_rx_bd = USABLE_RX_BD;
5427 sc->max_rx_bd = USABLE_RX_BD;
5428 DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
5429 DBRUN(sc->rx_empty_count = 0);
5431 /* Initialize the RX next pointer chain entries. */
5432 for (i = 0; i < RX_PAGES; i++) {
5435 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
5437 /* Check if we've reached the last page. */
5438 if (i == (RX_PAGES - 1))
5443 /* Setup the chain page pointers. */
5444 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
5445 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
5448 /* Fill up the RX chain. */
5449 bce_fill_rx_chain(sc);
5451 for (i = 0; i < RX_PAGES; i++) {
5452 bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
5453 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5456 bce_init_rx_context(sc);
5458 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
5459 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5461 /* ToDo: Are there possible failure modes here? */
5466 /****************************************************************************/
5467 /* Add mbufs to the RX chain until its full or an mbuf allocation error */
5472 /****************************************************************************/
5474 bce_fill_rx_chain(struct bce_softc *sc)
5479 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5482 /* Get the RX chain producer indices. */
5484 prod_bseq = sc->rx_prod_bseq;
5486 /* Keep filling the RX chain until it's full. */
5487 while (sc->free_rx_bd > 0) {
5488 prod_idx = RX_CHAIN_IDX(prod);
5489 if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
5490 /* Bail out if we can't add an mbuf to the chain. */
5493 prod = NEXT_RX_BD(prod);
5496 /* Save the RX chain producer indices. */
5498 sc->rx_prod_bseq = prod_bseq;
5500 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5501 BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
5502 __FUNCTION__, sc->rx_prod));
5504 /* Write the mailbox and tell the chip about the waiting rx_bd's. */
5505 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
5507 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
5510 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5515 /****************************************************************************/
5516 /* Free memory and clear the RX data structures. */
5520 /****************************************************************************/
5522 bce_free_rx_chain(struct bce_softc *sc)
5526 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5528 /* Free any mbufs still in the RX mbuf chain. */
5529 for (i = 0; i < TOTAL_RX_BD; i++) {
5530 if (sc->rx_mbuf_ptr[i] != NULL) {
5531 if (sc->rx_mbuf_map[i] != NULL)
5532 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
5533 BUS_DMASYNC_POSTREAD);
5534 m_freem(sc->rx_mbuf_ptr[i]);
5535 sc->rx_mbuf_ptr[i] = NULL;
5536 DBRUN(sc->debug_rx_mbuf_alloc--);
5540 /* Clear each RX chain page. */
5541 for (i = 0; i < RX_PAGES; i++)
5542 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
5544 sc->free_rx_bd = sc->max_rx_bd;
5546 /* Check if we lost any mbufs in the process. */
5547 DBRUNIF((sc->debug_rx_mbuf_alloc),
5548 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
5549 __FUNCTION__, sc->debug_rx_mbuf_alloc));
5551 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5555 #ifdef BCE_JUMBO_HDRSPLIT
5556 /****************************************************************************/
5557 /* Allocate memory and initialize the page data structures. */
5558 /* Assumes that bce_init_rx_chain() has not already been called. */
5561 /* 0 for success, positive value for failure. */
5562 /****************************************************************************/
5564 bce_init_pg_chain(struct bce_softc *sc)
5570 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5573 /* Initialize the page producer and consumer indices. */
5576 sc->free_pg_bd = USABLE_PG_BD;
5577 sc->max_pg_bd = USABLE_PG_BD;
5578 DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
5579 DBRUN(sc->pg_empty_count = 0);
5581 /* Initialize the page next pointer chain entries. */
5582 for (i = 0; i < PG_PAGES; i++) {
5585 pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
5587 /* Check if we've reached the last page. */
5588 if (i == (PG_PAGES - 1))
5593 /* Setup the chain page pointers. */
5594 pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
5595 pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
5598 /* Setup the MQ BIN mapping for host_pg_bidx. */
5599 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
5600 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
5601 REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);
5603 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
5605 /* Configure the rx_bd and page chain mbuf cluster size. */
5606 val = (sc->rx_bd_mbuf_data_len << 16) | sc->pg_bd_mbuf_alloc_size;
5607 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
5609 /* Configure the context reserved for jumbo support. */
5610 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
5611 BCE_L2CTX_RX_RBDC_JUMBO_KEY);
5613 /* Point the hardware to the first page in the page chain. */
5614 val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
5615 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
5616 val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
5617 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);
5619 /* Fill up the page chain. */
5620 bce_fill_pg_chain(sc);
5622 for (i = 0; i < PG_PAGES; i++) {
5623 bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
5624 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5627 DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
5628 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5634 /****************************************************************************/
5635 /* Add mbufs to the page chain until its full or an mbuf allocation error */
5640 /****************************************************************************/
5642 bce_fill_pg_chain(struct bce_softc *sc)
5646 DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5649 /* Get the page chain prodcuer index. */
5652 /* Keep filling the page chain until it's full. */
5653 while (sc->free_pg_bd > 0) {
5654 prod_idx = PG_CHAIN_IDX(prod);
5655 if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
5656 /* Bail out if we can't add an mbuf to the chain. */
5659 prod = NEXT_PG_BD(prod);
5662 /* Save the page chain producer index. */
5665 DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5666 BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
5667 __FUNCTION__, sc->pg_prod));
5670 * Write the mailbox and tell the chip about
5671 * the new rx_bd's in the page chain.
5673 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
5676 DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5681 /****************************************************************************/
5682 /* Free memory and clear the RX data structures. */
5686 /****************************************************************************/
5688 bce_free_pg_chain(struct bce_softc *sc)
5692 DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5694 /* Free any mbufs still in the mbuf page chain. */
5695 for (i = 0; i < TOTAL_PG_BD; i++) {
5696 if (sc->pg_mbuf_ptr[i] != NULL) {
5697 if (sc->pg_mbuf_map[i] != NULL)
5698 bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
5699 BUS_DMASYNC_POSTREAD);
5700 m_freem(sc->pg_mbuf_ptr[i]);
5701 sc->pg_mbuf_ptr[i] = NULL;
5702 DBRUN(sc->debug_pg_mbuf_alloc--);
5706 /* Clear each page chain pages. */
5707 for (i = 0; i < PG_PAGES; i++)
5708 bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
5710 sc->free_pg_bd = sc->max_pg_bd;
5712 /* Check if we lost any mbufs in the process. */
5713 DBRUNIF((sc->debug_pg_mbuf_alloc),
5714 BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
5715 __FUNCTION__, sc->debug_pg_mbuf_alloc));
5717 DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5719 #endif /* BCE_JUMBO_HDRSPLIT */
5722 /****************************************************************************/
5723 /* Set media options. */
5726 /* 0 for success, positive value for failure. */
5727 /****************************************************************************/
5729 bce_ifmedia_upd(struct ifnet *ifp)
5731 struct bce_softc *sc = ifp->if_softc;
5733 DBENTER(BCE_VERBOSE);
5736 bce_ifmedia_upd_locked(ifp);
5739 DBEXIT(BCE_VERBOSE);
5744 /****************************************************************************/
5745 /* Set media options. */
5749 /****************************************************************************/
5751 bce_ifmedia_upd_locked(struct ifnet *ifp)
5753 struct bce_softc *sc = ifp->if_softc;
5754 struct mii_data *mii;
5756 DBENTER(BCE_VERBOSE);
5758 BCE_LOCK_ASSERT(sc);
5760 mii = device_get_softc(sc->bce_miibus);
5762 /* Make sure the MII bus has been enumerated. */
5765 if (mii->mii_instance) {
5766 struct mii_softc *miisc;
5768 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5769 mii_phy_reset(miisc);
5774 DBEXIT(BCE_VERBOSE);
5778 /****************************************************************************/
5779 /* Reports current media status. */
5783 /****************************************************************************/
5785 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5787 struct bce_softc *sc = ifp->if_softc;
5788 struct mii_data *mii;
5790 DBENTER(BCE_VERBOSE);
5794 mii = device_get_softc(sc->bce_miibus);
5797 ifmr->ifm_active = mii->mii_media_active;
5798 ifmr->ifm_status = mii->mii_media_status;
5802 DBEXIT(BCE_VERBOSE);
5806 /****************************************************************************/
5807 /* Handles PHY generated interrupt events. */
5811 /****************************************************************************/
5813 bce_phy_intr(struct bce_softc *sc)
5815 u32 new_link_state, old_link_state;
5817 DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5819 new_link_state = sc->status_block->status_attn_bits &
5820 STATUS_ATTN_BITS_LINK_STATE;
5821 old_link_state = sc->status_block->status_attn_bits_ack &
5822 STATUS_ATTN_BITS_LINK_STATE;
5824 /* Handle any changes if the link state has changed. */
5825 if (new_link_state != old_link_state) {
5827 /* Update the status_attn_bits_ack field in the status block. */
5828 if (new_link_state) {
5829 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
5830 STATUS_ATTN_BITS_LINK_STATE);
5831 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n",
5835 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
5836 STATUS_ATTN_BITS_LINK_STATE);
5837 DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n",
5842 * Assume link is down and allow
5843 * tick routine to update the state
5844 * based on the actual media state.
5847 callout_stop(&sc->bce_tick_callout);
5851 /* Acknowledge the link change interrupt. */
5852 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
5854 DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR);
5858 /****************************************************************************/
5859 /* Reads the receive consumer value from the status block (skipping over */
5860 /* chain page pointer if necessary). */
5864 /****************************************************************************/
5866 bce_get_hw_rx_cons(struct bce_softc *sc)
5871 hw_cons = sc->status_block->status_rx_quick_consumer_index0;
5872 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5878 /****************************************************************************/
5879 /* Handles received frame interrupt events. */
5883 /****************************************************************************/
5885 bce_rx_intr(struct bce_softc *sc)
5887 struct ifnet *ifp = sc->bce_ifp;
5888 struct l2_fhdr *l2fhdr;
5889 unsigned int pkt_len;
5890 u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
5892 #ifdef BCE_JUMBO_HDRSPLIT
5893 unsigned int rem_len;
5894 u16 sw_pg_cons, sw_pg_cons_idx;
5897 DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
5898 DBRUN(sc->rx_interrupts++);
5899 DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, "
5900 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
5901 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
5903 /* Prepare the RX chain pages to be accessed by the host CPU. */
5904 for (int i = 0; i < RX_PAGES; i++)
5905 bus_dmamap_sync(sc->rx_bd_chain_tag,
5906 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5908 #ifdef BCE_JUMBO_HDRSPLIT
5909 /* Prepare the page chain pages to be accessed by the host CPU. */
5910 for (int i = 0; i < PG_PAGES; i++)
5911 bus_dmamap_sync(sc->pg_bd_chain_tag,
5912 sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
5915 /* Get the hardware's view of the RX consumer index. */
5916 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
5918 /* Get working copies of the driver's view of the consumer indices. */
5919 sw_rx_cons = sc->rx_cons;
5920 #ifdef BCE_JUMBO_HDRSPLIT
5921 sw_pg_cons = sc->pg_cons;
5924 /* Update some debug statistics counters */
5925 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
5926 sc->rx_low_watermark = sc->free_rx_bd);
5927 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
5929 /* Scan through the receive chain as long as there is work to do */
5930 /* ToDo: Consider setting a limit on the number of packets processed. */
5932 while (sw_rx_cons != hw_rx_cons) {
5935 /* Convert the producer/consumer indices to an actual rx_bd index. */
5936 sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
5938 /* Unmap the mbuf from DMA space. */
5939 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[sw_rx_cons_idx],
5940 BUS_DMASYNC_POSTREAD);
5941 bus_dmamap_unload(sc->rx_mbuf_tag,
5942 sc->rx_mbuf_map[sw_rx_cons_idx]);
5944 /* Remove the mbuf from the RX chain. */
5945 m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
5946 sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
5947 DBRUN(sc->debug_rx_mbuf_alloc--);
5951 DBPRINT(sc, BCE_EXTREME_RECV, "%s(): Oops! Empty mbuf pointer "
5952 "found in sc->rx_mbuf_ptr[0x%04X]!\n",
5953 __FUNCTION__, sw_rx_cons_idx);
5954 goto bce_rx_int_next_rx;
5958 * Frames received on the NetXteme II are prepended with an
5959 * l2_fhdr structure which provides status information about
5960 * the received frame (including VLAN tags and checksum info).
5961 * The frames are also automatically adjusted to align the IP
5962 * header (i.e. two null bytes are inserted before the Ethernet
5963 * header). As a result the data DMA'd by the controller into
5964 * the mbuf is as follows:
5966 * +---------+-----+---------------------+-----+
5967 * | l2_fhdr | pad | packet data | FCS |
5968 * +---------+-----+---------------------+-----+
5970 * The l2_fhdr needs to be checked and skipped and the FCS needs
5971 * to be stripped before sending the packet up the stack.
5973 l2fhdr = mtod(m0, struct l2_fhdr *);
5975 /* Get the packet data + FCS length and the status. */
5976 pkt_len = l2fhdr->l2_fhdr_pkt_len;
5977 status = l2fhdr->l2_fhdr_status;
5980 * Skip over the l2_fhdr and pad, resulting in the
5981 * following data in the mbuf:
5982 * +---------------------+-----+
5983 * | packet data | FCS |
5984 * +---------------------+-----+
5986 m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
5988 #ifdef BCE_JUMBO_HDRSPLIT
5990 * Check whether the received frame fits in a single
5991 * mbuf or not (i.e. packet data + FCS <=
5992 * sc->rx_bd_mbuf_data_len bytes).
5994 if (pkt_len > m0->m_len) {
5996 * The received frame is larger than a single mbuf.
5997 * If the frame was a TCP frame then only the TCP
5998 * header is placed in the mbuf, the remaining
5999 * payload (including FCS) is placed in the page
6000 * chain, the SPLIT flag is set, and the header
6001 * length is placed in the IP checksum field.
6002 * If the frame is not a TCP frame then the mbuf
6003 * is filled and the remaining bytes are placed
6004 * in the page chain.
6007 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large packet.\n",
6011 * When the page chain is enabled and the TCP
6012 * header has been split from the TCP payload,
6013 * the ip_xsum structure will reflect the length
6014 * of the TCP header, not the IP checksum. Set
6015 * the packet length of the mbuf accordingly.
6017 if (status & L2_FHDR_STATUS_SPLIT)
6018 m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
6020 rem_len = pkt_len - m0->m_len;
6022 /* Pull mbufs off the page chain for the remaining data. */
6023 while (rem_len > 0) {
6026 sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
6028 /* Remove the mbuf from the page chain. */
6029 m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
6030 sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
6031 DBRUN(sc->debug_pg_mbuf_alloc--);
6034 /* Unmap the page chain mbuf from DMA space. */
6035 bus_dmamap_sync(sc->pg_mbuf_tag,
6036 sc->pg_mbuf_map[sw_pg_cons_idx],
6037 BUS_DMASYNC_POSTREAD);
6038 bus_dmamap_unload(sc->pg_mbuf_tag,
6039 sc->pg_mbuf_map[sw_pg_cons_idx]);
6041 /* Adjust the mbuf length. */
6042 if (rem_len < m_pg->m_len) {
6043 /* The mbuf chain is complete. */
6044 m_pg->m_len = rem_len;
6047 /* More packet data is waiting. */
6048 rem_len -= m_pg->m_len;
6051 /* Concatenate the mbuf cluster to the mbuf. */
6054 sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
6057 /* Set the total packet length. */
6058 m0->m_pkthdr.len = pkt_len;
6062 * The received packet is small and fits in a
6063 * single mbuf (i.e. the l2_fhdr + pad + packet +
6064 * FCS <= MHLEN). In other words, the packet is
6065 * 154 bytes or less in size.
6068 DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small packet.\n",
6071 /* Set the total packet length. */
6072 m0->m_pkthdr.len = m0->m_len = pkt_len;
6075 /* Set the total packet length. */
6076 m0->m_pkthdr.len = m0->m_len = pkt_len;
6079 /* Remove the trailing Ethernet FCS. */
6080 m_adj(m0, -ETHER_CRC_LEN);
6082 /* Check that the resulting mbuf chain is valid. */
6083 DBRUN(m_sanity(m0, FALSE));
6084 DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
6085 (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
6086 BCE_PRINTF("Invalid Ethernet frame size!\n");
6089 DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control),
6090 BCE_PRINTF("Simulating l2_fhdr status error.\n");
6091 sc->l2fhdr_error_sim_count++;
6092 status = status | L2_FHDR_ERRORS_PHY_DECODE);
6094 /* Check the received frame for errors. */
6095 if (status & (L2_FHDR_ERRORS_BAD_CRC |
6096 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
6097 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
6099 /* Log the error and release the mbuf. */
6101 sc->l2fhdr_error_count++;
6105 goto bce_rx_int_next_rx;
6108 /* Send the packet to the appropriate interface. */
6109 m0->m_pkthdr.rcvif = ifp;
6111 /* Assume no hardware checksum. */
6112 m0->m_pkthdr.csum_flags = 0;
6114 /* Validate the checksum if offload enabled. */
6115 if (ifp->if_capenable & IFCAP_RXCSUM) {
6117 /* Check for an IP datagram. */
6118 if (!(status & L2_FHDR_STATUS_SPLIT) &&
6119 (status & L2_FHDR_STATUS_IP_DATAGRAM)) {
6120 m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
6122 /* Check if the IP checksum is valid. */
6123 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
6124 m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
6127 /* Check for a valid TCP/UDP frame. */
6128 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
6129 L2_FHDR_STATUS_UDP_DATAGRAM)) {
6131 /* Check for a good TCP/UDP checksum. */
6132 if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
6133 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
6134 m0->m_pkthdr.csum_data =
6135 l2fhdr->l2_fhdr_tcp_udp_xsum;
6136 m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
6142 /* Attach the VLAN tag. */
6143 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
6144 #if __FreeBSD_version < 700000
6145 VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
6147 m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
6148 m0->m_flags |= M_VLANTAG;
6152 /* Increment received packet statistics. */
6156 sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
6158 /* If we have a packet, pass it up the stack */
6160 /* Make sure we don't lose our place when we release the lock. */
6161 sc->rx_cons = sw_rx_cons;
6162 #ifdef BCE_JUMBO_HDRSPLIT
6163 sc->pg_cons = sw_pg_cons;
6167 (*ifp->if_input)(ifp, m0);
6170 /* Recover our place. */
6171 sw_rx_cons = sc->rx_cons;
6172 #ifdef BCE_JUMBO_HDRSPLIT
6173 sw_pg_cons = sc->pg_cons;
6177 /* Refresh hw_cons to see if there's new work */
6178 if (sw_rx_cons == hw_rx_cons)
6179 hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
6182 /* No new packets to process. Refill the RX and page chains and exit. */
6183 #ifdef BCE_JUMBO_HDRSPLIT
6184 sc->pg_cons = sw_pg_cons;
6185 bce_fill_pg_chain(sc);
6188 sc->rx_cons = sw_rx_cons;
6189 bce_fill_rx_chain(sc);
6191 /* Prepare the page chain pages to be accessed by the NIC. */
6192 for (int i = 0; i < RX_PAGES; i++)
6193 bus_dmamap_sync(sc->rx_bd_chain_tag,
6194 sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6196 #ifdef BCE_JUMBO_HDRSPLIT
6197 for (int i = 0; i < PG_PAGES; i++)
6198 bus_dmamap_sync(sc->pg_bd_chain_tag,
6199 sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
6202 DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
6203 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
6204 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
6205 DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
6209 /****************************************************************************/
6210 /* Reads the transmit consumer value from the status block (skipping over */
6211 /* chain page pointer if necessary). */
6215 /****************************************************************************/
6217 bce_get_hw_tx_cons(struct bce_softc *sc)
6222 hw_cons = sc->status_block->status_tx_quick_consumer_index0;
6223 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6230 /****************************************************************************/
6231 /* Handles transmit completion interrupt events. */
6235 /****************************************************************************/
6237 bce_tx_intr(struct bce_softc *sc)
6239 struct ifnet *ifp = sc->bce_ifp;
6240 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
6242 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6243 DBRUN(sc->tx_interrupts++);
6244 DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, "
6245 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6246 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6248 BCE_LOCK_ASSERT(sc);
6250 /* Get the hardware's view of the TX consumer index. */
6251 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6252 sw_tx_cons = sc->tx_cons;
6254 /* Prevent speculative reads from getting ahead of the status block. */
6255 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6256 BUS_SPACE_BARRIER_READ);
6258 /* Cycle through any completed TX chain page entries. */
6259 while (sw_tx_cons != hw_tx_cons) {
6261 struct tx_bd *txbd = NULL;
6263 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
6265 DBPRINT(sc, BCE_INFO_SEND,
6266 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
6267 "sw_tx_chain_cons = 0x%04X\n",
6268 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
6270 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
6271 BCE_PRINTF("%s(%d): TX chain consumer out of range! "
6272 " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
6274 bce_breakpoint(sc));
6276 DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
6277 [TX_IDX(sw_tx_chain_cons)]);
6279 DBRUNIF((txbd == NULL),
6280 BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
6281 __FILE__, __LINE__, sw_tx_chain_cons);
6282 bce_breakpoint(sc));
6284 DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
6285 bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
6288 * Free the associated mbuf. Remember
6289 * that only the last tx_bd of a packet
6290 * has an mbuf pointer and DMA map.
6292 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
6294 /* Validate that this is the last tx_bd. */
6295 DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
6296 BCE_PRINTF("%s(%d): tx_bd END flag not set but "
6297 "txmbuf == NULL!\n", __FILE__, __LINE__);
6298 bce_breakpoint(sc));
6300 DBRUNMSG(BCE_INFO_SEND,
6301 BCE_PRINTF("%s(): Unloading map/freeing mbuf "
6302 "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
6304 /* Unmap the mbuf. */
6305 bus_dmamap_unload(sc->tx_mbuf_tag,
6306 sc->tx_mbuf_map[sw_tx_chain_cons]);
6308 /* Free the mbuf. */
6309 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
6310 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
6311 DBRUN(sc->debug_tx_mbuf_alloc--);
6317 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
6319 /* Refresh hw_cons to see if there's new work. */
6320 hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
6322 /* Prevent speculative reads from getting ahead of the status block. */
6323 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
6324 BUS_SPACE_BARRIER_READ);
6327 /* Clear the TX timeout timer. */
6328 sc->watchdog_timer = 0;
6330 /* Clear the tx hardware queue full flag. */
6331 if (sc->used_tx_bd < sc->max_tx_bd) {
6332 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
6333 DBPRINT(sc, BCE_INFO_SEND,
6334 "%s(): Open TX chain! %d/%d (used/total)\n",
6335 __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
6336 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6339 sc->tx_cons = sw_tx_cons;
6341 DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, "
6342 "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n",
6343 __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq);
6344 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR);
6348 /****************************************************************************/
6349 /* Disables interrupt generation. */
6353 /****************************************************************************/
6355 bce_disable_intr(struct bce_softc *sc)
6357 DBENTER(BCE_VERBOSE_INTR);
6359 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
6360 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
6362 DBEXIT(BCE_VERBOSE_INTR);
6366 /****************************************************************************/
6367 /* Enables interrupt generation. */
6371 /****************************************************************************/
6373 bce_enable_intr(struct bce_softc *sc, int coal_now)
6375 DBENTER(BCE_VERBOSE_INTR);
6377 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6378 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
6379 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
6381 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
6382 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
6384 /* Force an immediate interrupt (whether there is new data or not). */
6386 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
6388 DBEXIT(BCE_VERBOSE_INTR);
6392 /****************************************************************************/
6393 /* Handles controller initialization. */
6397 /****************************************************************************/
6399 bce_init_locked(struct bce_softc *sc)
6404 DBENTER(BCE_VERBOSE_RESET);
6406 BCE_LOCK_ASSERT(sc);
6410 /* Check if the driver is still running and bail out if it is. */
6411 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6412 goto bce_init_locked_exit;
6416 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
6417 BCE_PRINTF("%s(%d): Controller reset failed!\n",
6418 __FILE__, __LINE__);
6419 goto bce_init_locked_exit;
6422 if (bce_chipinit(sc)) {
6423 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
6424 __FILE__, __LINE__);
6425 goto bce_init_locked_exit;
6428 if (bce_blockinit(sc)) {
6429 BCE_PRINTF("%s(%d): Block initialization failed!\n",
6430 __FILE__, __LINE__);
6431 goto bce_init_locked_exit;
6434 /* Load our MAC address. */
6435 bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
6436 bce_set_mac_addr(sc);
6439 * Calculate and program the hardware Ethernet MTU
6440 * size. Be generous on the receive if we have room.
6442 #ifdef BCE_JUMBO_HDRSPLIT
6443 if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size))
6444 ether_mtu = sc->rx_bd_mbuf_data_len + sc->pg_bd_mbuf_alloc_size;
6446 if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
6447 ether_mtu = sc->rx_bd_mbuf_data_len;
6450 ether_mtu = ifp->if_mtu;
6452 ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
6454 DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
6457 /* Program the mtu, enabling jumbo frame support if necessary. */
6458 if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
6459 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
6460 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
6461 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
6463 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
6465 DBPRINT(sc, BCE_INFO_LOAD,
6466 "%s(): rx_bd_mbuf_alloc_size = %d, rx_bce_mbuf_data_len = %d, "
6467 "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
6468 sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
6469 sc->rx_bd_mbuf_align_pad);
6471 /* Program appropriate promiscuous/multicast filtering. */
6472 bce_set_rx_mode(sc);
6474 #ifdef BCE_JUMBO_HDRSPLIT
6475 DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
6476 __FUNCTION__, sc->pg_bd_mbuf_alloc_size);
6478 /* Init page buffer descriptor chain. */
6479 bce_init_pg_chain(sc);
6482 /* Init RX buffer descriptor chain. */
6483 bce_init_rx_chain(sc);
6485 /* Init TX buffer descriptor chain. */
6486 bce_init_tx_chain(sc);
6488 /* Enable host interrupts. */
6489 bce_enable_intr(sc, 1);
6491 bce_ifmedia_upd_locked(ifp);
6493 /* Let the OS know the driver is up and running. */
6494 ifp->if_drv_flags |= IFF_DRV_RUNNING;
6495 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6497 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6499 bce_init_locked_exit:
6500 DBEXIT(BCE_VERBOSE_RESET);
6504 /****************************************************************************/
6505 /* Initialize the controller just enough so that any management firmware */
6506 /* running on the device will continue to operate correctly. */
6510 /****************************************************************************/
6512 bce_mgmt_init_locked(struct bce_softc *sc)
6516 DBENTER(BCE_VERBOSE_RESET);
6518 BCE_LOCK_ASSERT(sc);
6520 /* Bail out if management firmware is not running. */
6521 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
6522 DBPRINT(sc, BCE_VERBOSE_SPECIAL,
6523 "No management firmware running...\n");
6524 goto bce_mgmt_init_locked_exit;
6529 /* Enable all critical blocks in the MAC. */
6530 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
6531 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
6534 bce_ifmedia_upd_locked(ifp);
6536 bce_mgmt_init_locked_exit:
6537 DBEXIT(BCE_VERBOSE_RESET);
6541 /****************************************************************************/
6542 /* Handles controller initialization when called from an unlocked routine. */
6546 /****************************************************************************/
6550 struct bce_softc *sc = xsc;
6552 DBENTER(BCE_VERBOSE_RESET);
6555 bce_init_locked(sc);
6558 DBEXIT(BCE_VERBOSE_RESET);
6562 /****************************************************************************/
6563 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
6564 /* memory visible to the controller. */
6567 /* 0 for success, positive value for failure. */
6569 /* m_head: May be set to NULL if MBUF is excessively fragmented. */
6570 /****************************************************************************/
6572 bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
6574 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
6576 struct tx_bd *txbd = NULL;
6578 struct ether_vlan_header *eh;
6581 u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
6583 int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
6588 int i, error, nsegs, rc = 0;
6590 DBENTER(BCE_VERBOSE_SEND);
6591 DBPRINT(sc, BCE_INFO_SEND,
6592 "%s(enter): tx_prod = 0x%04X, tx_chain_prod = %04X, "
6593 "tx_prod_bseq = 0x%08X\n",
6594 __FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6597 /* Transfer any checksum offload flags to the bd. */
6599 if (m0->m_pkthdr.csum_flags) {
6600 if (m0->m_pkthdr.csum_flags & CSUM_IP)
6601 flags |= TX_BD_FLAGS_IP_CKSUM;
6602 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
6603 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6604 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
6605 /* For TSO the controller needs two pieces of info, */
6606 /* the MSS and the IP+TCP options length. */
6607 mss = htole16(m0->m_pkthdr.tso_segsz);
6609 /* Map the header and find the Ethernet type & header length */
6610 eh = mtod(m0, struct ether_vlan_header *);
6611 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
6612 etype = ntohs(eh->evl_proto);
6613 e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6615 etype = ntohs(eh->evl_encap_proto);
6616 e_hlen = ETHER_HDR_LEN;
6619 /* Check for supported TSO Ethernet types (only IPv4 for now) */
6622 ip = (struct ip *)(m0->m_data + e_hlen);
6624 /* TSO only supported for TCP protocol */
6625 if (ip->ip_p != IPPROTO_TCP) {
6626 BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n",
6627 __FILE__, __LINE__);
6628 goto bce_tx_encap_skip_tso;
6631 /* Get IP header length in bytes (min 20) */
6632 ip_hlen = ip->ip_hl << 2;
6634 /* Get the TCP header length in bytes (min 20) */
6635 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
6636 tcp_hlen = (th->th_off << 2);
6638 /* IP header length and checksum will be calc'd by hardware */
6639 ip_len = ip->ip_len;
6643 case ETHERTYPE_IPV6:
6644 BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n",
6645 __FILE__, __LINE__);
6646 goto bce_tx_encap_skip_tso;
6648 BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n",
6649 __FILE__, __LINE__);
6650 goto bce_tx_encap_skip_tso;
6653 hdr_len = e_hlen + ip_hlen + tcp_hlen;
6655 DBPRINT(sc, BCE_EXTREME_SEND,
6656 "%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
6657 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);
6659 /* Set the LSO flag in the TX BD */
6660 flags |= TX_BD_FLAGS_SW_LSO;
6661 /* Set the length of IP + TCP options (in 32 bit words) */
6662 flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
6664 bce_tx_encap_skip_tso:
6665 DBRUN(sc->requested_tso_frames++);
6669 /* Transfer any VLAN tags to the bd. */
6670 if (m0->m_flags & M_VLANTAG) {
6671 flags |= TX_BD_FLAGS_VLAN_TAG;
6672 vlan_tag = m0->m_pkthdr.ether_vtag;
6675 /* Map the mbuf into DMAable memory. */
6677 chain_prod = TX_CHAIN_IDX(prod);
6678 map = sc->tx_mbuf_map[chain_prod];
6680 /* Map the mbuf into our DMA address space. */
6681 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6682 segs, &nsegs, BUS_DMA_NOWAIT);
6684 /* Check if the DMA mapping was successful */
6685 if (error == EFBIG) {
6687 sc->fragmented_mbuf_count++;
6689 /* Try to defrag the mbuf. */
6690 m0 = m_defrag(*m_head, M_DONTWAIT);
6692 /* Defrag was unsuccessful */
6695 sc->mbuf_alloc_failed_count++;
6697 goto bce_tx_encap_exit;
6700 /* Defrag was successful, try mapping again */
6702 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
6703 segs, &nsegs, BUS_DMA_NOWAIT);
6705 /* Still getting an error after a defrag. */
6706 if (error == ENOMEM) {
6707 /* Insufficient DMA buffers available. */
6708 sc->dma_map_addr_tx_failed_count++;
6710 goto bce_tx_encap_exit;
6711 } else if (error != 0) {
6712 /* Still can't map the mbuf, release it and return an error. */
6714 "%s(%d): Unknown error mapping mbuf into TX chain!\n",
6715 __FILE__, __LINE__);
6718 sc->dma_map_addr_tx_failed_count++;
6720 goto bce_tx_encap_exit;
6722 } else if (error == ENOMEM) {
6723 /* Insufficient DMA buffers available. */
6724 sc->dma_map_addr_tx_failed_count++;
6726 goto bce_tx_encap_exit;
6727 } else if (error != 0) {
6730 sc->dma_map_addr_tx_failed_count++;
6732 goto bce_tx_encap_exit;
6735 /* Make sure there's room in the chain */
6736 if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
6737 bus_dmamap_unload(sc->tx_mbuf_tag, map);
6739 goto bce_tx_encap_exit;
6742 /* prod points to an empty tx_bd at this point. */
6743 prod_bseq = sc->tx_prod_bseq;
6746 debug_prod = chain_prod;
6749 DBPRINT(sc, BCE_INFO_SEND,
6750 "%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
6751 "prod_bseq = 0x%08X\n",
6752 __FUNCTION__, prod, chain_prod, prod_bseq);
6755 * Cycle through each mbuf segment that makes up
6756 * the outgoing frame, gathering the mapping info
6757 * for that segment and creating a tx_bd for
6760 for (i = 0; i < nsegs ; i++) {
6762 chain_prod = TX_CHAIN_IDX(prod);
6763 txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
6765 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
6766 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
6767 txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
6768 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
6769 txbd->tx_bd_flags = htole16(flags);
6770 prod_bseq += segs[i].ds_len;
6772 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
6773 prod = NEXT_TX_BD(prod);
6776 /* Set the END flag on the last TX buffer descriptor. */
6777 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
6779 DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
6781 DBPRINT(sc, BCE_INFO_SEND,
6782 "%s( end ): prod = 0x%04X, chain_prod = 0x%04X, "
6783 "prod_bseq = 0x%08X\n",
6784 __FUNCTION__, prod, chain_prod, prod_bseq);
6787 * Ensure that the mbuf pointer for this transmission
6788 * is placed at the array index of the last
6789 * descriptor in this chain. This is done
6790 * because a single map is used for all
6791 * segments of the mbuf and we don't want to
6792 * unload the map before all of the segments
6795 sc->tx_mbuf_ptr[chain_prod] = m0;
6796 sc->used_tx_bd += nsegs;
6798 /* Update some debug statistic counters */
6799 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
6800 sc->tx_hi_watermark = sc->used_tx_bd);
6801 DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
6802 DBRUNIF(sc->debug_tx_mbuf_alloc++);
6804 DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
6806 /* prod points to the next free tx_bd at this point. */
6808 sc->tx_prod_bseq = prod_bseq;
6810 DBPRINT(sc, BCE_INFO_SEND,
6811 "%s(exit): prod = 0x%04X, chain_prod = %04X, "
6812 "prod_bseq = 0x%08X\n",
6813 __FUNCTION__, sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod),
6817 DBEXIT(BCE_VERBOSE_SEND);
6822 /****************************************************************************/
6823 /* Main transmit routine when called from another routine with a lock. */
6827 /****************************************************************************/
6829 bce_start_locked(struct ifnet *ifp)
6831 struct bce_softc *sc = ifp->if_softc;
6832 struct mbuf *m_head = NULL;
6834 u16 tx_prod, tx_chain_prod;
6836 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6838 BCE_LOCK_ASSERT(sc);
6840 /* prod points to the next free tx_bd. */
6841 tx_prod = sc->tx_prod;
6842 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
6844 DBPRINT(sc, BCE_INFO_SEND,
6845 "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
6846 "tx_prod_bseq = 0x%08X\n",
6847 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
6849 /* If there's no link or the transmit queue is empty then just exit. */
6850 if (!sc->bce_link) {
6851 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
6853 goto bce_start_locked_exit;
6856 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
6857 DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
6859 goto bce_start_locked_exit;
6863 * Keep adding entries while there is space in the ring.
6865 while (sc->used_tx_bd < sc->max_tx_bd) {
6867 /* Check for any frames to send. */
6868 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
6870 /* Stop when the transmit queue is empty. */
6875 * Pack the data into the transmit ring. If we
6876 * don't have room, place the mbuf back at the
6877 * head of the queue and set the OACTIVE flag
6878 * to wait for the NIC to drain the chain.
6880 if (bce_tx_encap(sc, &m_head)) {
6881 /* No room, put the frame back on the transmit queue. */
6883 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
6884 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
6885 DBPRINT(sc, BCE_INFO_SEND,
6886 "TX chain is closed for business! Total tx_bd used = %d\n",
6893 /* Send a copy of the frame to any BPF listeners. */
6894 ETHER_BPF_MTAP(ifp, m_head);
6897 /* Exit if no packets were dequeued. */
6899 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
6901 goto bce_start_locked_exit;
6904 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into send queue.\n",
6905 __FUNCTION__, count);
6907 REG_WR(sc, BCE_MQ_COMMAND, REG_RD(sc, BCE_MQ_COMMAND) | BCE_MQ_COMMAND_NO_MAP_ERROR);
6909 /* Write the mailbox and tell the chip about the waiting tx_bd's. */
6910 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6911 "BCE_L2MQ_TX_HOST_BIDX = 0x%08X, sc->tx_prod = 0x%04X\n",
6913 MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6914 REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod);
6915 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): MB_GET_CID_ADDR(TX_CID) = 0x%08X; "
6916 "BCE_L2MQ_TX_HOST_BSEQ = 0x%08X, sc->tx_prod_bseq = 0x%04X\n",
6918 MB_GET_CID_ADDR(TX_CID), BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6919 REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq);
6921 /* Set the tx timeout. */
6922 sc->watchdog_timer = BCE_TX_TIMEOUT;
6924 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID));
6925 DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc));
6927 bce_start_locked_exit:
6928 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
6933 /****************************************************************************/
6934 /* Main transmit routine when called from another routine without a lock. */
6938 /****************************************************************************/
6940 bce_start(struct ifnet *ifp)
6942 struct bce_softc *sc = ifp->if_softc;
6944 DBENTER(BCE_VERBOSE_SEND);
6947 bce_start_locked(ifp);
6950 DBEXIT(BCE_VERBOSE_SEND);
6954 /****************************************************************************/
6955 /* Handles any IOCTL calls from the operating system. */
6958 /* 0 for success, positive value for failure. */
6959 /****************************************************************************/
6961 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
6963 struct bce_softc *sc = ifp->if_softc;
6964 struct ifreq *ifr = (struct ifreq *) data;
6965 struct mii_data *mii;
6966 int mask, error = 0;
6968 DBENTER(BCE_VERBOSE_MISC);
6972 /* Set the interface MTU. */
6974 /* Check that the MTU setting is supported. */
6975 if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
6976 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
6981 DBPRINT(sc, BCE_INFO_MISC,
6982 "SIOCSIFMTU: Changing MTU from %d to %d\n",
6983 (int) ifp->if_mtu, (int) ifr->ifr_mtu);
6986 ifp->if_mtu = ifr->ifr_mtu;
6987 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
6988 #ifdef BCE_JUMBO_HDRSPLIT
6989 /* No buffer allocation size changes are necessary. */
6991 /* Recalculate our buffer allocation sizes. */
6992 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN) > MCLBYTES) {
6993 sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
6994 sc->rx_bd_mbuf_align_pad = roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
6995 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
6996 sc->rx_bd_mbuf_align_pad;
6998 sc->rx_bd_mbuf_alloc_size = MCLBYTES;
6999 sc->rx_bd_mbuf_align_pad = roundup2(MCLBYTES, 16) - MCLBYTES;
7000 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
7001 sc->rx_bd_mbuf_align_pad;
7005 bce_init_locked(sc);
7009 /* Set interface flags. */
7011 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
7015 /* Check if the interface is up. */
7016 if (ifp->if_flags & IFF_UP) {
7017 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7018 /* Change promiscuous/multicast flags as necessary. */
7019 bce_set_rx_mode(sc);
7022 bce_init_locked(sc);
7025 /* The interface is down, check if driver is running. */
7026 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7029 /* If MFW is running, restart the controller a bit. */
7030 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
7031 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
7033 bce_mgmt_init_locked(sc);
7043 /* Add/Delete multicast address */
7046 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
7049 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
7050 bce_set_rx_mode(sc);
7057 /* Set/Get Interface media */
7060 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
7062 mii = device_get_softc(sc->bce_miibus);
7063 error = ifmedia_ioctl(ifp, ifr,
7064 &mii->mii_media, command);
7067 /* Set interface capability */
7069 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
7070 DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
7072 /* Toggle the TX checksum capabilites enable flag. */
7073 if (mask & IFCAP_TXCSUM) {
7074 ifp->if_capenable ^= IFCAP_TXCSUM;
7075 if (IFCAP_TXCSUM & ifp->if_capenable)
7076 ifp->if_hwassist = BCE_IF_HWASSIST;
7078 ifp->if_hwassist = 0;
7081 /* Toggle the RX checksum capabilities enable flag. */
7082 if (mask & IFCAP_RXCSUM) {
7083 ifp->if_capenable ^= IFCAP_RXCSUM;
7084 if (IFCAP_RXCSUM & ifp->if_capenable)
7085 ifp->if_hwassist = BCE_IF_HWASSIST;
7087 ifp->if_hwassist = 0;
7090 /* Toggle the TSO capabilities enable flag. */
7091 if (bce_tso_enable && (mask & IFCAP_TSO4)) {
7092 ifp->if_capenable ^= IFCAP_TSO4;
7093 if (IFCAP_RXCSUM & ifp->if_capenable)
7094 ifp->if_hwassist = BCE_IF_HWASSIST;
7096 ifp->if_hwassist = 0;
7099 /* Toggle VLAN_MTU capabilities enable flag. */
7100 if (mask & IFCAP_VLAN_MTU) {
7101 BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
7102 __FILE__, __LINE__);
7105 /* Toggle VLANHWTAG capabilities enabled flag. */
7106 if (mask & IFCAP_VLAN_HWTAGGING) {
7107 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
7108 BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
7109 "management firmware (ASF/IPMI/UMP) is running!\n",
7110 __FILE__, __LINE__);
7112 BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
7113 __FILE__, __LINE__);
7118 /* We don't know how to handle the IOCTL, pass it on. */
7119 error = ether_ioctl(ifp, command, data);
7123 DBEXIT(BCE_VERBOSE_MISC);
7128 /****************************************************************************/
7129 /* Transmit timeout handler. */
7133 /****************************************************************************/
7135 bce_watchdog(struct bce_softc *sc)
7137 DBENTER(BCE_EXTREME_SEND);
7139 BCE_LOCK_ASSERT(sc);
7141 /* If the watchdog timer hasn't expired then just exit. */
7142 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
7143 goto bce_watchdog_exit;
7145 /* If pause frames are active then don't reset the hardware. */
7146 /* ToDo: Should we reset the timer here? */
7147 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
7148 goto bce_watchdog_exit;
7150 BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
7151 __FILE__, __LINE__);
7154 bce_dump_driver_state(sc);
7155 bce_dump_status_block(sc);
7156 bce_dump_stats_block(sc);
7158 bce_dump_txp_state(sc, 0);
7159 bce_dump_rxp_state(sc, 0);
7160 bce_dump_tpat_state(sc, 0);
7161 bce_dump_cp_state(sc, 0);
7162 bce_dump_com_state(sc, 0));
7164 DBRUN(bce_breakpoint(sc));
7166 sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
7168 bce_init_locked(sc);
7169 sc->bce_ifp->if_oerrors++;
7172 DBEXIT(BCE_EXTREME_SEND);
7177 * Interrupt handler.
7179 /****************************************************************************/
7180 /* Main interrupt entry point. Verifies that the controller generated the */
7181 /* interrupt and then calls a separate routine for handle the various */
7182 /* interrupt causes (PHY, TX, RX). */
7185 /* 0 for success, positive value for failure. */
7186 /****************************************************************************/
7190 struct bce_softc *sc;
7192 u32 status_attn_bits;
7193 u16 hw_rx_cons, hw_tx_cons;
7198 DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7199 DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
7203 DBRUN(sc->interrupts_generated++);
7205 /* Synchnorize before we read from interface's status block */
7206 bus_dmamap_sync(sc->status_tag, sc->status_map,
7207 BUS_DMASYNC_POSTREAD);
7210 * If the hardware status block index
7211 * matches the last value read by the
7212 * driver and we haven't asserted our
7213 * interrupt then there's nothing to do.
7215 if ((sc->status_block->status_idx == sc->last_status_idx) &&
7216 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE)) {
7217 DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n",
7222 /* Ack the interrupt and stop others from occuring. */
7223 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
7224 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
7225 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
7227 /* Check if the hardware has finished any work. */
7228 hw_rx_cons = bce_get_hw_rx_cons(sc);
7229 hw_tx_cons = bce_get_hw_tx_cons(sc);
7231 /* Keep processing data as long as there is work to do. */
7234 status_attn_bits = sc->status_block->status_attn_bits;
7236 DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control),
7237 BCE_PRINTF("Simulating unexpected status attention bit set.");
7238 sc->unexpected_attention_sim_count++;
7239 status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
7241 /* Was it a link change interrupt? */
7242 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
7243 (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
7246 /* Clear any transient status updates during link state change. */
7247 REG_WR(sc, BCE_HC_COMMAND,
7248 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
7249 REG_RD(sc, BCE_HC_COMMAND);
7252 /* If any other attention is asserted then the chip is toast. */
7253 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
7254 (sc->status_block->status_attn_bits_ack &
7255 ~STATUS_ATTN_BITS_LINK_STATE))) {
7257 sc->unexpected_attention_count++;
7259 BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
7260 __FILE__, __LINE__, sc->status_block->status_attn_bits);
7263 if (unexpected_attention_sim_control == 0)
7264 bce_breakpoint(sc));
7266 bce_init_locked(sc);
7270 /* Check for any completed RX frames. */
7271 if (hw_rx_cons != sc->hw_rx_cons)
7274 /* Check for any completed TX frames. */
7275 if (hw_tx_cons != sc->hw_tx_cons)
7278 /* Save the status block index value for use during the next interrupt. */
7279 sc->last_status_idx = sc->status_block->status_idx;
7281 /* Prevent speculative reads from getting ahead of the status block. */
7282 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
7283 BUS_SPACE_BARRIER_READ);
7285 /* If there's no work left then exit the interrupt service routine. */
7286 hw_rx_cons = bce_get_hw_rx_cons(sc);
7287 hw_tx_cons = bce_get_hw_tx_cons(sc);
7289 if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
7294 bus_dmamap_sync(sc->status_tag, sc->status_map,
7295 BUS_DMASYNC_PREREAD);
7297 /* Re-enable interrupts. */
7298 bce_enable_intr(sc, 0);
7300 /* Handle any frames that arrived while handling the interrupt. */
7301 if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
7302 bce_start_locked(ifp);
7307 DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
7311 /****************************************************************************/
7312 /* Programs the various packet receive modes (broadcast and multicast). */
7316 /****************************************************************************/
7318 bce_set_rx_mode(struct bce_softc *sc)
7321 struct ifmultiaddr *ifma;
7322 u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
7323 u32 rx_mode, sort_mode;
7326 DBENTER(BCE_VERBOSE_MISC);
7328 BCE_LOCK_ASSERT(sc);
7332 /* Initialize receive mode default settings. */
7333 rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
7334 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
7335 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
7338 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
7341 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
7342 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
7343 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
7346 * Check for promiscuous, all multicast, or selected
7347 * multicast address filtering.
7349 if (ifp->if_flags & IFF_PROMISC) {
7350 DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
7352 /* Enable promiscuous mode. */
7353 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
7354 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
7355 } else if (ifp->if_flags & IFF_ALLMULTI) {
7356 DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
7358 /* Enable all multicast addresses. */
7359 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
7360 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
7362 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
7364 /* Accept one or more multicast(s). */
7365 DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
7367 if_maddr_rlock(ifp);
7368 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
7369 if (ifma->ifma_addr->sa_family != AF_LINK)
7371 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
7372 ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
7373 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
7375 if_maddr_runlock(ifp);
7377 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
7378 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
7380 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
7383 /* Only make changes if the recive mode has actually changed. */
7384 if (rx_mode != sc->rx_mode) {
7385 DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
7388 sc->rx_mode = rx_mode;
7389 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
7392 /* Disable and clear the exisitng sort before enabling a new sort. */
7393 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
7394 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
7395 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
7397 DBEXIT(BCE_VERBOSE_MISC);
7401 /****************************************************************************/
7402 /* Called periodically to updates statistics from the controllers */
7403 /* statistics block. */
7407 /****************************************************************************/
7409 bce_stats_update(struct bce_softc *sc)
7412 struct statistics_block *stats;
7414 DBENTER(BCE_EXTREME_MISC);
7418 stats = (struct statistics_block *) sc->stats_block;
7421 * Certain controllers don't report
7422 * carrier sense errors correctly.
7423 * See errata E11_5708CA0_1165.
7425 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
7426 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
7427 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
7430 * Update the sysctl statistics from the
7431 * hardware statistics.
7433 sc->stat_IfHCInOctets =
7434 ((u64) stats->stat_IfHCInOctets_hi << 32) +
7435 (u64) stats->stat_IfHCInOctets_lo;
7437 sc->stat_IfHCInBadOctets =
7438 ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
7439 (u64) stats->stat_IfHCInBadOctets_lo;
7441 sc->stat_IfHCOutOctets =
7442 ((u64) stats->stat_IfHCOutOctets_hi << 32) +
7443 (u64) stats->stat_IfHCOutOctets_lo;
7445 sc->stat_IfHCOutBadOctets =
7446 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
7447 (u64) stats->stat_IfHCOutBadOctets_lo;
7449 sc->stat_IfHCInUcastPkts =
7450 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
7451 (u64) stats->stat_IfHCInUcastPkts_lo;
7453 sc->stat_IfHCInMulticastPkts =
7454 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
7455 (u64) stats->stat_IfHCInMulticastPkts_lo;
7457 sc->stat_IfHCInBroadcastPkts =
7458 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
7459 (u64) stats->stat_IfHCInBroadcastPkts_lo;
7461 sc->stat_IfHCOutUcastPkts =
7462 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
7463 (u64) stats->stat_IfHCOutUcastPkts_lo;
7465 sc->stat_IfHCOutMulticastPkts =
7466 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
7467 (u64) stats->stat_IfHCOutMulticastPkts_lo;
7469 sc->stat_IfHCOutBroadcastPkts =
7470 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
7471 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
7473 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
7474 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
7476 sc->stat_Dot3StatsCarrierSenseErrors =
7477 stats->stat_Dot3StatsCarrierSenseErrors;
7479 sc->stat_Dot3StatsFCSErrors =
7480 stats->stat_Dot3StatsFCSErrors;
7482 sc->stat_Dot3StatsAlignmentErrors =
7483 stats->stat_Dot3StatsAlignmentErrors;
7485 sc->stat_Dot3StatsSingleCollisionFrames =
7486 stats->stat_Dot3StatsSingleCollisionFrames;
7488 sc->stat_Dot3StatsMultipleCollisionFrames =
7489 stats->stat_Dot3StatsMultipleCollisionFrames;
7491 sc->stat_Dot3StatsDeferredTransmissions =
7492 stats->stat_Dot3StatsDeferredTransmissions;
7494 sc->stat_Dot3StatsExcessiveCollisions =
7495 stats->stat_Dot3StatsExcessiveCollisions;
7497 sc->stat_Dot3StatsLateCollisions =
7498 stats->stat_Dot3StatsLateCollisions;
7500 sc->stat_EtherStatsCollisions =
7501 stats->stat_EtherStatsCollisions;
7503 sc->stat_EtherStatsFragments =
7504 stats->stat_EtherStatsFragments;
7506 sc->stat_EtherStatsJabbers =
7507 stats->stat_EtherStatsJabbers;
7509 sc->stat_EtherStatsUndersizePkts =
7510 stats->stat_EtherStatsUndersizePkts;
7512 sc->stat_EtherStatsOversizePkts =
7513 stats->stat_EtherStatsOversizePkts;
7515 sc->stat_EtherStatsPktsRx64Octets =
7516 stats->stat_EtherStatsPktsRx64Octets;
7518 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
7519 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
7521 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
7522 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
7524 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
7525 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
7527 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
7528 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
7530 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
7531 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
7533 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
7534 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
7536 sc->stat_EtherStatsPktsTx64Octets =
7537 stats->stat_EtherStatsPktsTx64Octets;
7539 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
7540 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
7542 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
7543 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
7545 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
7546 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
7548 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
7549 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
7551 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
7552 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
7554 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
7555 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
7557 sc->stat_XonPauseFramesReceived =
7558 stats->stat_XonPauseFramesReceived;
7560 sc->stat_XoffPauseFramesReceived =
7561 stats->stat_XoffPauseFramesReceived;
7563 sc->stat_OutXonSent =
7564 stats->stat_OutXonSent;
7566 sc->stat_OutXoffSent =
7567 stats->stat_OutXoffSent;
7569 sc->stat_FlowControlDone =
7570 stats->stat_FlowControlDone;
7572 sc->stat_MacControlFramesReceived =
7573 stats->stat_MacControlFramesReceived;
7575 sc->stat_XoffStateEntered =
7576 stats->stat_XoffStateEntered;
7578 sc->stat_IfInFramesL2FilterDiscards =
7579 stats->stat_IfInFramesL2FilterDiscards;
7581 sc->stat_IfInRuleCheckerDiscards =
7582 stats->stat_IfInRuleCheckerDiscards;
7584 sc->stat_IfInFTQDiscards =
7585 stats->stat_IfInFTQDiscards;
7587 sc->stat_IfInMBUFDiscards =
7588 stats->stat_IfInMBUFDiscards;
7590 sc->stat_IfInRuleCheckerP4Hit =
7591 stats->stat_IfInRuleCheckerP4Hit;
7593 sc->stat_CatchupInRuleCheckerDiscards =
7594 stats->stat_CatchupInRuleCheckerDiscards;
7596 sc->stat_CatchupInFTQDiscards =
7597 stats->stat_CatchupInFTQDiscards;
7599 sc->stat_CatchupInMBUFDiscards =
7600 stats->stat_CatchupInMBUFDiscards;
7602 sc->stat_CatchupInRuleCheckerP4Hit =
7603 stats->stat_CatchupInRuleCheckerP4Hit;
7605 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
7608 * Update the interface statistics from the
7609 * hardware statistics.
7611 ifp->if_collisions =
7612 (u_long) sc->stat_EtherStatsCollisions;
7614 /* ToDo: This method loses soft errors. */
7616 (u_long) sc->stat_EtherStatsUndersizePkts +
7617 (u_long) sc->stat_EtherStatsOversizePkts +
7618 (u_long) sc->stat_IfInMBUFDiscards +
7619 (u_long) sc->stat_Dot3StatsAlignmentErrors +
7620 (u_long) sc->stat_Dot3StatsFCSErrors +
7621 (u_long) sc->stat_IfInRuleCheckerDiscards +
7622 (u_long) sc->stat_IfInFTQDiscards +
7623 (u_long) sc->com_no_buffers;
7625 /* ToDo: This method loses soft errors. */
7627 (u_long) sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
7628 (u_long) sc->stat_Dot3StatsExcessiveCollisions +
7629 (u_long) sc->stat_Dot3StatsLateCollisions;
7631 /* ToDo: Add additional statistics. */
7633 DBEXIT(BCE_EXTREME_MISC);
7637 /****************************************************************************/
7638 /* Periodic function to notify the bootcode that the driver is still */
7643 /****************************************************************************/
7645 bce_pulse(void *xsc)
7647 struct bce_softc *sc = xsc;
7650 DBENTER(BCE_EXTREME_MISC);
7652 BCE_LOCK_ASSERT(sc);
7654 /* Tell the firmware that the driver is still running. */
7655 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
7656 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
7658 /* Schedule the next pulse. */
7659 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
7661 DBEXIT(BCE_EXTREME_MISC);
7665 /****************************************************************************/
7666 /* Periodic function to perform maintenance tasks. */
7670 /****************************************************************************/
7674 struct bce_softc *sc = xsc;
7675 struct mii_data *mii;
7680 DBENTER(BCE_EXTREME_MISC);
7682 BCE_LOCK_ASSERT(sc);
7684 /* Schedule the next tick. */
7685 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
7687 /* Update the statistics from the hardware statistics block. */
7688 bce_stats_update(sc);
7690 /* Top off the receive and page chains. */
7691 #ifdef BCE_JUMBO_HDRSPLIT
7692 bce_fill_pg_chain(sc);
7694 bce_fill_rx_chain(sc);
7696 /* Check that chip hasn't hung. */
7699 /* If link is up already up then we're done. */
7703 /* Link is down. Check what the PHY's doing. */
7704 mii = device_get_softc(sc->bce_miibus);
7707 /* Check if the link has come up. */
7708 if ((mii->mii_media_status & IFM_ACTIVE) &&
7709 (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) {
7710 DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n", __FUNCTION__);
7712 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
7713 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
7715 BCE_PRINTF("Gigabit link up!\n");
7716 /* Now that link is up, handle any outstanding TX traffic. */
7717 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
7718 DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found pending TX traffic.\n",
7720 bce_start_locked(ifp);
7725 DBEXIT(BCE_EXTREME_MISC);
7731 /****************************************************************************/
7732 /* Allows the driver state to be dumped through the sysctl interface. */
7735 /* 0 for success, positive value for failure. */
7736 /****************************************************************************/
7738 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
7742 struct bce_softc *sc;
7745 error = sysctl_handle_int(oidp, &result, 0, req);
7747 if (error || !req->newptr)
7751 sc = (struct bce_softc *)arg1;
7752 bce_dump_driver_state(sc);
7759 /****************************************************************************/
7760 /* Allows the hardware state to be dumped through the sysctl interface. */
7763 /* 0 for success, positive value for failure. */
7764 /****************************************************************************/
7766 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
7770 struct bce_softc *sc;
7773 error = sysctl_handle_int(oidp, &result, 0, req);
7775 if (error || !req->newptr)
7779 sc = (struct bce_softc *)arg1;
7780 bce_dump_hw_state(sc);
7787 /****************************************************************************/
7788 /* Allows the bootcode state to be dumped through the sysctl interface. */
7791 /* 0 for success, positive value for failure. */
7792 /****************************************************************************/
7794 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
7798 struct bce_softc *sc;
7801 error = sysctl_handle_int(oidp, &result, 0, req);
7803 if (error || !req->newptr)
7807 sc = (struct bce_softc *)arg1;
7808 bce_dump_bc_state(sc);
7815 /****************************************************************************/
7816 /* Provides a sysctl interface to allow dumping the RX chain. */
7819 /* 0 for success, positive value for failure. */
7820 /****************************************************************************/
7822 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
7826 struct bce_softc *sc;
7829 error = sysctl_handle_int(oidp, &result, 0, req);
7831 if (error || !req->newptr)
7835 sc = (struct bce_softc *)arg1;
7836 bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
7843 /****************************************************************************/
7844 /* Provides a sysctl interface to allow dumping the TX chain. */
7847 /* 0 for success, positive value for failure. */
7848 /****************************************************************************/
7850 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
7854 struct bce_softc *sc;
7857 error = sysctl_handle_int(oidp, &result, 0, req);
7859 if (error || !req->newptr)
7863 sc = (struct bce_softc *)arg1;
7864 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
7871 #ifdef BCE_JUMBO_HDRSPLIT
7872 /****************************************************************************/
7873 /* Provides a sysctl interface to allow dumping the page chain. */
7876 /* 0 for success, positive value for failure. */
7877 /****************************************************************************/
7879 bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
7883 struct bce_softc *sc;
7886 error = sysctl_handle_int(oidp, &result, 0, req);
7888 if (error || !req->newptr)
7892 sc = (struct bce_softc *)arg1;
7893 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
7900 /****************************************************************************/
7901 /* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */
7902 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
7905 /* 0 for success, positive value for failure. */
7906 /****************************************************************************/
7908 bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS)
7910 struct bce_softc *sc = (struct bce_softc *)arg1;
7914 u8 *data = (u8 *) val;
7917 error = sysctl_handle_int(oidp, &result, 0, req);
7918 if (error || (req->newptr == NULL))
7921 bce_nvram_read(sc, result, data, 4);
7922 BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0]));
7928 /****************************************************************************/
7929 /* Provides a sysctl interface to allow reading arbitrary registers in the */
7930 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
7933 /* 0 for success, positive value for failure. */
7934 /****************************************************************************/
7936 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
7938 struct bce_softc *sc = (struct bce_softc *)arg1;
7943 error = sysctl_handle_int(oidp, &result, 0, req);
7944 if (error || (req->newptr == NULL))
7947 /* Make sure the register is accessible. */
7948 if (result < 0x8000) {
7949 val = REG_RD(sc, result);
7950 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7951 } else if (result < 0x0280000) {
7952 val = REG_RD_IND(sc, result);
7953 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
7960 /****************************************************************************/
7961 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */
7962 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
7965 /* 0 for success, positive value for failure. */
7966 /****************************************************************************/
7968 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
7970 struct bce_softc *sc;
7976 error = sysctl_handle_int(oidp, &result, 0, req);
7977 if (error || (req->newptr == NULL))
7980 /* Make sure the register is accessible. */
7981 if (result < 0x20) {
7982 sc = (struct bce_softc *)arg1;
7984 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
7985 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
7991 /****************************************************************************/
7992 /* Provides a sysctl interface to allow reading a CID. */
7995 /* 0 for success, positive value for failure. */
7996 /****************************************************************************/
7998 bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS)
8000 struct bce_softc *sc;
8005 error = sysctl_handle_int(oidp, &result, 0, req);
8006 if (error || (req->newptr == NULL))
8009 /* Make sure the register is accessible. */
8010 if (result <= TX_CID) {
8011 sc = (struct bce_softc *)arg1;
8012 bce_dump_ctx(sc, result);
8019 /****************************************************************************/
8020 /* Provides a sysctl interface to forcing the driver to dump state and */
8021 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
8024 /* 0 for success, positive value for failure. */
8025 /****************************************************************************/
8027 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
8031 struct bce_softc *sc;
8034 error = sysctl_handle_int(oidp, &result, 0, req);
8036 if (error || !req->newptr)
8040 sc = (struct bce_softc *)arg1;
8049 /****************************************************************************/
8050 /* Adds any sysctl parameters for tuning or debugging purposes. */
8053 /* 0 for success, positive value for failure. */
8054 /****************************************************************************/
8056 bce_add_sysctls(struct bce_softc *sc)
8058 struct sysctl_ctx_list *ctx;
8059 struct sysctl_oid_list *children;
8061 DBENTER(BCE_VERBOSE_MISC);
8063 ctx = device_get_sysctl_ctx(sc->bce_dev);
8064 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
8067 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8068 "l2fhdr_error_sim_control",
8069 CTLFLAG_RW, &l2fhdr_error_sim_control,
8070 0, "Debug control to force l2fhdr errors");
8072 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8073 "l2fhdr_error_sim_count",
8074 CTLFLAG_RD, &sc->l2fhdr_error_sim_count,
8075 0, "Number of simulated l2_fhdr errors");
8078 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8079 "l2fhdr_error_count",
8080 CTLFLAG_RD, &sc->l2fhdr_error_count,
8081 0, "Number of l2_fhdr errors");
8084 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8085 "mbuf_alloc_failed_sim_control",
8086 CTLFLAG_RW, &mbuf_alloc_failed_sim_control,
8087 0, "Debug control to force mbuf allocation failures");
8089 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8090 "mbuf_alloc_failed_sim_count",
8091 CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count,
8092 0, "Number of simulated mbuf cluster allocation failures");
8095 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8096 "mbuf_alloc_failed_count",
8097 CTLFLAG_RD, &sc->mbuf_alloc_failed_count,
8098 0, "Number of mbuf allocation failures");
8100 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8101 "fragmented_mbuf_count",
8102 CTLFLAG_RD, &sc->fragmented_mbuf_count,
8103 0, "Number of fragmented mbufs");
8106 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8107 "dma_map_addr_failed_sim_control",
8108 CTLFLAG_RW, &dma_map_addr_failed_sim_control,
8109 0, "Debug control to force DMA mapping failures");
8111 /* ToDo: Figure out how to update this value in bce_dma_map_addr(). */
8112 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8113 "dma_map_addr_failed_sim_count",
8114 CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count,
8115 0, "Number of simulated DMA mapping failures");
8119 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8120 "dma_map_addr_rx_failed_count",
8121 CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count,
8122 0, "Number of RX DMA mapping failures");
8124 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8125 "dma_map_addr_tx_failed_count",
8126 CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count,
8127 0, "Number of TX DMA mapping failures");
8130 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8131 "unexpected_attention_sim_control",
8132 CTLFLAG_RW, &unexpected_attention_sim_control,
8133 0, "Debug control to simulate unexpected attentions");
8135 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8136 "unexpected_attention_sim_count",
8137 CTLFLAG_RW, &sc->unexpected_attention_sim_count,
8138 0, "Number of simulated unexpected attentions");
8141 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8142 "unexpected_attention_count",
8143 CTLFLAG_RW, &sc->unexpected_attention_count,
8144 0, "Number of unexpected attentions");
8147 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8148 "debug_bootcode_running_failure",
8149 CTLFLAG_RW, &bootcode_running_failure_sim_control,
8150 0, "Debug control to force bootcode running failures");
8152 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8154 CTLFLAG_RD, &sc->rx_low_watermark,
8155 0, "Lowest level of free rx_bd's");
8157 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8159 CTLFLAG_RD, &sc->rx_empty_count,
8160 0, "Number of times the RX chain was empty");
8162 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8164 CTLFLAG_RD, &sc->tx_hi_watermark,
8165 0, "Highest level of used tx_bd's");
8167 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8169 CTLFLAG_RD, &sc->tx_full_count,
8170 0, "Number of times the TX chain was full");
8172 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
8173 "requested_tso_frames",
8174 CTLFLAG_RD, &sc->requested_tso_frames,
8175 0, "Number of TSO frames received");
8177 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8179 CTLFLAG_RD, &sc->rx_interrupts,
8180 0, "Number of RX interrupts");
8182 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8184 CTLFLAG_RD, &sc->tx_interrupts,
8185 0, "Number of TX interrupts");
8187 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8189 CTLFLAG_RD, &sc->rx_intr_time,
8190 "RX interrupt time");
8192 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8194 CTLFLAG_RD, &sc->tx_intr_time,
8195 "TX interrupt time");
8198 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8199 "stat_IfHcInOctets",
8200 CTLFLAG_RD, &sc->stat_IfHCInOctets,
8203 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8204 "stat_IfHCInBadOctets",
8205 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
8206 "Bad bytes received");
8208 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8209 "stat_IfHCOutOctets",
8210 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
8213 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8214 "stat_IfHCOutBadOctets",
8215 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
8218 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8219 "stat_IfHCInUcastPkts",
8220 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
8221 "Unicast packets received");
8223 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8224 "stat_IfHCInMulticastPkts",
8225 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
8226 "Multicast packets received");
8228 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8229 "stat_IfHCInBroadcastPkts",
8230 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
8231 "Broadcast packets received");
8233 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8234 "stat_IfHCOutUcastPkts",
8235 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
8236 "Unicast packets sent");
8238 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8239 "stat_IfHCOutMulticastPkts",
8240 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
8241 "Multicast packets sent");
8243 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
8244 "stat_IfHCOutBroadcastPkts",
8245 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
8246 "Broadcast packets sent");
8248 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8249 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
8250 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
8251 0, "Internal MAC transmit errors");
8253 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8254 "stat_Dot3StatsCarrierSenseErrors",
8255 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
8256 0, "Carrier sense errors");
8258 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8259 "stat_Dot3StatsFCSErrors",
8260 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
8261 0, "Frame check sequence errors");
8263 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8264 "stat_Dot3StatsAlignmentErrors",
8265 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
8266 0, "Alignment errors");
8268 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8269 "stat_Dot3StatsSingleCollisionFrames",
8270 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
8271 0, "Single Collision Frames");
8273 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8274 "stat_Dot3StatsMultipleCollisionFrames",
8275 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
8276 0, "Multiple Collision Frames");
8278 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8279 "stat_Dot3StatsDeferredTransmissions",
8280 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
8281 0, "Deferred Transmissions");
8283 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8284 "stat_Dot3StatsExcessiveCollisions",
8285 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
8286 0, "Excessive Collisions");
8288 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8289 "stat_Dot3StatsLateCollisions",
8290 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
8291 0, "Late Collisions");
8293 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8294 "stat_EtherStatsCollisions",
8295 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
8298 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8299 "stat_EtherStatsFragments",
8300 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
8303 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8304 "stat_EtherStatsJabbers",
8305 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
8308 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8309 "stat_EtherStatsUndersizePkts",
8310 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
8311 0, "Undersize packets");
8313 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8314 "stat_EtherStatsOversizePkts",
8315 CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts,
8316 0, "stat_EtherStatsOversizePkts");
8318 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8319 "stat_EtherStatsPktsRx64Octets",
8320 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
8321 0, "Bytes received in 64 byte packets");
8323 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8324 "stat_EtherStatsPktsRx65Octetsto127Octets",
8325 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
8326 0, "Bytes received in 65 to 127 byte packets");
8328 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8329 "stat_EtherStatsPktsRx128Octetsto255Octets",
8330 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
8331 0, "Bytes received in 128 to 255 byte packets");
8333 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8334 "stat_EtherStatsPktsRx256Octetsto511Octets",
8335 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
8336 0, "Bytes received in 256 to 511 byte packets");
8338 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8339 "stat_EtherStatsPktsRx512Octetsto1023Octets",
8340 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
8341 0, "Bytes received in 512 to 1023 byte packets");
8343 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8344 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
8345 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
8346 0, "Bytes received in 1024 t0 1522 byte packets");
8348 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8349 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
8350 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
8351 0, "Bytes received in 1523 to 9022 byte packets");
8353 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8354 "stat_EtherStatsPktsTx64Octets",
8355 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
8356 0, "Bytes sent in 64 byte packets");
8358 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8359 "stat_EtherStatsPktsTx65Octetsto127Octets",
8360 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
8361 0, "Bytes sent in 65 to 127 byte packets");
8363 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8364 "stat_EtherStatsPktsTx128Octetsto255Octets",
8365 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
8366 0, "Bytes sent in 128 to 255 byte packets");
8368 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8369 "stat_EtherStatsPktsTx256Octetsto511Octets",
8370 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
8371 0, "Bytes sent in 256 to 511 byte packets");
8373 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8374 "stat_EtherStatsPktsTx512Octetsto1023Octets",
8375 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
8376 0, "Bytes sent in 512 to 1023 byte packets");
8378 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8379 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
8380 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
8381 0, "Bytes sent in 1024 to 1522 byte packets");
8383 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8384 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
8385 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
8386 0, "Bytes sent in 1523 to 9022 byte packets");
8388 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8389 "stat_XonPauseFramesReceived",
8390 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
8391 0, "XON pause frames receved");
8393 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8394 "stat_XoffPauseFramesReceived",
8395 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
8396 0, "XOFF pause frames received");
8398 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8400 CTLFLAG_RD, &sc->stat_OutXonSent,
8401 0, "XON pause frames sent");
8403 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8405 CTLFLAG_RD, &sc->stat_OutXoffSent,
8406 0, "XOFF pause frames sent");
8408 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8409 "stat_FlowControlDone",
8410 CTLFLAG_RD, &sc->stat_FlowControlDone,
8411 0, "Flow control done");
8413 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8414 "stat_MacControlFramesReceived",
8415 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
8416 0, "MAC control frames received");
8418 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8419 "stat_XoffStateEntered",
8420 CTLFLAG_RD, &sc->stat_XoffStateEntered,
8421 0, "XOFF state entered");
8423 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8424 "stat_IfInFramesL2FilterDiscards",
8425 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
8426 0, "Received L2 packets discarded");
8428 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8429 "stat_IfInRuleCheckerDiscards",
8430 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
8431 0, "Received packets discarded by rule");
8433 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8434 "stat_IfInFTQDiscards",
8435 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
8436 0, "Received packet FTQ discards");
8438 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8439 "stat_IfInMBUFDiscards",
8440 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
8441 0, "Received packets discarded due to lack of controller buffer memory");
8443 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8444 "stat_IfInRuleCheckerP4Hit",
8445 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
8446 0, "Received packets rule checker hits");
8448 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8449 "stat_CatchupInRuleCheckerDiscards",
8450 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
8451 0, "Received packets discarded in Catchup path");
8453 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8454 "stat_CatchupInFTQDiscards",
8455 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
8456 0, "Received packets discarded in FTQ in Catchup path");
8458 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8459 "stat_CatchupInMBUFDiscards",
8460 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
8461 0, "Received packets discarded in controller buffer memory in Catchup path");
8463 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8464 "stat_CatchupInRuleCheckerP4Hit",
8465 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
8466 0, "Received packets rule checker hits in Catchup path");
8468 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
8470 CTLFLAG_RD, &sc->com_no_buffers,
8471 0, "Valid packets received but no RX buffers available");
8474 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8475 "driver_state", CTLTYPE_INT | CTLFLAG_RW,
8477 bce_sysctl_driver_state, "I", "Drive state information");
8479 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8480 "hw_state", CTLTYPE_INT | CTLFLAG_RW,
8482 bce_sysctl_hw_state, "I", "Hardware state information");
8484 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8485 "bc_state", CTLTYPE_INT | CTLFLAG_RW,
8487 bce_sysctl_bc_state, "I", "Bootcode state information");
8489 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8490 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
8492 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
8494 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8495 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
8497 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
8499 #ifdef BCE_JUMBO_HDRSPLIT
8500 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8501 "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
8503 bce_sysctl_dump_pg_chain, "I", "Dump page chain");
8505 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8506 "dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
8508 bce_sysctl_dump_ctx, "I", "Dump context memory");
8510 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8511 "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
8513 bce_sysctl_breakpoint, "I", "Driver breakpoint");
8515 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8516 "reg_read", CTLTYPE_INT | CTLFLAG_RW,
8518 bce_sysctl_reg_read, "I", "Register read");
8520 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8521 "nvram_read", CTLTYPE_INT | CTLFLAG_RW,
8523 bce_sysctl_nvram_read, "I", "NVRAM read");
8525 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
8526 "phy_read", CTLTYPE_INT | CTLFLAG_RW,
8528 bce_sysctl_phy_read, "I", "PHY register read");
8532 DBEXIT(BCE_VERBOSE_MISC);
8536 /****************************************************************************/
8537 /* BCE Debug Routines */
8538 /****************************************************************************/
8541 /****************************************************************************/
8542 /* Freezes the controller to allow for a cohesive state dump. */
8546 /****************************************************************************/
8548 bce_freeze_controller(struct bce_softc *sc)
8551 val = REG_RD(sc, BCE_MISC_COMMAND);
8552 val |= BCE_MISC_COMMAND_DISABLE_ALL;
8553 REG_WR(sc, BCE_MISC_COMMAND, val);
8557 /****************************************************************************/
8558 /* Unfreezes the controller after a freeze operation. This may not always */
8559 /* work and the controller will require a reset! */
8563 /****************************************************************************/
8565 bce_unfreeze_controller(struct bce_softc *sc)
8568 val = REG_RD(sc, BCE_MISC_COMMAND);
8569 val |= BCE_MISC_COMMAND_ENABLE_ALL;
8570 REG_WR(sc, BCE_MISC_COMMAND, val);
8574 /****************************************************************************/
8575 /* Prints out Ethernet frame information from an mbuf. */
8577 /* Partially decode an Ethernet frame to look at some important headers. */
8581 /****************************************************************************/
8583 bce_dump_enet(struct bce_softc *sc, struct mbuf *m)
8585 struct ether_vlan_header *eh;
8594 "-----------------------------"
8596 "-----------------------------\n");
8598 eh = mtod(m, struct ether_vlan_header *);
8600 /* Handle VLAN encapsulation if present. */
8601 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
8602 etype = ntohs(eh->evl_proto);
8603 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8605 etype = ntohs(eh->evl_encap_proto);
8606 ehlen = ETHER_HDR_LEN;
8609 /* ToDo: Add VLAN output. */
8610 BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n",
8611 eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen);
8615 ip = (struct ip *)(m->m_data + ehlen);
8616 BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, len = %d bytes, "
8617 "protocol = 0x%02X, xsum = 0x%04X\n",
8618 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
8619 ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum));
8623 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8624 BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = %d bytes, "
8625 "flags = 0x%b, csum = 0x%04X\n",
8626 ntohs(th->th_dport), ntohs(th->th_sport), (th->th_off << 2),
8627 th->th_flags, "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST\02SYN\01FIN",
8631 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
8632 BCE_PRINTF("-udp: dest = %d, src = %d, len = %d bytes, "
8633 "csum = 0x%04X\n", ntohs(uh->uh_dport), ntohs(uh->uh_sport),
8634 ntohs(uh->uh_ulen), ntohs(uh->uh_sum));
8637 BCE_PRINTF("icmp:\n");
8640 BCE_PRINTF("----: Other IP protocol.\n");
8643 case ETHERTYPE_IPV6:
8644 BCE_PRINTF("ipv6: No decode supported.\n");
8647 BCE_PRINTF("-arp: ");
8648 ah = (struct arphdr *) (m->m_data + ehlen);
8649 switch (ntohs(ah->ar_op)) {
8650 case ARPOP_REVREQUEST:
8651 printf("reverse ARP request\n");
8653 case ARPOP_REVREPLY:
8654 printf("reverse ARP reply\n");
8657 printf("ARP request\n");
8660 printf("ARP reply\n");
8663 printf("other ARP operation\n");
8667 BCE_PRINTF("----: Other protocol.\n");
8671 "-----------------------------"
8673 "-----------------------------\n");
8677 /****************************************************************************/
8678 /* Prints out information about an mbuf. */
8682 /****************************************************************************/
8683 static __attribute__ ((noinline)) void
8684 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
8686 struct mbuf *mp = m;
8689 BCE_PRINTF("mbuf: null pointer\n");
8694 BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
8695 mp, mp->m_len, mp->m_flags,
8696 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
8699 if (mp->m_flags & M_PKTHDR) {
8700 BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
8701 mp->m_pkthdr.len, mp->m_flags,
8702 "\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
8703 "\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
8704 mp->m_pkthdr.csum_flags,
8705 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
8706 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
8707 "\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
8710 if (mp->m_flags & M_EXT) {
8711 BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
8712 mp->m_ext.ext_buf, mp->m_ext.ext_size);
8713 switch (mp->m_ext.ext_type) {
8714 case EXT_CLUSTER: printf("EXT_CLUSTER\n"); break;
8715 case EXT_SFBUF: printf("EXT_SFBUF\n"); break;
8716 case EXT_JUMBO9: printf("EXT_JUMBO9\n"); break;
8717 case EXT_JUMBO16: printf("EXT_JUMBO16\n"); break;
8718 case EXT_PACKET: printf("EXT_PACKET\n"); break;
8719 case EXT_MBUF: printf("EXT_MBUF\n"); break;
8720 case EXT_NET_DRV: printf("EXT_NET_DRV\n"); break;
8721 case EXT_MOD_TYPE: printf("EXT_MDD_TYPE\n"); break;
8722 case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
8723 case EXT_EXTREF: printf("EXT_EXTREF\n"); break;
8724 default: printf("UNKNOWN\n");
8733 /****************************************************************************/
8734 /* Prints out the mbufs in the TX mbuf chain. */
8738 /****************************************************************************/
8739 static __attribute__ ((noinline)) void
8740 bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8745 "----------------------------"
8747 "----------------------------\n");
8749 for (int i = 0; i < count; i++) {
8750 m = sc->tx_mbuf_ptr[chain_prod];
8751 BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
8752 bce_dump_mbuf(sc, m);
8753 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
8757 "----------------------------"
8759 "----------------------------\n");
8763 /****************************************************************************/
8764 /* Prints out the mbufs in the RX mbuf chain. */
8768 /****************************************************************************/
8769 static __attribute__ ((noinline)) void
8770 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8775 "----------------------------"
8777 "----------------------------\n");
8779 for (int i = 0; i < count; i++) {
8780 m = sc->rx_mbuf_ptr[chain_prod];
8781 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
8782 bce_dump_mbuf(sc, m);
8783 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
8788 "----------------------------"
8790 "----------------------------\n");
8794 #ifdef BCE_JUMBO_HDRSPLIT
8795 /****************************************************************************/
8796 /* Prints out the mbufs in the mbuf page chain. */
8800 /****************************************************************************/
8801 static __attribute__ ((noinline)) void
8802 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
8807 "----------------------------"
8809 "----------------------------\n");
8811 for (int i = 0; i < count; i++) {
8812 m = sc->pg_mbuf_ptr[chain_prod];
8813 BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
8814 bce_dump_mbuf(sc, m);
8815 chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
8820 "----------------------------"
8822 "----------------------------\n");
8827 /****************************************************************************/
8828 /* Prints out a tx_bd structure. */
8832 /****************************************************************************/
8833 static __attribute__ ((noinline)) void
8834 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
8836 if (idx > MAX_TX_BD)
8837 /* Index out of range. */
8838 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
8839 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
8840 /* TX Chain page pointer. */
8841 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8842 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
8844 /* Normal tx_bd entry. */
8845 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8846 "vlan tag= 0x%04X, flags = 0x%04X (", idx,
8847 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
8848 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
8851 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
8852 printf(" CONN_FAULT");
8854 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
8855 printf(" TCP_UDP_CKSUM");
8857 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
8858 printf(" IP_CKSUM");
8860 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
8863 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
8864 printf(" COAL_NOW");
8866 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
8867 printf(" DONT_GEN_CRC");
8869 if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
8872 if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
8875 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
8878 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
8879 printf(" OPTION_WORD");
8881 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
8884 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
8893 /****************************************************************************/
8894 /* Prints out a rx_bd structure. */
8898 /****************************************************************************/
8899 static __attribute__ ((noinline)) void
8900 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
8902 if (idx > MAX_RX_BD)
8903 /* Index out of range. */
8904 BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
8905 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
8906 /* RX Chain page pointer. */
8907 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8908 idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
8910 /* Normal rx_bd entry. */
8911 BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8912 "flags = 0x%08X\n", idx,
8913 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
8914 rxbd->rx_bd_len, rxbd->rx_bd_flags);
8918 #ifdef BCE_JUMBO_HDRSPLIT
8919 /****************************************************************************/
8920 /* Prints out a rx_bd structure in the page chain. */
8924 /****************************************************************************/
8925 static __attribute__ ((noinline)) void
8926 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
8928 if (idx > MAX_PG_BD)
8929 /* Index out of range. */
8930 BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
8931 else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
8932 /* Page Chain page pointer. */
8933 BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
8934 idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
8936 /* Normal rx_bd entry. */
8937 BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
8938 "flags = 0x%08X\n", idx,
8939 pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
8940 pgbd->rx_bd_len, pgbd->rx_bd_flags);
8945 /****************************************************************************/
8946 /* Prints out a l2_fhdr structure. */
8950 /****************************************************************************/
8951 static __attribute__ ((noinline)) void
8952 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
8954 BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
8955 "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
8956 "tcp_udp_xsum = 0x%04X\n", idx,
8957 l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
8958 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
8959 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
8963 /****************************************************************************/
8964 /* Prints out context memory info. (Only useful for CID 0 to 16.) */
8968 /****************************************************************************/
8969 static __attribute__ ((noinline)) void
8970 bce_dump_ctx(struct bce_softc *sc, u16 cid)
8972 if (cid <= TX_CID) {
8974 "----------------------------"
8976 "----------------------------\n");
8978 BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid);
8980 if (cid == RX_CID) {
8981 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx "
8983 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX));
8984 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host byte sequence\n",
8985 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BSEQ));
8986 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n",
8987 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ));
8988 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer "
8989 "descriptor address\n",
8990 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI));
8991 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer "
8992 "descriptor address\n",
8993 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO));
8994 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer index\n",
8995 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDIDX));
8996 BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page "
8998 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_PG_BDIDX));
8999 BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page "
9001 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_PG_BUF_SIZE));
9002 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page "
9004 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_HI));
9005 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page "
9007 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_LO));
9008 BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page "
9010 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDIDX));
9011 } else if (cid == TX_CID) {
9012 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9013 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9014 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n",
9015 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE_XI));
9016 BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx cmd\n",
9017 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE_XI));
9018 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) h/w buffer "
9019 "descriptor address\n", CTX_RD(sc,
9020 GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI));
9021 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) h/w buffer "
9022 "descriptor address\n", CTX_RD(sc,
9023 GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI));
9024 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) host producer "
9025 "index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9026 BCE_L2CTX_TX_HOST_BIDX_XI));
9027 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) host byte "
9028 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
9029 BCE_L2CTX_TX_HOST_BSEQ_XI));
9031 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n",
9032 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE));
9033 BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n",
9034 CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE));
9035 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) h/w buffer "
9036 "descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
9037 BCE_L2CTX_TX_TBDR_BHADDR_HI));
9038 BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) h/w buffer "
9039 "descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid),
9040 BCE_L2CTX_TX_TBDR_BHADDR_LO));
9041 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host producer "
9042 "index\n", CTX_RD(sc, GET_CID_ADDR(cid),
9043 BCE_L2CTX_TX_HOST_BIDX));
9044 BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte "
9045 "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid),
9046 BCE_L2CTX_TX_HOST_BSEQ));
9049 BCE_PRINTF(" Unknown CID\n");
9052 "----------------------------"
9054 "----------------------------\n");
9056 for (int i = 0x0; i < 0x300; i += 0x10) {
9057 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
9058 CTX_RD(sc, GET_CID_ADDR(cid), i),
9059 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4),
9060 CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8),
9061 CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc));
9066 "----------------------------"
9068 "----------------------------\n");
9073 /****************************************************************************/
9074 /* Prints out the FTQ data. */
9078 /****************************************************************************/
9079 static __attribute__ ((noinline)) void
9080 bce_dump_ftqs(struct bce_softc *sc)
9082 u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val;
9085 "----------------------------"
9087 "----------------------------\n");
9089 BCE_PRINTF(" FTQ Command Control Depth_Now Max_Depth Valid_Cnt \n");
9090 BCE_PRINTF(" ------- ---------- ---------- ---------- ---------- ----------\n");
9092 /* Setup the generic statistic counters for the FTQ valid count. */
9093 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
9094 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) |
9095 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) |
9096 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT);
9097 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9099 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) |
9100 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) |
9101 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) |
9102 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT);
9103 REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val);
9105 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) |
9106 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) |
9107 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) |
9108 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT);
9109 REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val);
9111 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) |
9112 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) |
9113 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) |
9114 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT);
9115 REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val);
9117 /* Input queue to the Receive Lookup state machine */
9118 cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
9119 ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
9120 cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
9121 max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
9122 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9123 BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9124 cmd, ctl, cur_depth, max_depth, valid_cnt);
9126 /* Input queue to the Receive Processor */
9127 cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
9128 ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
9129 cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
9130 max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
9131 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9132 BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9133 cmd, ctl, cur_depth, max_depth, valid_cnt);
9135 /* Input queue to the Recevie Processor */
9136 cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
9137 ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
9138 cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
9139 max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
9140 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9141 BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9142 cmd, ctl, cur_depth, max_depth, valid_cnt);
9144 /* Input queue to the Receive Virtual to Physical state machine */
9145 cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
9146 ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
9147 cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
9148 max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
9149 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9150 BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9151 cmd, ctl, cur_depth, max_depth, valid_cnt);
9153 /* Input queue to the Recevie Virtual to Physical state machine */
9154 cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
9155 ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
9156 cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
9157 max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
9158 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
9159 BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9160 cmd, ctl, cur_depth, max_depth, valid_cnt);
9162 /* Input queue to the Receive Virtual to Physical state machine */
9163 cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
9164 ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
9165 cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
9166 max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
9167 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
9168 BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9169 cmd, ctl, cur_depth, max_depth, valid_cnt);
9171 /* Input queue to the Receive DMA state machine */
9172 cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
9173 ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
9174 cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9175 max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9176 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
9177 BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9178 cmd, ctl, cur_depth, max_depth, valid_cnt);
9180 /* Input queue to the Transmit Scheduler state machine */
9181 cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
9182 ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
9183 cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
9184 max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
9185 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
9186 BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9187 cmd, ctl, cur_depth, max_depth, valid_cnt);
9189 /* Input queue to the Transmit Buffer Descriptor state machine */
9190 cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
9191 ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
9192 cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
9193 max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
9194 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
9195 BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9196 cmd, ctl, cur_depth, max_depth, valid_cnt);
9198 /* Input queue to the Transmit Processor */
9199 cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
9200 ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
9201 cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
9202 max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
9203 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
9204 BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9205 cmd, ctl, cur_depth, max_depth, valid_cnt);
9207 /* Input queue to the Transmit DMA state machine */
9208 cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
9209 ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
9210 cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
9211 max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
9212 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
9213 BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9214 cmd, ctl, cur_depth, max_depth, valid_cnt);
9216 /* Input queue to the Transmit Patch-Up Processor */
9217 cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
9218 ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
9219 cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
9220 max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
9221 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
9222 BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9223 cmd, ctl, cur_depth, max_depth, valid_cnt);
9225 /* Input queue to the Transmit Assembler state machine */
9226 cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
9227 ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
9228 cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
9229 max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
9230 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
9231 BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9232 cmd, ctl, cur_depth, max_depth, valid_cnt);
9234 /* Input queue to the Completion Processor */
9235 cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
9236 ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
9237 cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
9238 max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
9239 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
9240 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9241 cmd, ctl, cur_depth, max_depth, valid_cnt);
9243 /* Input queue to the Completion Processor */
9244 cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
9245 ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
9246 cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
9247 max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
9248 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
9249 BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9250 cmd, ctl, cur_depth, max_depth, valid_cnt);
9252 /* Input queue to the Completion Processor */
9253 cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
9254 ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
9255 cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
9256 max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
9257 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
9258 BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9259 cmd, ctl, cur_depth, max_depth, valid_cnt);
9261 /* Setup the generic statistic counters for the FTQ valid count. */
9262 val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) |
9263 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) |
9264 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT);
9266 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9267 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716))
9268 val = val | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 24);
9269 REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val);
9271 /* Input queue to the Management Control Processor */
9272 cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
9273 ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
9274 cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9275 max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9276 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
9277 BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9278 cmd, ctl, cur_depth, max_depth, valid_cnt);
9280 /* Input queue to the Command Processor */
9281 cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
9282 ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
9283 cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
9284 max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
9285 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
9286 BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9287 cmd, ctl, cur_depth, max_depth, valid_cnt);
9289 /* Input queue to the Completion Scheduler state machine */
9290 cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
9291 ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
9292 cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
9293 max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
9294 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
9295 BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9296 cmd, ctl, cur_depth, max_depth, valid_cnt);
9298 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) ||
9299 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716)) {
9300 /* Input queue to the Receive Virtual to Physical Command Scheduler */
9301 cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD);
9302 ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL);
9303 cur_depth = (ctl & 0xFFC00000) >> 22;
9304 max_depth = (ctl & 0x003FF000) >> 12;
9305 valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
9306 BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
9307 cmd, ctl, cur_depth, max_depth, valid_cnt);
9311 "----------------------------"
9313 "----------------------------\n");
9317 /****************************************************************************/
9318 /* Prints out the TX chain. */
9322 /****************************************************************************/
9323 static __attribute__ ((noinline)) void
9324 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
9328 /* First some info about the tx_bd chain structure. */
9330 "----------------------------"
9332 "----------------------------\n");
9334 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n",
9335 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
9337 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
9338 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
9340 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD);
9343 "----------------------------"
9345 "----------------------------\n");
9347 /* Now print out the tx_bd's themselves. */
9348 for (int i = 0; i < count; i++) {
9349 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
9350 bce_dump_txbd(sc, tx_prod, txbd);
9351 tx_prod = NEXT_TX_BD(tx_prod);
9355 "----------------------------"
9357 "----------------------------\n");
9361 /****************************************************************************/
9362 /* Prints out the RX chain. */
9366 /****************************************************************************/
9367 static __attribute__ ((noinline)) void
9368 bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
9372 /* First some info about the rx_bd chain structure. */
9374 "----------------------------"
9376 "----------------------------\n");
9378 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n",
9379 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
9381 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9382 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
9384 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD);
9387 "----------------------------"
9389 "----------------------------\n");
9391 /* Now print out the rx_bd's themselves. */
9392 for (int i = 0; i < count; i++) {
9393 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
9394 bce_dump_rxbd(sc, rx_prod, rxbd);
9395 rx_prod = RX_CHAIN_IDX(rx_prod + 1);
9399 "----------------------------"
9401 "----------------------------\n");
9405 #ifdef BCE_JUMBO_HDRSPLIT
9406 /****************************************************************************/
9407 /* Prints out the page chain. */
9411 /****************************************************************************/
9412 static __attribute__ ((noinline)) void
9413 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
9417 /* First some info about the page chain structure. */
9419 "----------------------------"
9421 "----------------------------\n");
9423 BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n",
9424 (u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
9426 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
9427 (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
9429 BCE_PRINTF("total rx_bd = 0x%08X, max_pg_bd = 0x%08X\n",
9430 (u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
9433 "----------------------------"
9435 "----------------------------\n");
9437 /* Now print out the rx_bd's themselves. */
9438 for (int i = 0; i < count; i++) {
9439 pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
9440 bce_dump_pgbd(sc, pg_prod, pgbd);
9441 pg_prod = PG_CHAIN_IDX(pg_prod + 1);
9445 "----------------------------"
9447 "----------------------------\n");
9452 /****************************************************************************/
9453 /* Prints out the status block from host memory. */
9457 /****************************************************************************/
9458 static __attribute__ ((noinline)) void
9459 bce_dump_status_block(struct bce_softc *sc)
9461 struct status_block *sblk;
9463 sblk = sc->status_block;
9466 "----------------------------"
9468 "----------------------------\n");
9470 BCE_PRINTF(" 0x%08X - attn_bits\n",
9471 sblk->status_attn_bits);
9473 BCE_PRINTF(" 0x%08X - attn_bits_ack\n",
9474 sblk->status_attn_bits_ack);
9476 BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
9477 sblk->status_rx_quick_consumer_index0,
9478 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
9480 BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
9481 sblk->status_tx_quick_consumer_index0,
9482 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
9484 BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx);
9486 /* Theses indices are not used for normal L2 drivers. */
9487 if (sblk->status_rx_quick_consumer_index1)
9488 BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
9489 sblk->status_rx_quick_consumer_index1,
9490 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
9492 if (sblk->status_tx_quick_consumer_index1)
9493 BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
9494 sblk->status_tx_quick_consumer_index1,
9495 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
9497 if (sblk->status_rx_quick_consumer_index2)
9498 BCE_PRINTF("0x%04X(0x%04X)- rx_cons2\n",
9499 sblk->status_rx_quick_consumer_index2,
9500 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
9502 if (sblk->status_tx_quick_consumer_index2)
9503 BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
9504 sblk->status_tx_quick_consumer_index2,
9505 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
9507 if (sblk->status_rx_quick_consumer_index3)
9508 BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
9509 sblk->status_rx_quick_consumer_index3,
9510 (u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
9512 if (sblk->status_tx_quick_consumer_index3)
9513 BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
9514 sblk->status_tx_quick_consumer_index3,
9515 (u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
9517 if (sblk->status_rx_quick_consumer_index4 ||
9518 sblk->status_rx_quick_consumer_index5)
9519 BCE_PRINTF("rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
9520 sblk->status_rx_quick_consumer_index4,
9521 sblk->status_rx_quick_consumer_index5);
9523 if (sblk->status_rx_quick_consumer_index6 ||
9524 sblk->status_rx_quick_consumer_index7)
9525 BCE_PRINTF("rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
9526 sblk->status_rx_quick_consumer_index6,
9527 sblk->status_rx_quick_consumer_index7);
9529 if (sblk->status_rx_quick_consumer_index8 ||
9530 sblk->status_rx_quick_consumer_index9)
9531 BCE_PRINTF("rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
9532 sblk->status_rx_quick_consumer_index8,
9533 sblk->status_rx_quick_consumer_index9);
9535 if (sblk->status_rx_quick_consumer_index10 ||
9536 sblk->status_rx_quick_consumer_index11)
9537 BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
9538 sblk->status_rx_quick_consumer_index10,
9539 sblk->status_rx_quick_consumer_index11);
9541 if (sblk->status_rx_quick_consumer_index12 ||
9542 sblk->status_rx_quick_consumer_index13)
9543 BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
9544 sblk->status_rx_quick_consumer_index12,
9545 sblk->status_rx_quick_consumer_index13);
9547 if (sblk->status_rx_quick_consumer_index14 ||
9548 sblk->status_rx_quick_consumer_index15)
9549 BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
9550 sblk->status_rx_quick_consumer_index14,
9551 sblk->status_rx_quick_consumer_index15);
9553 if (sblk->status_completion_producer_index ||
9554 sblk->status_cmd_consumer_index)
9555 BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n",
9556 sblk->status_completion_producer_index,
9557 sblk->status_cmd_consumer_index);
9560 "----------------------------"
9562 "----------------------------\n");
9566 /****************************************************************************/
9567 /* Prints out the statistics block from host memory. */
9571 /****************************************************************************/
9572 static __attribute__ ((noinline)) void
9573 bce_dump_stats_block(struct bce_softc *sc)
9575 struct statistics_block *sblk;
9577 sblk = sc->stats_block;
9581 " Stats Block (All Stats Not Shown Are 0) "
9582 "---------------\n");
9584 if (sblk->stat_IfHCInOctets_hi
9585 || sblk->stat_IfHCInOctets_lo)
9586 BCE_PRINTF("0x%08X:%08X : "
9588 sblk->stat_IfHCInOctets_hi,
9589 sblk->stat_IfHCInOctets_lo);
9591 if (sblk->stat_IfHCInBadOctets_hi
9592 || sblk->stat_IfHCInBadOctets_lo)
9593 BCE_PRINTF("0x%08X:%08X : "
9594 "IfHcInBadOctets\n",
9595 sblk->stat_IfHCInBadOctets_hi,
9596 sblk->stat_IfHCInBadOctets_lo);
9598 if (sblk->stat_IfHCOutOctets_hi
9599 || sblk->stat_IfHCOutOctets_lo)
9600 BCE_PRINTF("0x%08X:%08X : "
9602 sblk->stat_IfHCOutOctets_hi,
9603 sblk->stat_IfHCOutOctets_lo);
9605 if (sblk->stat_IfHCOutBadOctets_hi
9606 || sblk->stat_IfHCOutBadOctets_lo)
9607 BCE_PRINTF("0x%08X:%08X : "
9608 "IfHcOutBadOctets\n",
9609 sblk->stat_IfHCOutBadOctets_hi,
9610 sblk->stat_IfHCOutBadOctets_lo);
9612 if (sblk->stat_IfHCInUcastPkts_hi
9613 || sblk->stat_IfHCInUcastPkts_lo)
9614 BCE_PRINTF("0x%08X:%08X : "
9615 "IfHcInUcastPkts\n",
9616 sblk->stat_IfHCInUcastPkts_hi,
9617 sblk->stat_IfHCInUcastPkts_lo);
9619 if (sblk->stat_IfHCInBroadcastPkts_hi
9620 || sblk->stat_IfHCInBroadcastPkts_lo)
9621 BCE_PRINTF("0x%08X:%08X : "
9622 "IfHcInBroadcastPkts\n",
9623 sblk->stat_IfHCInBroadcastPkts_hi,
9624 sblk->stat_IfHCInBroadcastPkts_lo);
9626 if (sblk->stat_IfHCInMulticastPkts_hi
9627 || sblk->stat_IfHCInMulticastPkts_lo)
9628 BCE_PRINTF("0x%08X:%08X : "
9629 "IfHcInMulticastPkts\n",
9630 sblk->stat_IfHCInMulticastPkts_hi,
9631 sblk->stat_IfHCInMulticastPkts_lo);
9633 if (sblk->stat_IfHCOutUcastPkts_hi
9634 || sblk->stat_IfHCOutUcastPkts_lo)
9635 BCE_PRINTF("0x%08X:%08X : "
9636 "IfHcOutUcastPkts\n",
9637 sblk->stat_IfHCOutUcastPkts_hi,
9638 sblk->stat_IfHCOutUcastPkts_lo);
9640 if (sblk->stat_IfHCOutBroadcastPkts_hi
9641 || sblk->stat_IfHCOutBroadcastPkts_lo)
9642 BCE_PRINTF("0x%08X:%08X : "
9643 "IfHcOutBroadcastPkts\n",
9644 sblk->stat_IfHCOutBroadcastPkts_hi,
9645 sblk->stat_IfHCOutBroadcastPkts_lo);
9647 if (sblk->stat_IfHCOutMulticastPkts_hi
9648 || sblk->stat_IfHCOutMulticastPkts_lo)
9649 BCE_PRINTF("0x%08X:%08X : "
9650 "IfHcOutMulticastPkts\n",
9651 sblk->stat_IfHCOutMulticastPkts_hi,
9652 sblk->stat_IfHCOutMulticastPkts_lo);
9654 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
9655 BCE_PRINTF(" 0x%08X : "
9656 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
9657 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
9659 if (sblk->stat_Dot3StatsCarrierSenseErrors)
9660 BCE_PRINTF(" 0x%08X : Dot3StatsCarrierSenseErrors\n",
9661 sblk->stat_Dot3StatsCarrierSenseErrors);
9663 if (sblk->stat_Dot3StatsFCSErrors)
9664 BCE_PRINTF(" 0x%08X : Dot3StatsFCSErrors\n",
9665 sblk->stat_Dot3StatsFCSErrors);
9667 if (sblk->stat_Dot3StatsAlignmentErrors)
9668 BCE_PRINTF(" 0x%08X : Dot3StatsAlignmentErrors\n",
9669 sblk->stat_Dot3StatsAlignmentErrors);
9671 if (sblk->stat_Dot3StatsSingleCollisionFrames)
9672 BCE_PRINTF(" 0x%08X : Dot3StatsSingleCollisionFrames\n",
9673 sblk->stat_Dot3StatsSingleCollisionFrames);
9675 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
9676 BCE_PRINTF(" 0x%08X : Dot3StatsMultipleCollisionFrames\n",
9677 sblk->stat_Dot3StatsMultipleCollisionFrames);
9679 if (sblk->stat_Dot3StatsDeferredTransmissions)
9680 BCE_PRINTF(" 0x%08X : Dot3StatsDeferredTransmissions\n",
9681 sblk->stat_Dot3StatsDeferredTransmissions);
9683 if (sblk->stat_Dot3StatsExcessiveCollisions)
9684 BCE_PRINTF(" 0x%08X : Dot3StatsExcessiveCollisions\n",
9685 sblk->stat_Dot3StatsExcessiveCollisions);
9687 if (sblk->stat_Dot3StatsLateCollisions)
9688 BCE_PRINTF(" 0x%08X : Dot3StatsLateCollisions\n",
9689 sblk->stat_Dot3StatsLateCollisions);
9691 if (sblk->stat_EtherStatsCollisions)
9692 BCE_PRINTF(" 0x%08X : EtherStatsCollisions\n",
9693 sblk->stat_EtherStatsCollisions);
9695 if (sblk->stat_EtherStatsFragments)
9696 BCE_PRINTF(" 0x%08X : EtherStatsFragments\n",
9697 sblk->stat_EtherStatsFragments);
9699 if (sblk->stat_EtherStatsJabbers)
9700 BCE_PRINTF(" 0x%08X : EtherStatsJabbers\n",
9701 sblk->stat_EtherStatsJabbers);
9703 if (sblk->stat_EtherStatsUndersizePkts)
9704 BCE_PRINTF(" 0x%08X : EtherStatsUndersizePkts\n",
9705 sblk->stat_EtherStatsUndersizePkts);
9707 if (sblk->stat_EtherStatsOversizePkts)
9708 BCE_PRINTF(" 0x%08X : EtherStatsOverrsizePkts\n",
9709 sblk->stat_EtherStatsOversizePkts);
9711 if (sblk->stat_EtherStatsPktsRx64Octets)
9712 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx64Octets\n",
9713 sblk->stat_EtherStatsPktsRx64Octets);
9715 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
9716 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
9717 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
9719 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
9720 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
9721 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
9723 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
9724 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
9725 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
9727 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
9728 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
9729 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
9731 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
9732 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
9733 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
9735 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
9736 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
9737 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
9739 if (sblk->stat_EtherStatsPktsTx64Octets)
9740 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx64Octets\n",
9741 sblk->stat_EtherStatsPktsTx64Octets);
9743 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
9744 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
9745 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
9747 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
9748 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
9749 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
9751 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
9752 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
9753 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
9755 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
9756 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
9757 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
9759 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
9760 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
9761 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
9763 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
9764 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
9765 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
9767 if (sblk->stat_XonPauseFramesReceived)
9768 BCE_PRINTF(" 0x%08X : XonPauseFramesReceived\n",
9769 sblk->stat_XonPauseFramesReceived);
9771 if (sblk->stat_XoffPauseFramesReceived)
9772 BCE_PRINTF(" 0x%08X : XoffPauseFramesReceived\n",
9773 sblk->stat_XoffPauseFramesReceived);
9775 if (sblk->stat_OutXonSent)
9776 BCE_PRINTF(" 0x%08X : OutXonSent\n",
9777 sblk->stat_OutXonSent);
9779 if (sblk->stat_OutXoffSent)
9780 BCE_PRINTF(" 0x%08X : OutXoffSent\n",
9781 sblk->stat_OutXoffSent);
9783 if (sblk->stat_FlowControlDone)
9784 BCE_PRINTF(" 0x%08X : FlowControlDone\n",
9785 sblk->stat_FlowControlDone);
9787 if (sblk->stat_MacControlFramesReceived)
9788 BCE_PRINTF(" 0x%08X : MacControlFramesReceived\n",
9789 sblk->stat_MacControlFramesReceived);
9791 if (sblk->stat_XoffStateEntered)
9792 BCE_PRINTF(" 0x%08X : XoffStateEntered\n",
9793 sblk->stat_XoffStateEntered);
9795 if (sblk->stat_IfInFramesL2FilterDiscards)
9796 BCE_PRINTF(" 0x%08X : IfInFramesL2FilterDiscards\n",
9797 sblk->stat_IfInFramesL2FilterDiscards);
9799 if (sblk->stat_IfInRuleCheckerDiscards)
9800 BCE_PRINTF(" 0x%08X : IfInRuleCheckerDiscards\n",
9801 sblk->stat_IfInRuleCheckerDiscards);
9803 if (sblk->stat_IfInFTQDiscards)
9804 BCE_PRINTF(" 0x%08X : IfInFTQDiscards\n",
9805 sblk->stat_IfInFTQDiscards);
9807 if (sblk->stat_IfInMBUFDiscards)
9808 BCE_PRINTF(" 0x%08X : IfInMBUFDiscards\n",
9809 sblk->stat_IfInMBUFDiscards);
9811 if (sblk->stat_IfInRuleCheckerP4Hit)
9812 BCE_PRINTF(" 0x%08X : IfInRuleCheckerP4Hit\n",
9813 sblk->stat_IfInRuleCheckerP4Hit);
9815 if (sblk->stat_CatchupInRuleCheckerDiscards)
9816 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerDiscards\n",
9817 sblk->stat_CatchupInRuleCheckerDiscards);
9819 if (sblk->stat_CatchupInFTQDiscards)
9820 BCE_PRINTF(" 0x%08X : CatchupInFTQDiscards\n",
9821 sblk->stat_CatchupInFTQDiscards);
9823 if (sblk->stat_CatchupInMBUFDiscards)
9824 BCE_PRINTF(" 0x%08X : CatchupInMBUFDiscards\n",
9825 sblk->stat_CatchupInMBUFDiscards);
9827 if (sblk->stat_CatchupInRuleCheckerP4Hit)
9828 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerP4Hit\n",
9829 sblk->stat_CatchupInRuleCheckerP4Hit);
9832 "----------------------------"
9834 "----------------------------\n");
9838 /****************************************************************************/
9839 /* Prints out a summary of the driver state. */
9843 /****************************************************************************/
9844 static __attribute__ ((noinline)) void
9845 bce_dump_driver_state(struct bce_softc *sc)
9850 "-----------------------------"
9852 "-----------------------------\n");
9854 val_hi = BCE_ADDR_HI(sc);
9855 val_lo = BCE_ADDR_LO(sc);
9856 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
9859 val_hi = BCE_ADDR_HI(sc->bce_vhandle);
9860 val_lo = BCE_ADDR_LO(sc->bce_vhandle);
9861 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
9864 val_hi = BCE_ADDR_HI(sc->status_block);
9865 val_lo = BCE_ADDR_LO(sc->status_block);
9866 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
9869 val_hi = BCE_ADDR_HI(sc->stats_block);
9870 val_lo = BCE_ADDR_LO(sc->stats_block);
9871 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
9874 val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
9875 val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
9877 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
9880 val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
9881 val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
9883 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
9886 #ifdef BCE_JUMBO_HDRSPLIT
9887 val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
9888 val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
9890 "0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
9894 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
9895 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
9897 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
9900 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
9901 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
9903 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
9906 #ifdef BCE_JUMBO_HDRSPLIT
9907 val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
9908 val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
9910 "0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
9914 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) h/w intrs\n",
9915 sc->interrupts_generated);
9917 BCE_PRINTF(" 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
9920 BCE_PRINTF(" 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
9923 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) status block index\n",
9924 sc->last_status_idx);
9926 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
9927 sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
9929 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
9930 sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
9932 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
9935 BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
9936 sc->debug_tx_mbuf_alloc);
9938 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
9941 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
9942 sc->tx_hi_watermark, sc->max_tx_bd);
9944 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
9945 sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
9947 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
9948 sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
9950 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
9953 BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
9954 sc->debug_rx_mbuf_alloc);
9956 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
9959 #ifdef BCE_JUMBO_HDRSPLIT
9960 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
9961 sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
9963 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
9964 sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
9966 BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
9967 sc->debug_pg_mbuf_alloc);
9969 BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
9972 BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
9973 sc->pg_low_watermark, sc->max_pg_bd);
9976 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) "
9977 "mbuf alloc failures\n",
9978 sc->mbuf_alloc_failed_count);
9980 BCE_PRINTF(" 0x%08X - (sc->bce_flags) bce mac flags\n",
9983 BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) bce phy flags\n",
9987 "----------------------------"
9989 "----------------------------\n");
9993 /****************************************************************************/
9994 /* Prints out the hardware state through a summary of important register, */
9995 /* followed by a complete register dump. */
9999 /****************************************************************************/
10000 static __attribute__ ((noinline)) void
10001 bce_dump_hw_state(struct bce_softc *sc)
10006 "----------------------------"
10008 "----------------------------\n");
10010 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
10012 val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
10013 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
10014 val, BCE_MISC_ENABLE_STATUS_BITS);
10016 val = REG_RD(sc, BCE_DMA_STATUS);
10017 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
10019 val = REG_RD(sc, BCE_CTX_STATUS);
10020 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
10022 val = REG_RD(sc, BCE_EMAC_STATUS);
10023 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
10025 val = REG_RD(sc, BCE_RPM_STATUS);
10026 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
10028 val = REG_RD(sc, 0x2004);
10029 BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
10031 val = REG_RD(sc, BCE_RV2P_STATUS);
10032 BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
10034 val = REG_RD(sc, 0x2c04);
10035 BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
10037 val = REG_RD(sc, BCE_TBDR_STATUS);
10038 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
10040 val = REG_RD(sc, BCE_TDMA_STATUS);
10041 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
10043 val = REG_RD(sc, BCE_HC_STATUS);
10044 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
10046 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
10047 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
10049 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10050 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
10052 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
10053 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
10055 val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10056 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
10058 val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
10059 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
10061 val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10062 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
10065 "----------------------------"
10067 "----------------------------\n");
10070 "----------------------------"
10072 "----------------------------\n");
10074 for (int i = 0x400; i < 0x8000; i += 0x10) {
10075 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10076 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
10077 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
10081 "----------------------------"
10083 "----------------------------\n");
10087 /****************************************************************************/
10088 /* Prints out the mailbox queue registers. */
10092 /****************************************************************************/
10093 static __attribute__ ((noinline)) void
10094 bce_dump_mq_regs(struct bce_softc *sc)
10097 "----------------------------"
10099 "----------------------------\n");
10102 "----------------------------"
10104 "----------------------------\n");
10106 for (int i = 0x3c00; i < 0x4000; i += 0x10) {
10107 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10108 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
10109 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
10113 "----------------------------"
10115 "----------------------------\n");
10119 /****************************************************************************/
10120 /* Prints out the bootcode state. */
10124 /****************************************************************************/
10125 static __attribute__ ((noinline)) void
10126 bce_dump_bc_state(struct bce_softc *sc)
10131 "----------------------------"
10133 "----------------------------\n");
10135 BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver);
10137 val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE);
10138 BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
10139 val, BCE_BC_RESET_TYPE);
10141 val = bce_shmem_rd(sc, BCE_BC_STATE);
10142 BCE_PRINTF("0x%08X - (0x%06X) state\n",
10143 val, BCE_BC_STATE);
10145 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
10146 BCE_PRINTF("0x%08X - (0x%06X) condition\n",
10147 val, BCE_BC_STATE_CONDITION);
10149 val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD);
10150 BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
10151 val, BCE_BC_STATE_DEBUG_CMD);
10154 "----------------------------"
10156 "----------------------------\n");
10160 /****************************************************************************/
10161 /* Prints out the TXP processor state. */
10165 /****************************************************************************/
10166 static __attribute__ ((noinline)) void
10167 bce_dump_txp_state(struct bce_softc *sc, int regs)
10173 "----------------------------"
10175 "----------------------------\n");
10177 for (int i = 0; i < 3; i++)
10178 fw_version[i] = htonl(REG_RD_IND(sc,
10179 (BCE_TXP_SCRATCH + 0x10 + i * 4)));
10180 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10182 val = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
10183 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val, BCE_TXP_CPU_MODE);
10185 val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
10186 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
10188 val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
10189 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val,
10190 BCE_TXP_CPU_EVENT_MASK);
10194 "----------------------------"
10196 "----------------------------\n");
10198 for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
10199 /* Skip the big blank spaces */
10200 if (i < 0x454000 && i > 0x5ffff)
10201 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10202 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10203 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10208 "----------------------------"
10210 "----------------------------\n");
10214 /****************************************************************************/
10215 /* Prints out the RXP processor state. */
10219 /****************************************************************************/
10220 static __attribute__ ((noinline)) void
10221 bce_dump_rxp_state(struct bce_softc *sc, int regs)
10227 "----------------------------"
10229 "----------------------------\n");
10231 for (int i = 0; i < 3; i++)
10232 fw_version[i] = htonl(REG_RD_IND(sc,
10233 (BCE_RXP_SCRATCH + 0x10 + i * 4)));
10234 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10236 val = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
10237 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val, BCE_RXP_CPU_MODE);
10239 val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
10240 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
10242 val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
10243 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val,
10244 BCE_RXP_CPU_EVENT_MASK);
10248 "----------------------------"
10250 "----------------------------\n");
10252 for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
10253 /* Skip the big blank sapces */
10254 if (i < 0xc5400 && i > 0xdffff)
10255 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10256 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10257 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10262 "----------------------------"
10264 "----------------------------\n");
10268 /****************************************************************************/
10269 /* Prints out the TPAT processor state. */
10273 /****************************************************************************/
10274 static __attribute__ ((noinline)) void
10275 bce_dump_tpat_state(struct bce_softc *sc, int regs)
10281 "----------------------------"
10283 "----------------------------\n");
10285 for (int i = 0; i < 3; i++)
10286 fw_version[i] = htonl(REG_RD_IND(sc,
10287 (BCE_TPAT_SCRATCH + 0x410 + i * 4)));
10288 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10290 val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
10291 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val, BCE_TPAT_CPU_MODE);
10293 val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
10294 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
10296 val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
10297 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val,
10298 BCE_TPAT_CPU_EVENT_MASK);
10302 "----------------------------"
10304 "----------------------------\n");
10306 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
10307 /* Skip the big blank spaces */
10308 if (i < 0x854000 && i > 0x9ffff)
10309 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10310 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10311 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10316 "----------------------------"
10318 "----------------------------\n");
10322 /****************************************************************************/
10323 /* Prints out the Command Procesor (CP) state. */
10327 /****************************************************************************/
10328 static __attribute__ ((noinline)) void
10329 bce_dump_cp_state(struct bce_softc *sc, int regs)
10335 "----------------------------"
10337 "----------------------------\n");
10339 for (int i = 0; i < 3; i++)
10340 fw_version[i] = htonl(REG_RD_IND(sc,
10341 (BCE_CP_SCRATCH + 0x10 + i * 4)));
10342 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10344 val = REG_RD_IND(sc, BCE_CP_CPU_MODE);
10345 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", val, BCE_CP_CPU_MODE);
10347 val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
10348 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
10350 val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK);
10351 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val,
10352 BCE_CP_CPU_EVENT_MASK);
10356 "----------------------------"
10358 "----------------------------\n");
10360 for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
10361 /* Skip the big blank spaces */
10362 if (i < 0x185400 && i > 0x19ffff)
10363 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10364 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10365 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10370 "----------------------------"
10372 "----------------------------\n");
10376 /****************************************************************************/
10377 /* Prints out the Completion Procesor (COM) state. */
10381 /****************************************************************************/
10382 static __attribute__ ((noinline)) void
10383 bce_dump_com_state(struct bce_softc *sc, int regs)
10389 "----------------------------"
10391 "----------------------------\n");
10393 for (int i = 0; i < 3; i++)
10394 fw_version[i] = htonl(REG_RD_IND(sc,
10395 (BCE_COM_SCRATCH + 0x10 + i * 4)));
10396 BCE_PRINTF("Firmware version - %s\n", (char *) fw_version);
10398 val = REG_RD_IND(sc, BCE_COM_CPU_MODE);
10399 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", val, BCE_COM_CPU_MODE);
10401 val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
10402 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
10404 val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK);
10405 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val,
10406 BCE_COM_CPU_EVENT_MASK);
10410 "----------------------------"
10412 "----------------------------\n");
10414 for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) {
10415 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
10416 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
10417 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
10422 "----------------------------"
10424 "----------------------------\n");
10428 /****************************************************************************/
10429 /* Prints out the driver state and then enters the debugger. */
10433 /****************************************************************************/
10435 bce_breakpoint(struct bce_softc *sc)
10439 * Unreachable code to silence compiler warnings
10440 * about unused functions.
10443 bce_freeze_controller(sc);
10444 bce_unfreeze_controller(sc);
10445 bce_dump_enet(sc, NULL);
10446 bce_dump_txbd(sc, 0, NULL);
10447 bce_dump_rxbd(sc, 0, NULL);
10448 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
10449 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
10450 bce_dump_l2fhdr(sc, 0, NULL);
10451 bce_dump_ctx(sc, RX_CID);
10453 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
10454 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
10455 bce_dump_status_block(sc);
10456 bce_dump_stats_block(sc);
10457 bce_dump_driver_state(sc);
10458 bce_dump_hw_state(sc);
10459 bce_dump_bc_state(sc);
10460 bce_dump_txp_state(sc, 0);
10461 bce_dump_rxp_state(sc, 0);
10462 bce_dump_tpat_state(sc, 0);
10463 bce_dump_cp_state(sc, 0);
10464 bce_dump_com_state(sc, 0);
10465 #ifdef BCE_JUMBO_HDRSPLIT
10466 bce_dump_pgbd(sc, 0, NULL);
10467 bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
10468 bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
10472 bce_dump_status_block(sc);
10473 bce_dump_driver_state(sc);
10475 /* Call the debugger. */