2 * Copyright (c) 2006-2007 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * The following controllers are supported by this driver:
39 * The following controllers are not supported by this driver:
41 * BCM5706S A0, A1, A2, A3
43 * BCM5708S A0, B0, B1, B2
48 #include <dev/bce/if_bcereg.h>
49 #include <dev/bce/if_bcefw.h>
/****************************************************************************/
/* BCE Debug Options                                                        */
/****************************************************************************/
#ifdef BCE_DEBUG
/* Default debug message level (levels are defined in if_bcereg.h). */
u32 bce_debug = BCE_WARN;

/*
 * Fault-injection probability for the controls below (0 disables the
 * simulated failure entirely):
 *          1 = 1 in 2,147,483,648
 *        256 = 1 in     8,388,608
 *       2048 = 1 in     1,048,576
 *      65536 = 1 in        32,768
 *    1048576 = 1 in         2,048
 *  268435456 = 1 in             8
 *  536870912 = 1 in             4
 * 1073741824 = 1 in             2
 */

/* Controls how often the l2_fhdr frame error check will fail. */
int bce_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int bce_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int bce_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int bce_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int bce_debug_bootcode_running_failure = 0;
#endif
84 /****************************************************************************/
85 /* PCI Device ID Table */
87 /* Used by bce_probe() to identify the devices supported by this driver. */
88 /****************************************************************************/
89 #define BCE_DEVDESC_MAX 64
91 static struct bce_type bce_devs[] = {
92 /* BCM5706C Controllers and OEM boards. */
93 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
94 "HP NC370T Multifunction Gigabit Server Adapter" },
95 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
96 "HP NC370i Multifunction Gigabit Server Adapter" },
97 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
98 "Broadcom NetXtreme II BCM5706 1000Base-T" },
100 /* BCM5706S controllers and OEM boards. */
101 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
102 "HP NC370F Multifunction Gigabit Server Adapter" },
103 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
104 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
106 /* BCM5708C controllers and OEM boards. */
107 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
108 "Broadcom NetXtreme II BCM5708 1000Base-T" },
110 /* BCM5708S controllers and OEM boards. */
111 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
112 "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117 /****************************************************************************/
118 /* Supported Flash NVRAM device data. */
119 /****************************************************************************/
120 static struct flash_spec flash_table[] =
123 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
124 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
125 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
127 /* Expansion entry 0001 */
128 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
129 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
130 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
132 /* Saifun SA25F010 (non-buffered flash) */
133 /* strap, cfg1, & write1 need updates */
134 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
135 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
136 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
137 "Non-buffered flash (128kB)"},
138 /* Saifun SA25F020 (non-buffered flash) */
139 /* strap, cfg1, & write1 need updates */
140 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
141 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
142 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
143 "Non-buffered flash (256kB)"},
144 /* Expansion entry 0100 */
145 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
146 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
150 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
151 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
152 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
153 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
154 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
155 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
156 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
157 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
158 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
159 /* Saifun SA25F005 (non-buffered flash) */
160 /* strap, cfg1, & write1 need updates */
161 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
162 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
163 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
164 "Non-buffered flash (64kB)"},
166 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
167 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
168 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
170 /* Expansion entry 1001 */
171 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
172 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
175 /* Expansion entry 1010 */
176 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
177 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 /* ATMEL AT45DB011B (buffered flash) */
181 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
182 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
183 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
184 "Buffered flash (128kB)"},
185 /* Expansion entry 1100 */
186 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
187 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 /* Expansion entry 1101 */
191 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
192 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195 /* Ateml Expansion entry 1110 */
196 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
197 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
198 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1110 (Atmel)"},
200 /* ATMEL AT45DB021B (buffered flash) */
201 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
202 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
203 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
204 "Buffered flash (256kB)"},
/****************************************************************************/
/* FreeBSD device entry points.                                             */
/****************************************************************************/
static int  bce_probe				(device_t);
static int  bce_attach				(device_t);
static int  bce_detach				(device_t);
static void bce_shutdown			(device_t);

/****************************************************************************/
/* BCE Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BCE_DEBUG
static void bce_dump_mbuf			(struct bce_softc *, struct mbuf *);
static void bce_dump_tx_mbuf_chain	(struct bce_softc *, int, int);
static void bce_dump_rx_mbuf_chain	(struct bce_softc *, int, int);
static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
static void bce_dump_tx_chain		(struct bce_softc *, int, int);
static void bce_dump_rx_chain		(struct bce_softc *, int, int);
static void bce_dump_status_block	(struct bce_softc *);
static void bce_dump_stats_block	(struct bce_softc *);
static void bce_dump_driver_state	(struct bce_softc *);
static void bce_dump_hw_state		(struct bce_softc *);
static void bce_dump_bc_state		(struct bce_softc *);
static void bce_breakpoint			(struct bce_softc *);
#endif
238 /****************************************************************************/
239 /* BCE Register/Memory Access Routines */
240 /****************************************************************************/
241 static u32 bce_reg_rd_ind (struct bce_softc *, u32);
242 static void bce_reg_wr_ind (struct bce_softc *, u32, u32);
243 static void bce_ctx_wr (struct bce_softc *, u32, u32, u32);
244 static int bce_miibus_read_reg (device_t, int, int);
245 static int bce_miibus_write_reg (device_t, int, int, int);
246 static void bce_miibus_statchg (device_t);
249 /****************************************************************************/
250 /* BCE NVRAM Access Routines */
251 /****************************************************************************/
252 static int bce_acquire_nvram_lock (struct bce_softc *);
253 static int bce_release_nvram_lock (struct bce_softc *);
254 static void bce_enable_nvram_access (struct bce_softc *);
255 static void bce_disable_nvram_access(struct bce_softc *);
256 static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
257 static int bce_init_nvram (struct bce_softc *);
258 static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
259 static int bce_nvram_test (struct bce_softc *);
260 #ifdef BCE_NVRAM_WRITE_SUPPORT
261 static int bce_enable_nvram_write (struct bce_softc *);
262 static void bce_disable_nvram_write (struct bce_softc *);
263 static int bce_nvram_erase_page (struct bce_softc *, u32);
264 static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
265 static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
268 /****************************************************************************/
270 /****************************************************************************/
271 static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int);
272 static int bce_dma_alloc (device_t);
273 static void bce_dma_free (struct bce_softc *);
274 static void bce_release_resources (struct bce_softc *);
276 /****************************************************************************/
277 /* BCE Firmware Synchronization and Load */
278 /****************************************************************************/
279 static int bce_fw_sync (struct bce_softc *, u32);
280 static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
281 static void bce_load_cpu_fw (struct bce_softc *, struct cpu_reg *, struct fw_info *);
282 static void bce_init_cpus (struct bce_softc *);
284 static void bce_stop (struct bce_softc *);
285 static int bce_reset (struct bce_softc *, u32);
286 static int bce_chipinit (struct bce_softc *);
287 static int bce_blockinit (struct bce_softc *);
288 static int bce_get_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
290 static int bce_init_tx_chain (struct bce_softc *);
291 static void bce_fill_rx_chain (struct bce_softc *);
292 static int bce_init_rx_chain (struct bce_softc *);
293 static void bce_free_rx_chain (struct bce_softc *);
294 static void bce_free_tx_chain (struct bce_softc *);
296 static int bce_tx_encap (struct bce_softc *, struct mbuf **);
297 static void bce_start_locked (struct ifnet *);
298 static void bce_start (struct ifnet *);
299 static int bce_ioctl (struct ifnet *, u_long, caddr_t);
300 static void bce_watchdog (struct bce_softc *);
301 static int bce_ifmedia_upd (struct ifnet *);
302 static void bce_ifmedia_upd_locked (struct ifnet *);
303 static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *);
304 static void bce_init_locked (struct bce_softc *);
305 static void bce_init (void *);
306 static void bce_mgmt_init_locked (struct bce_softc *sc);
308 static void bce_init_context (struct bce_softc *);
309 static void bce_get_mac_addr (struct bce_softc *);
310 static void bce_set_mac_addr (struct bce_softc *);
311 static void bce_phy_intr (struct bce_softc *);
312 static void bce_rx_intr (struct bce_softc *);
313 static void bce_tx_intr (struct bce_softc *);
314 static void bce_disable_intr (struct bce_softc *);
315 static void bce_enable_intr (struct bce_softc *);
317 #ifdef DEVICE_POLLING
318 static void bce_poll_locked (struct ifnet *, enum poll_cmd, int);
319 static void bce_poll (struct ifnet *, enum poll_cmd, int);
321 static void bce_intr (void *);
322 static void bce_set_rx_mode (struct bce_softc *);
323 static void bce_stats_update (struct bce_softc *);
324 static void bce_tick (void *);
325 static void bce_pulse (void *);
326 static void bce_add_sysctls (struct bce_softc *);
329 /****************************************************************************/
330 /* FreeBSD device dispatch table. */
331 /****************************************************************************/
332 static device_method_t bce_methods[] = {
333 /* Device interface */
334 DEVMETHOD(device_probe, bce_probe),
335 DEVMETHOD(device_attach, bce_attach),
336 DEVMETHOD(device_detach, bce_detach),
337 DEVMETHOD(device_shutdown, bce_shutdown),
340 DEVMETHOD(bus_print_child, bus_generic_print_child),
341 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
344 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
345 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
346 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
351 static driver_t bce_driver = {
354 sizeof(struct bce_softc)
357 static devclass_t bce_devclass;
359 MODULE_DEPEND(bce, pci, 1, 1, 1);
360 MODULE_DEPEND(bce, ether, 1, 1, 1);
361 MODULE_DEPEND(bce, miibus, 1, 1, 1);
363 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
364 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
367 /****************************************************************************/
368 /* Tunable device values */
369 /****************************************************************************/
370 static int bce_msi_enable = 1;
372 /* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */
373 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
374 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
375 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
376 "MSI | INTx selector");
378 /****************************************************************************/
379 /* Device probe function. */
381 /* Compares the device to the driver's list of supported devices and */
382 /* reports back to the OS whether this is the right driver for the device. */
385 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
386 /****************************************************************************/
388 bce_probe(device_t dev)
391 struct bce_softc *sc;
393 u16 vid = 0, did = 0, svid = 0, sdid = 0;
397 sc = device_get_softc(dev);
398 bzero(sc, sizeof(struct bce_softc));
399 sc->bce_unit = device_get_unit(dev);
402 /* Get the data for the device to be probed. */
403 vid = pci_get_vendor(dev);
404 did = pci_get_device(dev);
405 svid = pci_get_subvendor(dev);
406 sdid = pci_get_subdevice(dev);
408 DBPRINT(sc, BCE_VERBOSE_LOAD,
409 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
410 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
412 /* Look through the list of known devices for a match. */
413 while(t->bce_name != NULL) {
415 if ((vid == t->bce_vid) && (did == t->bce_did) &&
416 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
417 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
419 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
424 /* Print out the device identity. */
425 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
427 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
428 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
430 device_set_desc_copy(dev, descbuf);
431 free(descbuf, M_TEMP);
432 return(BUS_PROBE_DEFAULT);
441 /****************************************************************************/
442 /* Device attach function. */
444 /* Allocates device resources, performs secondary chip identification, */
445 /* resets and initializes the hardware, and initializes driver instance */
449 /* 0 on success, positive value on failure. */
450 /****************************************************************************/
/*
 * Device attach: allocates PCI memory/IRQ resources, identifies the chip,
 * resets and initializes the hardware, and sets up the ifnet interface.
 *
 * NOTE(review): this chunk appears to have lines elided by extraction
 * (the embedded original line numbering is non-contiguous, and structural
 * lines such as braces appear to be missing); the code below is reproduced
 * as-is and needs to be reconciled against the complete upstream file.
 */
452 bce_attach(device_t dev)
454 struct bce_softc *sc;
457 int count, mbuf, rid, rc = 0;
459 sc = device_get_softc(dev);
462 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
464 mbuf = device_get_unit(dev);
466 /* Set initial device and PHY flags */
468 sc->bce_phy_flags = 0;
472 pci_enable_busmaster(dev);
474 /* Allocate PCI memory resources. */
476 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
477 &rid, RF_ACTIVE | PCI_RF_DENSE);
479 if (sc->bce_res_mem == NULL) {
480 BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
483 goto bce_attach_fail;
486 /* Get various resource handles. */
487 sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
488 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
489 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
491 /* If MSI is enabled in the driver, get the vector count. */
492 count = bce_msi_enable ? pci_msi_count(dev) : 0;
494 /* Allocate PCI IRQ resources. */
495 if (count == 1 && pci_alloc_msi(dev, &count) == 0 && count == 1) {
497 sc->bce_flags |= BCE_USING_MSI_FLAG;
498 DBPRINT(sc, BCE_VERBOSE_LOAD,
499 "Allocating %d MSI interrupt(s)\n", count);
502 DBPRINT(sc, BCE_VERBOSE_LOAD, "Allocating IRQ interrupt\n");
505 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
506 RF_SHAREABLE | RF_ACTIVE);
508 if (sc->bce_res_irq == NULL) {
509 BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
512 goto bce_attach_fail;
515 /* Initialize mutex for the current device instance. */
516 BCE_LOCK_INIT(sc, device_get_nameunit(dev));
519 * Configure byte swap and enable indirect register access.
520 * Rely on CPU to do target byte swapping on big endian systems.
521 * Access to registers outside of PCI configuration space are not
522 * valid until this is done.
524 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
525 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
526 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
528 /* Save ASIC revision info. */
529 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
531 /* Weed out any non-production controller revisions. */
532 switch(BCE_CHIP_ID(sc)) {
533 case BCE_CHIP_ID_5706_A0:
534 case BCE_CHIP_ID_5706_A1:
535 case BCE_CHIP_ID_5708_A0:
536 case BCE_CHIP_ID_5708_B0:
537 BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
539 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
540 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
542 goto bce_attach_fail;
546 * The embedded PCIe to PCI-X bridge (EPB)
547 * in the 5708 cannot address memory above
548 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
550 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
551 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
553 sc->max_bus_addr = BUS_SPACE_MAXADDR;
556 * Find the base address for shared memory access.
557 * Newer versions of bootcode use a signature and offset
558 * while older versions use a fixed address.
560 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
561 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
562 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
564 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
566 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
567 __FUNCTION__, sc->bce_shmem_base);
569 sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
570 BCE_DEV_INFO_BC_REV);
571 DBPRINT(sc, BCE_INFO_FIRMWARE, "%s(): bce_fw_ver = 0x%08X\n",
572 __FUNCTION__, sc->bce_fw_ver);
574 /* Check if any management firmware is running. */
575 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
576 if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
577 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
578 DBPRINT(sc, BCE_INFO_LOAD, "%s(): BCE_MFW_ENABLE_FLAG\n",
582 /* Get PCI bus information (speed and type). */
583 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
584 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
587 sc->bce_flags |= BCE_PCIX_FLAG;
589 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
591 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
593 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
594 sc->bus_speed_mhz = 133;
597 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
598 sc->bus_speed_mhz = 100;
601 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
602 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
603 sc->bus_speed_mhz = 66;
606 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
607 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
608 sc->bus_speed_mhz = 50;
611 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
612 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
613 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
614 sc->bus_speed_mhz = 33;
618 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
619 sc->bus_speed_mhz = 66;
621 sc->bus_speed_mhz = 33;
624 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
625 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
627 /* Reset the controller and announce to bootcode that driver is present. */
628 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
629 BCE_PRINTF("%s(%d): Controller reset failed!\n",
632 goto bce_attach_fail;
635 /* Initialize the controller. */
636 if (bce_chipinit(sc)) {
637 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
640 goto bce_attach_fail;
643 /* Perform NVRAM test. */
644 if (bce_nvram_test(sc)) {
645 BCE_PRINTF("%s(%d): NVRAM test failed!\n",
648 goto bce_attach_fail;
651 /* Fetch the permanent Ethernet MAC address. */
652 bce_get_mac_addr(sc);
655 * Trip points control how many BDs
656 * should be ready before generating an
657 * interrupt while ticks control how long
658 * a BD can sit in the chain before
659 * generating an interrupt. Set the default
660 * values for the RX and TX chains.
664 /* Force more frequent interrupts. */
665 sc->bce_tx_quick_cons_trip_int = 1;
666 sc->bce_tx_quick_cons_trip = 1;
667 sc->bce_tx_ticks_int = 0;
668 sc->bce_tx_ticks = 0;
670 sc->bce_rx_quick_cons_trip_int = 1;
671 sc->bce_rx_quick_cons_trip = 1;
672 sc->bce_rx_ticks_int = 0;
673 sc->bce_rx_ticks = 0;
675 /* Improve throughput at the expense of increased latency. */
676 sc->bce_tx_quick_cons_trip_int = 20;
677 sc->bce_tx_quick_cons_trip = 20;
678 sc->bce_tx_ticks_int = 80;
679 sc->bce_tx_ticks = 80;
681 sc->bce_rx_quick_cons_trip_int = 6;
682 sc->bce_rx_quick_cons_trip = 6;
683 sc->bce_rx_ticks_int = 18;
684 sc->bce_rx_ticks = 18;
687 /* Update statistics once every second. */
688 sc->bce_stats_ticks = 1000000 & 0xffff00;
691 * The SerDes based NetXtreme II controllers
692 * that support 2.5Gb operation (currently
693 * 5708S) use a PHY at address 2, otherwise
694 * the PHY is present at address 1.
696 sc->bce_phy_addr = 1;
698 if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
699 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
700 sc->bce_flags |= BCE_NO_WOL_FLAG;
701 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
702 sc->bce_phy_addr = 2;
703 val = REG_RD_IND(sc, sc->bce_shmem_base +
704 BCE_SHARED_HW_CFG_CONFIG);
705 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
706 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
707 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
712 /* Store data needed by PHY driver for backplane applications */
713 sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
714 BCE_SHARED_HW_CFG_CONFIG);
/* NOTE(review): bce_port_hw_cfg reads BCE_SHARED_HW_CFG_CONFIG, the same
 * offset as bce_shared_hw_cfg above — looks like it should read a
 * PORT_HW_CFG offset instead; verify against the complete upstream file. */
715 sc->bce_port_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
716 BCE_SHARED_HW_CFG_CONFIG);
718 /* Allocate DMA memory resources. */
719 if (bce_dma_alloc(dev)) {
720 BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
723 goto bce_attach_fail;
726 /* Allocate an ifnet structure. */
727 ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
729 BCE_PRINTF("%s(%d): Interface allocation failed!\n",
732 goto bce_attach_fail;
735 /* Initialize the ifnet interface. */
737 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
738 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
739 ifp->if_ioctl = bce_ioctl;
740 ifp->if_start = bce_start;
741 ifp->if_init = bce_init;
742 ifp->if_mtu = ETHERMTU;
743 ifp->if_hwassist = BCE_IF_HWASSIST;
744 ifp->if_capabilities = BCE_IF_CAPABILITIES;
745 ifp->if_capenable = ifp->if_capabilities;
747 /* Assume a standard 1500 byte MTU size for mbuf allocations. */
748 sc->mbuf_alloc_size = MCLBYTES;
749 #ifdef DEVICE_POLLING
750 ifp->if_capabilities |= IFCAP_POLLING;
753 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
754 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
755 IFQ_SET_READY(&ifp->if_snd);
757 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
758 ifp->if_baudrate = IF_Mbps(2500ULL);
760 ifp->if_baudrate = IF_Mbps(1000);
762 /* Check for an MII child bus by probing the PHY. */
763 if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
765 BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
768 goto bce_attach_fail;
771 /* Attach to the Ethernet interface list. */
772 ether_ifattach(ifp, sc->eaddr);
774 #if __FreeBSD_version < 500000
775 callout_init(&sc->bce_tick_callout);
776 callout_init(&sc->bce_pulse_callout);
778 callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
779 callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
782 /* Hookup IRQ last. */
783 #if __FreeBSD_version > 700030
784 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
785 NULL, bce_intr, sc, &sc->bce_intrhand);
787 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
788 bce_intr, sc, &sc->bce_intrhand);
792 BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
795 goto bce_attach_exit;
799 * At this point we've acquired all the resources
800 * we need to run so there's no turning back, we're
801 * cleared for launch.
804 /* Print some important debugging info. */
805 DBRUN(BCE_INFO, bce_dump_driver_state(sc));
807 /* Add the supported sysctls to the kernel. */
812 * The chip reset earlier notified the bootcode that
813 * a driver is present. We now need to start our pulse
814 * routine so that the bootcode is reminded that we're
819 bce_mgmt_init_locked(sc);
822 /* Finally, print some useful adapter info */
823 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
824 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
825 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
826 printf("Bus (PCI%s, %s, %dMHz); ",
827 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
828 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
830 printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver);
831 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
833 if (sc->bce_flags & BCE_USING_MSI_FLAG)
835 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
839 goto bce_attach_exit;
842 bce_release_resources(sc);
846 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
852 /****************************************************************************/
853 /* Device detach function. */
855 /* Stops the controller, resets the controller, and releases resources. */
858 /* 0 on success, positive value on failure. */
859 /****************************************************************************/
861 bce_detach(device_t dev)
863 struct bce_softc *sc = device_get_softc(dev);
867 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
871 #ifdef DEVICE_POLLING
872 if (ifp->if_capenable & IFCAP_POLLING)
873 ether_poll_deregister(ifp);
876 /* Stop the pulse so the bootcode can go to driver absent state. */
877 callout_stop(&sc->bce_pulse_callout);
879 /* Stop and reset the controller. */
882 if (sc->bce_flags & BCE_NO_WOL_FLAG)
883 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
885 msg = BCE_DRV_MSG_CODE_UNLOAD;
891 /* If we have a child device on the MII bus remove it too. */
892 bus_generic_detach(dev);
893 device_delete_child(dev, sc->bce_miibus);
895 /* Release all remaining resources. */
896 bce_release_resources(sc);
898 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
904 /****************************************************************************/
905 /* Device shutdown function. */
907 /* Stops and resets the controller. */
911 /****************************************************************************/
913 bce_shutdown(device_t dev)
915 struct bce_softc *sc = device_get_softc(dev);
918 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Entering %s()\n", __FUNCTION__);
922 if (sc->bce_flags & BCE_NO_WOL_FLAG)
923 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
925 msg = BCE_DRV_MSG_CODE_UNLOAD;
929 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Exiting %s()\n", __FUNCTION__);
933 /****************************************************************************/
934 /* Indirect register read. */
936 /* Reads NetXtreme II registers using an index/data register pair in PCI */
937 /* configuration space. Using this mechanism avoids issues with posted */
938 /* reads but is much slower than memory-mapped I/O. */
941 /* The value of the register. */
942 /****************************************************************************/
944 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
949 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
953 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
954 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
955 __FUNCTION__, offset, val);
959 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
964 /****************************************************************************/
965 /* Indirect register write. */
967 /* Writes NetXtreme II registers using an index/data register pair in PCI */
968 /* configuration space. Using this mechanism avoids issues with posted */
969 /* writes but is muchh slower than memory-mapped I/O. */
973 /****************************************************************************/
975 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
980 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
981 __FUNCTION__, offset, val);
983 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
984 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
988 /****************************************************************************/
989 /* Context memory write. */
991 /* The NetXtreme II controller uses context memory to track connection */
992 /* information for L2 and higher network protocols. */
996 /****************************************************************************/
998 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
1001 DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1002 "val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val)
/*
 * NOTE(review): cid_addr is logged above but is not visibly combined with
 * offset before the address write below — presumably an "offset += cid_addr"
 * style line exists in the unabridged source; confirm.
 */
1005 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1006 REG_WR(sc, BCE_CTX_DATA, val);
1010 /****************************************************************************/
1011 /* PHY register read. */
1013 /* Implements register reads on the MII bus. */
1016 /* The value of the register. */
1017 /****************************************************************************/
1019 bce_miibus_read_reg(device_t dev, int phy, int reg)
1021 struct bce_softc *sc;
1025 sc = device_get_softc(dev);
1027 /* Make sure we are accessing the correct PHY address. */
1028 if (phy != sc->bce_phy_addr) {
1029 DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
/*
 * The EMAC's hardware auto-polling of the PHY must be paused while we
 * drive the MDIO interface manually; it is restored below.
 */
1033 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1034 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1035 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1037 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
/* Read back to flush the posted write before proceeding. */
1038 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* Compose and issue the MDIO read command for (phy, reg). */
1043 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1044 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1045 BCE_EMAC_MDIO_COMM_START_BUSY;
1046 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
/* Poll until the controller clears START_BUSY or we time out. */
1048 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1051 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1052 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
/* Command completed: extract the 16-bit data field. */
1055 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1056 val &= BCE_EMAC_MDIO_COMM_DATA;
/* Timed out: report the failure and re-read the command register. */
1062 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1063 BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1064 __FILE__, __LINE__, phy, reg);
1067 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1070 DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1071 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
/* Re-enable hardware auto-polling if it was enabled on entry. */
1073 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1074 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1075 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1077 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1078 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* MII registers are 16 bits wide. */
1083 return (val & 0xffff);
1088 /****************************************************************************/
1089 /* PHY register write. */
1091 /* Implements register writes on the MII bus. */
1094 /* The value of the register. */
1095 /****************************************************************************/
1097 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1099 struct bce_softc *sc;
1103 sc = device_get_softc(dev);
1105 /* Make sure we are accessing the correct PHY address. */
1106 if (phy != sc->bce_phy_addr) {
1107 DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1111 DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1112 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
/* Pause EMAC auto-polling while we drive MDIO manually (restored below). */
1114 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1115 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1116 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1118 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
/* Read back to flush the posted write. */
1119 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* Compose and issue the MDIO write command carrying the data value. */
1124 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1125 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1126 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1127 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
/* Poll until the controller clears START_BUSY or we time out. */
1129 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1132 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1133 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1139 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1140 BCE_PRINTF("%s(%d): PHY write timeout!\n",
1141 __FILE__, __LINE__);
/* Re-enable hardware auto-polling if it was enabled on entry. */
1143 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1144 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1145 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1147 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1148 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1157 /****************************************************************************/
1158 /* MII bus status change. */
1160 /* Called by the MII bus driver when the PHY establishes link to set the */
1161 /* MAC interface registers. */
1165 /****************************************************************************/
1167 bce_miibus_statchg(device_t dev)
1169 struct bce_softc *sc;
1170 struct mii_data *mii;
1173 sc = device_get_softc(dev);
1175 mii = device_get_softc(sc->bce_miibus);
/* Clear the port-mode/duplex/loopback bits before re-deriving them. */
1177 val = REG_RD(sc, BCE_EMAC_MODE);
1178 val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1179 BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1182 /* Set MII or GMII interface based on the speed negotiated by the PHY. */
1183 switch (IFM_SUBTYPE(mii->mii_media_active)) {
/* 10Mb mode is only distinct on chips other than the 5706. */
1185 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1186 DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1187 val |= BCE_EMAC_MODE_PORT_MII_10;
1192 DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1193 val |= BCE_EMAC_MODE_PORT_MII;
1196 DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1197 val |= BCE_EMAC_MODE_25G;
1201 DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1202 val |= BCE_EMAC_MODE_PORT_GMII;
1205 DBPRINT(sc, BCE_INFO, "Enabling default GMII interface.\n");
1206 val |= BCE_EMAC_MODE_PORT_GMII;
1209 /* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1210 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1211 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1212 val |= BCE_EMAC_MODE_HALF_DUPLEX;
1214 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1216 REG_WR(sc, BCE_EMAC_MODE, val);
1219 /* Todo: Enable flow control support in brgphy and bge. */
1220 /* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1221 if (mii->mii_media_active & IFM_FLAG0)
1222 BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
/*
 * NOTE(review): the TX flow-enable bit (BCE_EMAC_TX_MODE_FLOW_EN) is set
 * in the RX mode register below; it looks like the target should be
 * BCE_EMAC_TX_MODE — confirm against the register definitions before
 * changing.
 */
1223 if (mii->mii_media_active & IFM_FLAG1)
1224 BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1230 /****************************************************************************/
1231 /* Acquire NVRAM lock. */
1233 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1234 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1235 /* for use by the driver. */
1238 /* 0 on success, positive value on failure. */
1239 /****************************************************************************/
1241 bce_acquire_nvram_lock(struct bce_softc *sc)
1246 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Acquiring NVRAM lock.\n");
1248 /* Request access to the flash interface. */
1249 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
/* Poll for arbiter grant of lock 2. */
1250 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1251 val = REG_RD(sc, BCE_NVM_SW_ARB);
1252 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
/* Loop exhausted without the grant bit: lock acquisition failed. */
1258 if (j >= NVRAM_TIMEOUT_COUNT) {
1259 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1267 /****************************************************************************/
1268 /* Release NVRAM lock. */
1270 /* When the caller is finished accessing NVRAM the lock must be released. */
1271 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1272 /* for use by the driver. */
1275 /* 0 on success, positive value on failure. */
1276 /****************************************************************************/
1278 bce_release_nvram_lock(struct bce_softc *sc)
1283 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Releasing NVRAM lock.\n");
1286 * Relinquish nvram interface.
1288 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
/* Poll until the arbiter confirms lock 2 has been released. */
1290 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1291 val = REG_RD(sc, BCE_NVM_SW_ARB);
1292 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
/* NOTE(review): the debug string below misspells "releasing". */
1298 if (j >= NVRAM_TIMEOUT_COUNT) {
1299 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1307 #ifdef BCE_NVRAM_WRITE_SUPPORT
1308 /****************************************************************************/
1309 /* Enable NVRAM write access. */
1311 /* Before writing to NVRAM the caller must enable NVRAM writes. */
1314 /* 0 on success, positive value on failure. */
1315 /****************************************************************************/
1317 bce_enable_nvram_write(struct bce_softc *sc)
1321 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM write.\n");
/* Allow PCI-initiated NVRAM writes at the controller level. */
1323 val = REG_RD(sc, BCE_MISC_CFG);
1324 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
/* Non-buffered flash parts also need an explicit WREN command. */
1326 if (!sc->bce_flash_info->buffered) {
1329 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1330 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
/* Poll for command completion. */
1332 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1335 val = REG_RD(sc, BCE_NVM_COMMAND);
1336 if (val & BCE_NVM_COMMAND_DONE)
1340 if (j >= NVRAM_TIMEOUT_COUNT) {
1341 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1349 /****************************************************************************/
1350 /* Disable NVRAM write access. */
1352 /* When the caller is finished writing to NVRAM write access must be */
1357 /****************************************************************************/
1359 bce_disable_nvram_write(struct bce_softc *sc)
1363 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM write.\n");
/* Clear the NVRAM write-enable bit in the misc config register. */
1365 val = REG_RD(sc, BCE_MISC_CFG);
1366 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1371 /****************************************************************************/
1372 /* Enable NVRAM access. */
1374 /* Before accessing NVRAM for read or write operations the caller must */
1375 /* enable NVRAM access. */
1379 /****************************************************************************/
1381 bce_enable_nvram_access(struct bce_softc *sc)
1385 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM access.\n");
1387 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1388 /* Enable both bits, even on read. */
1389 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1390 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1394 /****************************************************************************/
1395 /* Disable NVRAM access. */
1397 /* When the caller is finished accessing NVRAM access must be disabled. */
1401 /****************************************************************************/
1403 bce_disable_nvram_access(struct bce_softc *sc)
1407 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM access.\n");
1409 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1411 /* Disable both bits, even after read. */
1412 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1413 val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1414 BCE_NVM_ACCESS_ENABLE_WR_EN));
1418 #ifdef BCE_NVRAM_WRITE_SUPPORT
1419 /****************************************************************************/
1420 /* Erase NVRAM page before writing. */
1422 /* Non-buffered flash parts require that a page be erased before it is */
1426 /* 0 on success, positive value on failure. */
1427 /****************************************************************************/
1429 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1434 /* Buffered flash doesn't require an erase. */
1435 if (sc->bce_flash_info->buffered)
1438 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Erasing NVRAM page.\n");
1440 /* Build an erase command. */
1441 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1442 BCE_NVM_COMMAND_DOIT;
1445 * Clear the DONE bit separately, set the NVRAM address to erase,
1446 * and issue the erase command.
1448 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1449 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1450 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1452 /* Wait for completion. */
1453 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1458 val = REG_RD(sc, BCE_NVM_COMMAND);
1459 if (val & BCE_NVM_COMMAND_DONE)
/* Loop exhausted without DONE: the erase failed. */
1463 if (j >= NVRAM_TIMEOUT_COUNT) {
1464 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1470 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1473 /****************************************************************************/
1474 /* Read a dword (32 bits) from NVRAM. */
1476 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1477 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1480 /* 0 on success and the 32 bit value read, positive value on failure. */
1481 /****************************************************************************/
1483 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1489 /* Build the command word. */
1490 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1492 /* Calculate the offset for buffered flash. */
1493 if (sc->bce_flash_info->buffered) {
1494 offset = ((offset / sc->bce_flash_info->page_size) <<
1495 sc->bce_flash_info->page_bits) +
1496 (offset % sc->bce_flash_info->page_size);
1500 * Clear the DONE bit separately, set the address to read,
1501 * and issue the read.
1503 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1504 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1505 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1507 /* Wait for completion. */
1508 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1513 val = REG_RD(sc, BCE_NVM_COMMAND);
1514 if (val & BCE_NVM_COMMAND_DONE) {
/* Convert from big-endian NVRAM order and copy out the 4 bytes. */
1515 val = REG_RD(sc, BCE_NVM_READ);
1517 val = bce_be32toh(val);
1518 memcpy(ret_val, &val, 4);
1523 /* Check for errors. */
1524 if (i >= NVRAM_TIMEOUT_COUNT) {
1525 BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1526 __FILE__, __LINE__, offset);
1534 #ifdef BCE_NVRAM_WRITE_SUPPORT
1535 /****************************************************************************/
1536 /* Write a dword (32 bits) to NVRAM. */
1538 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1539 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1540 /* enabled NVRAM write access. */
1543 /* 0 on success, positive value on failure. */
1544 /****************************************************************************/
1546 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1552 /* Build the command word. */
1553 cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1555 /* Calculate the offset for buffered flash. */
1556 if (sc->bce_flash_info->buffered) {
1557 offset = ((offset / sc->bce_flash_info->page_size) <<
1558 sc->bce_flash_info->page_bits) +
1559 (offset % sc->bce_flash_info->page_size);
1563 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1564 * set the NVRAM address to write, and issue the write command
1566 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1567 memcpy(&val32, val, 4);
1568 val32 = htobe32(val32);
1569 REG_WR(sc, BCE_NVM_WRITE, val32);
1570 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1571 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1573 /* Wait for completion. */
1574 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1577 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
/* Loop exhausted without DONE: report the write failure. */
1580 if (j >= NVRAM_TIMEOUT_COUNT) {
1581 BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1582 __FILE__, __LINE__, offset);
1588 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1591 /****************************************************************************/
1592 /* Initialize NVRAM access. */
1594 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1595 /* access that device. */
1598 /* 0 on success, positive value on failure. */
1599 /****************************************************************************/
1601 bce_init_nvram(struct bce_softc *sc)
1604 int j, entry_count, rc;
1605 struct flash_spec *flash;
1607 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Entering %s()\n", __FUNCTION__);
1609 /* Determine the selected interface. */
1610 val = REG_RD(sc, BCE_NVM_CFG1);
1612 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1617 * Flash reconfiguration is required to support additional
1618 * NVRAM devices not directly supported in hardware.
1619 * Check if the flash interface was reconfigured
/* Bit 30 of NVM_CFG1 indicates the bootcode already reconfigured flash. */
1623 if (val & 0x40000000) {
1624 /* Flash interface reconfigured by bootcode. */
1626 DBPRINT(sc,BCE_INFO_LOAD,
1627 "bce_init_nvram(): Flash WAS reconfigured.\n");
/* Match against the table using the backup strapping bits. */
1629 for (j = 0, flash = &flash_table[0]; j < entry_count;
1631 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1632 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1633 sc->bce_flash_info = flash;
1638 /* Flash interface not yet reconfigured. */
1641 DBPRINT(sc,BCE_INFO_LOAD,
1642 "bce_init_nvram(): Flash was NOT reconfigured.\n");
/* Bit 23 selects which strapping mask applies to this part. */
1644 if (val & (1 << 23))
1645 mask = FLASH_BACKUP_STRAP_MASK;
1647 mask = FLASH_STRAP_MASK;
1649 /* Look for the matching NVRAM device configuration data. */
1650 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1652 /* Check if the device matches any of the known devices. */
1653 if ((val & mask) == (flash->strapping & mask)) {
1654 /* Found a device match. */
1655 sc->bce_flash_info = flash;
1657 /* Request access to the flash interface. */
1658 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1661 /* Reconfigure the flash interface. */
1662 bce_enable_nvram_access(sc);
1663 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1664 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1665 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1666 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1667 bce_disable_nvram_access(sc);
1668 bce_release_nvram_lock(sc);
1675 /* Check if a matching device was found. */
1676 if (j == entry_count) {
1677 sc->bce_flash_info = NULL;
1678 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
1679 __FILE__, __LINE__);
1683 /* Write the flash config data to the shared memory interface. */
1684 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1685 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
/* Prefer the size advertised via shared memory; fall back to the table. */
1687 sc->bce_flash_size = val;
1689 sc->bce_flash_size = sc->bce_flash_info->total_size;
1691 DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1692 sc->bce_flash_info->total_size);
1694 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Exiting %s()\n", __FUNCTION__);
1700 /****************************************************************************/
1701 /* Read an arbitrary range of data from NVRAM. */
1703 /* Prepares the NVRAM interface for access and reads the requested data */
1704 /* into the supplied buffer. */
1707 /* 0 on success and the data read, positive value on failure. */
1708 /****************************************************************************/
1710 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1714 u32 cmd_flags, offset32, len32, extra;
1719 /* Request access to the flash interface. */
1720 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1723 /* Enable access to flash interface */
1724 bce_enable_nvram_access(sc);
/* Handle a leading unaligned fragment: read the dword containing it. */
1737 pre_len = 4 - (offset & 3);
1739 if (pre_len >= len32) {
1741 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1744 cmd_flags = BCE_NVM_COMMAND_FIRST;
1747 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
/* Copy only the requested bytes out of the aligned dword. */
1752 memcpy(ret_buf, buf + (offset & 3), pre_len);
/* Handle a trailing unaligned fragment by rounding up to a whole dword. */
1760 extra = 4 - (len32 & 3);
1761 len32 = (len32 + 4) & ~3;
1768 cmd_flags = BCE_NVM_COMMAND_LAST;
1770 cmd_flags = BCE_NVM_COMMAND_FIRST |
1771 BCE_NVM_COMMAND_LAST;
1773 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1775 memcpy(ret_buf, buf, 4 - extra);
1777 else if (len32 > 0) {
1780 /* Read the first word. */
1784 cmd_flags = BCE_NVM_COMMAND_FIRST;
1786 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1788 /* Advance to the next dword. */
/* Read the aligned middle of the range one dword at a time. */
1793 while (len32 > 4 && rc == 0) {
1794 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1796 /* Advance to the next dword. */
/* Read the final dword and copy only the bytes still needed. */
1805 cmd_flags = BCE_NVM_COMMAND_LAST;
1806 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1808 memcpy(ret_buf, buf, 4 - extra);
1811 /* Disable access to flash interface and release the lock. */
1812 bce_disable_nvram_access(sc);
1813 bce_release_nvram_lock(sc);
1819 #ifdef BCE_NVRAM_WRITE_SUPPORT
1820 /****************************************************************************/
1821 /* Write an arbitrary range of data from NVRAM. */
1823 /* Prepares the NVRAM interface for write access and writes the requested */
1824 /* data from the supplied buffer. The caller is responsible for */
1825 /* calculating any appropriate CRCs. */
1828 /* 0 on success, positive value on failure. */
1829 /****************************************************************************/
1831 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1834 u32 written, offset32, len32;
1835 u8 *buf, start[4], end[4];
1837 int align_start, align_end;
1842 align_start = align_end = 0;
/* Widen the range to dword alignment, preserving bytes outside it. */
1844 if ((align_start = (offset32 & 3))) {
1846 len32 += align_start;
1847 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1852 if ((len32 > 4) || !align_start) {
1853 align_end = 4 - (len32 & 3);
1855 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
/* Assemble the aligned image: preserved head + caller data + tail. */
1862 if (align_start || align_end) {
1863 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1867 memcpy(buf, start, 4);
1870 memcpy(buf + len32 - 4, end, 4);
1872 memcpy(buf + align_start, data_buf, buf_size);
/* Write one flash page per iteration until the whole range is covered. */
1876 while ((written < len32) && (rc == 0)) {
1877 u32 page_start, page_end, data_start, data_end;
1878 u32 addr, cmd_flags;
1880 u8 flash_buffer[264];
1882 /* Find the page_start addr */
1883 page_start = offset32 + written;
1884 page_start -= (page_start % sc->bce_flash_info->page_size);
1885 /* Find the page_end addr */
1886 page_end = page_start + sc->bce_flash_info->page_size;
1887 /* Find the data_start addr */
1888 data_start = (written == 0) ? offset32 : page_start;
1889 /* Find the data_end addr */
1890 data_end = (page_end > offset32 + len32) ?
1891 (offset32 + len32) : page_end;
1893 /* Request access to the flash interface. */
1894 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1895 goto nvram_write_end;
1897 /* Enable access to flash interface */
1898 bce_enable_nvram_access(sc);
1900 cmd_flags = BCE_NVM_COMMAND_FIRST;
1901 if (sc->bce_flash_info->buffered == 0) {
1904 /* Read the whole page into the buffer
1905 * (non-buffer flash only) */
1906 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1907 if (j == (sc->bce_flash_info->page_size - 4)) {
1908 cmd_flags |= BCE_NVM_COMMAND_LAST;
1910 rc = bce_nvram_read_dword(sc,
1916 goto nvram_write_end;
1922 /* Enable writes to flash interface (unlock write-protect) */
1923 if ((rc = bce_enable_nvram_write(sc)) != 0)
1924 goto nvram_write_end;
1926 /* Erase the page */
1927 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1928 goto nvram_write_end;
1930 /* Re-enable the write again for the actual write */
1931 bce_enable_nvram_write(sc);
1933 /* Loop to write back the buffer data from page_start to
1936 if (sc->bce_flash_info->buffered == 0) {
1937 for (addr = page_start; addr < data_start;
1938 addr += 4, i += 4) {
1940 rc = bce_nvram_write_dword(sc, addr,
1941 &flash_buffer[i], cmd_flags);
1944 goto nvram_write_end;
1950 /* Loop to write the new data from data_start to data_end */
1951 for (addr = data_start; addr < data_end; addr += 4, i++) {
1952 if ((addr == page_end - 4) ||
1953 ((sc->bce_flash_info->buffered) &&
1954 (addr == data_end - 4))) {
1956 cmd_flags |= BCE_NVM_COMMAND_LAST;
1958 rc = bce_nvram_write_dword(sc, addr, buf,
1962 goto nvram_write_end;
1968 /* Loop to write back the buffer data from data_end
1970 if (sc->bce_flash_info->buffered == 0) {
1971 for (addr = data_end; addr < page_end;
1972 addr += 4, i += 4) {
1974 if (addr == page_end-4) {
1975 cmd_flags = BCE_NVM_COMMAND_LAST;
1977 rc = bce_nvram_write_dword(sc, addr,
1978 &flash_buffer[i], cmd_flags);
1981 goto nvram_write_end;
1987 /* Disable writes to flash interface (lock write-protect) */
1988 bce_disable_nvram_write(sc);
1990 /* Disable access to flash interface */
1991 bce_disable_nvram_access(sc);
1992 bce_release_nvram_lock(sc);
1994 /* Increment written */
1995 written += data_end - data_start;
/* Free the staging buffer only if one was allocated above. */
1999 if (align_start || align_end)
2000 free(buf, M_DEVBUF);
2004 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2007 /****************************************************************************/
2008 /* Verifies that NVRAM is accessible and contains valid data. */
2010 /* Reads the configuration data from NVRAM and verifies that the CRC is */
2014 /* 0 on success, positive value on failure. */
2015 /****************************************************************************/
2017 bce_nvram_test(struct bce_softc *sc)
2019 u32 buf[BCE_NVRAM_SIZE / 4];
2020 u8 *data = (u8 *) buf;
2026 * Check that the device NVRAM is valid by reading
2027 * the magic value at offset 0.
2029 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
2030 goto bce_nvram_test_done;
2033 magic = bce_be32toh(buf[0]);
2034 if (magic != BCE_NVRAM_MAGIC) {
2036 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2038 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2039 goto bce_nvram_test_done;
2043 * Verify that the device NVRAM includes valid
2044 * configuration data.
2046 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
2047 goto bce_nvram_test_done;
/* CRC of a block plus its stored CRC yields a fixed residual when valid. */
2049 csum = ether_crc32_le(data, 0x100);
2050 if (csum != BCE_CRC32_RESIDUAL) {
2052 BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2053 "Expected: 0x%08X, Found: 0x%08X\n",
2054 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2055 goto bce_nvram_test_done;
/* NOTE(review): the format string below has "08%08X"; it looks like it
 * should read "0x%08X" — fix alongside other string corrections. */
2058 csum = ether_crc32_le(data + 0x100, 0x100);
2059 if (csum != BCE_CRC32_RESIDUAL) {
2060 BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2061 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2062 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2066 bce_nvram_test_done:
2071 /****************************************************************************/
2072 /* Free any DMA memory owned by the driver. */
2074 /* Scans through each data structure that requires DMA memory and frees */
2075 /* the memory if allocated. */
2079 /****************************************************************************/
2081 bce_dma_free(struct bce_softc *sc)
2085 DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2087 /* Destroy the status block. */
2088 if (sc->status_block != NULL)
2094 if (sc->status_map != NULL) {
2098 bus_dmamap_destroy(sc->status_tag,
2102 if (sc->status_tag != NULL)
2103 bus_dma_tag_destroy(sc->status_tag);
2106 /* Destroy the statistics block. */
2107 if (sc->stats_block != NULL)
2113 if (sc->stats_map != NULL) {
2117 bus_dmamap_destroy(sc->stats_tag,
2121 if (sc->stats_tag != NULL)
2122 bus_dma_tag_destroy(sc->stats_tag);
2125 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2126 for (i = 0; i < TX_PAGES; i++ ) {
2127 if (sc->tx_bd_chain[i] != NULL)
2129 sc->tx_bd_chain_tag,
2131 sc->tx_bd_chain_map[i]);
2133 if (sc->tx_bd_chain_map[i] != NULL) {
2135 sc->tx_bd_chain_tag,
2136 sc->tx_bd_chain_map[i]);
2138 sc->tx_bd_chain_tag,
2139 sc->tx_bd_chain_map[i]);
2144 /* Destroy the TX buffer descriptor tag. */
2145 if (sc->tx_bd_chain_tag != NULL)
2146 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2149 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2150 for (i = 0; i < RX_PAGES; i++ ) {
2151 if (sc->rx_bd_chain[i] != NULL)
2153 sc->rx_bd_chain_tag,
2155 sc->rx_bd_chain_map[i]);
2157 if (sc->rx_bd_chain_map[i] != NULL) {
2159 sc->rx_bd_chain_tag,
2160 sc->rx_bd_chain_map[i]);
2162 sc->rx_bd_chain_tag,
2163 sc->rx_bd_chain_map[i]);
2167 /* Destroy the RX buffer descriptor tag. */
2168 if (sc->rx_bd_chain_tag != NULL)
2169 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2172 /* Unload and destroy the TX mbuf maps. */
2173 for (i = 0; i < TOTAL_TX_BD; i++) {
2174 if (sc->tx_mbuf_map[i] != NULL) {
2175 bus_dmamap_unload(sc->tx_mbuf_tag,
2176 sc->tx_mbuf_map[i]);
2177 bus_dmamap_destroy(sc->tx_mbuf_tag,
2178 sc->tx_mbuf_map[i]);
2182 /* Destroy the TX mbuf tag. */
2183 if (sc->tx_mbuf_tag != NULL)
2184 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2187 /* Unload and destroy the RX mbuf maps. */
2188 for (i = 0; i < TOTAL_RX_BD; i++) {
2189 if (sc->rx_mbuf_map[i] != NULL) {
2190 bus_dmamap_unload(sc->rx_mbuf_tag,
2191 sc->rx_mbuf_map[i]);
2192 bus_dmamap_destroy(sc->rx_mbuf_tag,
2193 sc->rx_mbuf_map[i]);
2197 /* Destroy the RX mbuf tag. */
2198 if (sc->rx_mbuf_tag != NULL)
2199 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2202 /* Destroy the parent tag last, after all child tags are gone. */
2203 if (sc->parent_tag != NULL)
2204 bus_dma_tag_destroy(sc->parent_tag);
2206 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2211 /****************************************************************************/
2212 /* Get DMA memory from the OS. */
2214 /* Validates that the OS has provided DMA buffers in response to a */
2215 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2216 /* When the callback is used the OS will return 0 for the mapping function */
2217 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
2218 /* failures back to the caller. */
2222 /****************************************************************************/
2224 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2226 bus_addr_t *busaddr = arg;
2228 /* Simulate a mapping failure. */
2229 DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2230 printf("bce: %s(%d): Simulating DMA mapping error.\n",
2231 __FILE__, __LINE__);
2234 /* Check for an error and signal the caller that an error occurred. */
2236 printf("bce %s(%d): DMA mapping error! error = %d, "
2237 "nseg = %d\n", __FILE__, __LINE__, error, nseg);
/* Success: hand the single segment's bus address back through arg. */
2242 *busaddr = segs->ds_addr;
2247 /****************************************************************************/
2248 /* Allocate any DMA memory needed by the driver. */
2250 /* Allocates DMA memory needed for the various global structures needed by */
2254 /* 0 for success, positive value for failure. */
2255 /****************************************************************************/
2257 bce_dma_alloc(device_t dev)
2259 struct bce_softc *sc;
2260 int i, error, rc = 0;
2262 bus_size_t max_size, max_seg_size;
2265 sc = device_get_softc(dev);
2267 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2270 * Allocate the parent bus DMA tag appropriate for PCI.
2272 if (bus_dma_tag_create(NULL,
2279 BUS_SPACE_UNRESTRICTED,
2280 BUS_SPACE_MAXSIZE_32BIT,
2284 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
2285 __FILE__, __LINE__);
2287 goto bce_dma_alloc_exit;
2291 * Create a DMA tag for the status block, allocate and clear the
2292 * memory, map the memory into DMA space, and fetch the physical
2293 * address of the block.
2295 if (bus_dma_tag_create(sc->parent_tag,
2307 BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
2308 __FILE__, __LINE__);
2310 goto bce_dma_alloc_exit;
2313 if(bus_dmamem_alloc(sc->status_tag,
2314 (void **)&sc->status_block,
2317 BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
2318 __FILE__, __LINE__);
2320 goto bce_dma_alloc_exit;
2323 bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2325 error = bus_dmamap_load(sc->status_tag,
2334 BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
2335 __FILE__, __LINE__);
2337 goto bce_dma_alloc_exit;
2340 sc->status_block_paddr = busaddr;
2341 /* DRC - Fix for 64 bit addresses. */
2342 DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2343 (u32) sc->status_block_paddr);
2346 * Create a DMA tag for the statistics block, allocate and clear the
2347 * memory, map the memory into DMA space, and fetch the physical
2348 * address of the block.
2350 if (bus_dma_tag_create(sc->parent_tag,
2362 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
2363 __FILE__, __LINE__);
2365 goto bce_dma_alloc_exit;
2368 if (bus_dmamem_alloc(sc->stats_tag,
2369 (void **)&sc->stats_block,
2372 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
2373 __FILE__, __LINE__);
2375 goto bce_dma_alloc_exit;
2378 bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2380 error = bus_dmamap_load(sc->stats_tag,
2389 BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
2390 __FILE__, __LINE__);
2392 goto bce_dma_alloc_exit;
2395 sc->stats_block_paddr = busaddr;
2396 /* DRC - Fix for 64 bit address. */
2397 DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2398 (u32) sc->stats_block_paddr);
2401 * Create a DMA tag for the TX buffer descriptor chain,
2402 * allocate and clear the memory, and fetch the
2403 * physical address of the block.
2405 if(bus_dma_tag_create(sc->parent_tag,
2411 BCE_TX_CHAIN_PAGE_SZ,
2413 BCE_TX_CHAIN_PAGE_SZ,
2416 &sc->tx_bd_chain_tag)) {
2417 BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2418 __FILE__, __LINE__);
2420 goto bce_dma_alloc_exit;
2423 for (i = 0; i < TX_PAGES; i++) {
2425 if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
2426 (void **)&sc->tx_bd_chain[i],
2428 &sc->tx_bd_chain_map[i])) {
2429 BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
2430 "chain DMA memory!\n", __FILE__, __LINE__);
2432 goto bce_dma_alloc_exit;
2435 error = bus_dmamap_load(sc->tx_bd_chain_tag,
2436 sc->tx_bd_chain_map[i],
2438 BCE_TX_CHAIN_PAGE_SZ,
2444 BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
2445 __FILE__, __LINE__);
2447 goto bce_dma_alloc_exit;
2450 sc->tx_bd_chain_paddr[i] = busaddr;
2451 /* DRC - Fix for 64 bit systems. */
2452 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2453 i, (u32) sc->tx_bd_chain_paddr[i]);
2456 max_size = MCLBYTES * BCE_MAX_SEGMENTS;
2457 max_segments = BCE_MAX_SEGMENTS;
2458 max_seg_size = MCLBYTES;
2459 /* Create a DMA tag for TX mbufs. */
2460 if (bus_dma_tag_create(sc->parent_tag,
2471 &sc->tx_mbuf_tag)) {
2472 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
2473 __FILE__, __LINE__);
2475 goto bce_dma_alloc_exit;
2478 /* Create DMA maps for the TX mbufs clusters. */
2479 for (i = 0; i < TOTAL_TX_BD; i++) {
2480 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2481 &sc->tx_mbuf_map[i])) {
2482 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
2483 __FILE__, __LINE__);
2485 goto bce_dma_alloc_exit;
2490 * Create a DMA tag for the RX buffer descriptor chain,
2491 * allocate and clear the memory, and fetch the physical
2492 * address of the blocks.
2494 if (bus_dma_tag_create(sc->parent_tag,
2500 BCE_RX_CHAIN_PAGE_SZ,
2502 BCE_RX_CHAIN_PAGE_SZ,
2505 &sc->rx_bd_chain_tag)) {
2506 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2507 __FILE__, __LINE__);
2509 goto bce_dma_alloc_exit;
2512 for (i = 0; i < RX_PAGES; i++) {
2514 if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
2515 (void **)&sc->rx_bd_chain[i],
2517 &sc->rx_bd_chain_map[i])) {
2518 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
2519 "DMA memory!\n", __FILE__, __LINE__);
2521 goto bce_dma_alloc_exit;
2524 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2526 error = bus_dmamap_load(sc->rx_bd_chain_tag,
2527 sc->rx_bd_chain_map[i],
2529 BCE_RX_CHAIN_PAGE_SZ,
2535 BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
2536 __FILE__, __LINE__);
2538 goto bce_dma_alloc_exit;
2541 sc->rx_bd_chain_paddr[i] = busaddr;
2542 /* DRC - Fix for 64 bit systems. */
2543 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2544 i, (u32) sc->rx_bd_chain_paddr[i]);
2548 * Create a DMA tag for RX mbufs.
2550 if (bus_dma_tag_create(sc->parent_tag,
2561 &sc->rx_mbuf_tag)) {
2562 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
2563 __FILE__, __LINE__);
2565 goto bce_dma_alloc_exit;
2568 /* Create DMA maps for the RX mbuf clusters. */
2569 for (i = 0; i < TOTAL_RX_BD; i++) {
2570 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2571 &sc->rx_mbuf_map[i])) {
2572 BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
2573 __FILE__, __LINE__);
2575 goto bce_dma_alloc_exit;
2580 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2586 /****************************************************************************/
2587 /* Release all resources used by the driver. */
2589 /* Releases all resources acquired by the driver including interrupts, */
2590 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2594 /****************************************************************************/
2596 bce_release_resources(struct bce_softc *sc)
2600 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
/*
 * Teardown order matters: the interrupt handler must be removed before
 * the IRQ resource is released, and MSI vectors are released only after
 * the IRQ resource itself.  Every step is guarded so this is safe to
 * call from a partially-failed attach.
 */
2606 	if (sc->bce_intrhand != NULL) {
2607 		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
2608 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
/* MSI uses rid 1, legacy INTx uses rid 0 — must match the rid used at allocation. */
2611 	if (sc->bce_res_irq != NULL) {
2612 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
2613 		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
2617 	if (sc->bce_flags & BCE_USING_MSI_FLAG) {
2618 		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI vector.\n");
2619 		pci_release_msi(dev);
2622 	if (sc->bce_res_mem != NULL) {
2623 		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
2624 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
2627 	if (sc->bce_ifp != NULL) {
2628 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
2629 		if_free(sc->bce_ifp);
/* The mutex may never have been initialized if attach failed early. */
2632 	if (mtx_initialized(&sc->bce_mtx))
2633 		BCE_LOCK_DESTROY(sc);
2635 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2640 /****************************************************************************/
2641 /* Firmware synchronization. */
2643 /* Before performing certain events such as a chip reset, synchronize with */
2644 /* the firmware first. */
2647 /* 0 for success, positive value for failure. */
2648 /****************************************************************************/
2650 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2655 	/* Don't waste any time if we've timed out before. */
2656 	if (sc->bce_fw_timed_out) {
2658 		goto bce_fw_sync_exit;
2661 	/* Increment the message sequence number. */
2662 	sc->bce_fw_wr_seq++;
2663 	msg_data |= sc->bce_fw_wr_seq;
2665 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2667 	/* Send the message to the bootcode driver mailbox. */
2668 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
/*
 * Poll the firmware mailbox until the bootcode echoes back our sequence
 * number (the ACK field of BCE_FW_MB matches the sequence bits we sent).
 */
2670 	/* Wait for the bootcode to acknowledge the message. */
2671 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2672 		/* Check for a response in the bootcode firmware mailbox. */
2673 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2674 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
/*
 * WAIT0 messages are allowed to time out silently; for everything else a
 * timeout is reported and the FW_TIMEOUT code is written back so the
 * bootcode knows the driver has stopped waiting.
 */
2679 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2680 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2681 	    ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2683 		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
2684 		    "msg_data = 0x%08X\n",
2685 		    __FILE__, __LINE__, msg_data);
2687 		msg_data &= ~BCE_DRV_MSG_CODE;
2688 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2690 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
/* Latch the timeout so future calls bail out immediately (checked at entry). */
2692 		sc->bce_fw_timed_out = 1;
2701 /****************************************************************************/
2702 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2706 /****************************************************************************/
2708 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2709 	u32 rv2p_code_len, u32 rv2p_proc)
/*
 * RV2P instructions are 64 bits wide: each iteration writes one
 * instruction as a HIGH/LOW register pair, then commits it to the
 * selected processor's instruction memory at index i/8.
 */
2714 	for (i = 0; i < rv2p_code_len; i += 8) {
2715 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2717 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2720 		if (rv2p_proc == RV2P_PROC1) {
2721 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2722 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2725 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2726 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2730 	/* Reset the processor, un-stall is done later. */
2731 	if (rv2p_proc == RV2P_PROC1) {
2732 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2735 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2740 /****************************************************************************/
2741 /* Load RISC processor firmware. */
2743 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2744 /* associated with a particular processor. */
2748 /****************************************************************************/
2750 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
/* Halt the target CPU and clear its state before touching scratchpad memory. */
2757 	val = REG_RD_IND(sc, cpu_reg->mode);
2758 	val |= cpu_reg->mode_value_halt;
2759 	REG_WR_IND(sc, cpu_reg->mode, val);
2760 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
/*
 * Each firmware section is copied word-by-word into the CPU scratchpad.
 * The section's MIPS virtual address is translated to a scratchpad offset
 * by subtracting mips_view_base and adding spad_base.
 */
2762 	/* Load the Text area. */
2763 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2767 	for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2768 		REG_WR_IND(sc, offset, fw->text[j]);
2772 	/* Load the Data area. */
2773 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2777 	for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2778 		REG_WR_IND(sc, offset, fw->data[j]);
2782 	/* Load the SBSS area. */
2783 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2787 	for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2788 		REG_WR_IND(sc, offset, fw->sbss[j]);
2792 	/* Load the BSS area. */
2793 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2797 	for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2798 		REG_WR_IND(sc, offset, fw->bss[j]);
2802 	/* Load the Read-Only area. */
2803 	offset = cpu_reg->spad_base +
2804 	    (fw->rodata_addr - cpu_reg->mips_view_base);
2808 	for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2809 		REG_WR_IND(sc, offset, fw->rodata[j]);
2813 	/* Clear the pre-fetch instruction. */
2814 	REG_WR_IND(sc, cpu_reg->inst, 0);
/* Point the program counter at the firmware entry point. */
2815 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
/* Release the halt bit last so the CPU starts with a clean state. */
2817 	/* Start the CPU. */
2818 	val = REG_RD_IND(sc, cpu_reg->mode);
2819 	val &= ~cpu_reg->mode_value_halt;
2820 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2821 	REG_WR_IND(sc, cpu_reg->mode, val);
2825 /****************************************************************************/
2826 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2828 /* Loads the firmware for each CPU and starts the CPU. */
2832 /****************************************************************************/
2834 bce_init_cpus(struct bce_softc *sc)
2836 	struct cpu_reg cpu_reg;
/*
 * Load and start the firmware for every on-chip processor.  For each of
 * RXP, TXP, TPAT and COM the same cpu_reg/fw structures are refilled with
 * that processor's register addresses and firmware image (from if_bcefw.h)
 * and handed to bce_load_cpu_fw().  The two RV2P processors use their own
 * dedicated loader.
 */
2839 	/* Initialize the RV2P processor. */
2840 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2841 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2843 	/* Initialize the RX Processor. */
2844 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2845 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2846 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2847 	cpu_reg.state = BCE_RXP_CPU_STATE;
2848 	cpu_reg.state_value_clear = 0xffffff;
2849 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2850 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2851 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2852 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2853 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2854 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
/* Base of the CPU's MIPS view of memory; used to relocate fw sections. */
2855 	cpu_reg.mips_view_base = 0x8000000;
2857 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2858 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2859 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2860 	fw.start_addr = bce_RXP_b06FwStartAddr;
2862 	fw.text_addr = bce_RXP_b06FwTextAddr;
2863 	fw.text_len = bce_RXP_b06FwTextLen;
2865 	fw.text = bce_RXP_b06FwText;
2867 	fw.data_addr = bce_RXP_b06FwDataAddr;
2868 	fw.data_len = bce_RXP_b06FwDataLen;
2870 	fw.data = bce_RXP_b06FwData;
2872 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2873 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2875 	fw.sbss = bce_RXP_b06FwSbss;
2877 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2878 	fw.bss_len = bce_RXP_b06FwBssLen;
2880 	fw.bss = bce_RXP_b06FwBss;
2882 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2883 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2884 	fw.rodata_index = 0;
2885 	fw.rodata = bce_RXP_b06FwRodata;
2887 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2888 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2890 	/* Initialize the TX Processor. */
2891 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2892 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2893 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2894 	cpu_reg.state = BCE_TXP_CPU_STATE;
2895 	cpu_reg.state_value_clear = 0xffffff;
2896 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2897 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2898 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2899 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2900 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2901 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2902 	cpu_reg.mips_view_base = 0x8000000;
2904 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2905 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2906 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2907 	fw.start_addr = bce_TXP_b06FwStartAddr;
2909 	fw.text_addr = bce_TXP_b06FwTextAddr;
2910 	fw.text_len = bce_TXP_b06FwTextLen;
2912 	fw.text = bce_TXP_b06FwText;
2914 	fw.data_addr = bce_TXP_b06FwDataAddr;
2915 	fw.data_len = bce_TXP_b06FwDataLen;
2917 	fw.data = bce_TXP_b06FwData;
2919 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2920 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2922 	fw.sbss = bce_TXP_b06FwSbss;
2924 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2925 	fw.bss_len = bce_TXP_b06FwBssLen;
2927 	fw.bss = bce_TXP_b06FwBss;
2929 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2930 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2931 	fw.rodata_index = 0;
2932 	fw.rodata = bce_TXP_b06FwRodata;
2934 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2935 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2937 	/* Initialize the TX Patch-up Processor. */
2938 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2939 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2940 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2941 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2942 	cpu_reg.state_value_clear = 0xffffff;
2943 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2944 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2945 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2946 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2947 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2948 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2949 	cpu_reg.mips_view_base = 0x8000000;
2951 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2952 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2953 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2954 	fw.start_addr = bce_TPAT_b06FwStartAddr;
2956 	fw.text_addr = bce_TPAT_b06FwTextAddr;
2957 	fw.text_len = bce_TPAT_b06FwTextLen;
2959 	fw.text = bce_TPAT_b06FwText;
2961 	fw.data_addr = bce_TPAT_b06FwDataAddr;
2962 	fw.data_len = bce_TPAT_b06FwDataLen;
2964 	fw.data = bce_TPAT_b06FwData;
2966 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2967 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
2969 	fw.sbss = bce_TPAT_b06FwSbss;
2971 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
2972 	fw.bss_len = bce_TPAT_b06FwBssLen;
2974 	fw.bss = bce_TPAT_b06FwBss;
2976 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2977 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
2978 	fw.rodata_index = 0;
2979 	fw.rodata = bce_TPAT_b06FwRodata;
2981 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2982 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2984 	/* Initialize the Completion Processor. */
2985 	cpu_reg.mode = BCE_COM_CPU_MODE;
2986 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2987 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2988 	cpu_reg.state = BCE_COM_CPU_STATE;
2989 	cpu_reg.state_value_clear = 0xffffff;
2990 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2991 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2992 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2993 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2994 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2995 	cpu_reg.spad_base = BCE_COM_SCRATCH;
2996 	cpu_reg.mips_view_base = 0x8000000;
2998 	fw.ver_major = bce_COM_b06FwReleaseMajor;
2999 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
3000 	fw.ver_fix = bce_COM_b06FwReleaseFix;
3001 	fw.start_addr = bce_COM_b06FwStartAddr;
3003 	fw.text_addr = bce_COM_b06FwTextAddr;
3004 	fw.text_len = bce_COM_b06FwTextLen;
3006 	fw.text = bce_COM_b06FwText;
3008 	fw.data_addr = bce_COM_b06FwDataAddr;
3009 	fw.data_len = bce_COM_b06FwDataLen;
3011 	fw.data = bce_COM_b06FwData;
3013 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
3014 	fw.sbss_len = bce_COM_b06FwSbssLen;
3016 	fw.sbss = bce_COM_b06FwSbss;
3018 	fw.bss_addr = bce_COM_b06FwBssAddr;
3019 	fw.bss_len = bce_COM_b06FwBssLen;
3021 	fw.bss = bce_COM_b06FwBss;
3023 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
3024 	fw.rodata_len = bce_COM_b06FwRodataLen;
3025 	fw.rodata_index = 0;
3026 	fw.rodata = bce_COM_b06FwRodata;
3028 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3029 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3033 /****************************************************************************/
3034 /* Initialize context memory. */
3036 /* Clears the memory associated with each Context ID (CID). */
3040 /****************************************************************************/
3042 bce_init_context(struct bce_softc *sc)
3048 	u32 vcid_addr, pcid_addr, offset;
/*
 * For each context ID: map its page, zero the whole physical context
 * area word-by-word, then restore the virtual->physical page mapping.
 */
3052 		vcid_addr = GET_CID_ADDR(vcid);
/* Identity-map: physical context address equals the virtual one here. */
3053 		pcid_addr = vcid_addr;
3055 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3056 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3058 		/* Zero out the context. */
3059 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3060 			CTX_WR(sc, 0x00, offset, 0);
3063 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3064 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3069 /****************************************************************************/
3070 /* Fetch the permanent MAC address of the controller. */
3074 /****************************************************************************/
3076 bce_get_mac_addr(struct bce_softc *sc)
3078 	u32 mac_lo = 0, mac_hi = 0;
3081 	 * The NetXtreme II bootcode populates various NIC
3082 	 * power-on and runtime configuration items in a
3083 	 * shared memory area.  The factory configured MAC
3084 	 * address is available from both NVRAM and the
3085 	 * shared memory area so we'll read the value from
3086 	 * shared memory for speed.
3089 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3090 	    BCE_PORT_HW_CFG_MAC_UPPER);
3091 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3092 	    BCE_PORT_HW_CFG_MAC_LOWER);
/* All-zero upper AND lower words means no factory address was programmed. */
3094 	if ((mac_lo == 0) && (mac_hi == 0)) {
3095 		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
3096 		    __FILE__, __LINE__);
/*
 * Unpack the 48-bit address, most-significant byte first:
 * bytes 0-1 from the upper word, bytes 2-5 from the lower word.
 */
3098 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3099 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3100 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3101 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3102 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3103 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3106 	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3110 /****************************************************************************/
3111 /* Program the MAC address. */
3115 /****************************************************************************/
3117 bce_set_mac_addr(struct bce_softc *sc)
3120 	u8 *mac_addr = sc->eaddr;
3122 	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
/* Upper 16 bits of the MAC address go into MATCH0... */
3124 	val = (mac_addr[0] << 8) | mac_addr[1];
3126 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
/* ...and the lower 32 bits into MATCH1. */
3128 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3129 	    (mac_addr[4] << 8) | mac_addr[5];
3131 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3135 /****************************************************************************/
3136 /* Stop the controller. */
3140 /****************************************************************************/
3142 bce_stop(struct bce_softc *sc)
3145 	struct ifmedia_entry *ifm;
3146 	struct mii_data *mii = NULL;
3149 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
/* Caller must hold the softc lock; this routine does not acquire it. */
3151 	BCE_LOCK_ASSERT(sc);
3155 	mii = device_get_softc(sc->bce_miibus);
3157 	callout_stop(&sc->bce_tick_callout);
3159 	/* Disable the transmit/receive blocks. */
3160 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
/* Read back to flush the posted write before continuing. */
3161 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3164 	bce_disable_intr(sc);
3166 	/* Free RX buffers. */
3167 	bce_free_rx_chain(sc);
3169 	/* Free TX buffers. */
3170 	bce_free_tx_chain(sc);
3173 	 * Isolate/power down the PHY, but leave the media selection
3174 	 * unchanged so that things will be put back to normal when
3175 	 * we bring the interface back up.
/* Temporarily force IFF_UP so the media change below takes effect. */
3178 	itmp = ifp->if_flags;
3179 	ifp->if_flags |= IFF_UP;
3181 	/* If we are called from bce_detach(), mii is already NULL. */
3183 		ifm = mii->mii_media.ifm_cur;
/* Save the current media, switch to IFM_NONE to idle the PHY, then restore. */
3184 		mtmp = ifm->ifm_media;
3185 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
3187 		ifm->ifm_media = mtmp;
3190 	ifp->if_flags = itmp;
3191 	sc->watchdog_timer = 0;
3195 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3197 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3202 bce_reset(struct bce_softc *sc, u32 reset_code)
3207 	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
3208 	    __FUNCTION__, reset_code);
3210 	/* Wait for pending PCI transactions to complete. */
3211 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3212 	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3213 	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3214 	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3215 	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3216 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3219 	/* Assume bootcode is running. */
3220 	sc->bce_fw_timed_out = 0;
3222 	/* Give the firmware a chance to prepare for the reset. */
3223 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3225 		goto bce_reset_exit;
3227 	/* Set a firmware reminder that this is a soft reset. */
3228 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3229 	    BCE_DRV_RESET_SIGNATURE_MAGIC);
3231 	/* Dummy read to force the chip to complete all current transactions. */
3232 	val = REG_RD(sc, BCE_MISC_ID);
/* Request the core reset; window enable and word swap must be preserved. */
3235 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3236 	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3237 	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3238 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3240 	/* Allow up to 30us for reset to complete. */
3241 	for (i = 0; i < 10; i++) {
3242 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3243 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3244 		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3250 	/* Check that reset completed successfully. */
3251 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3252 	    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3253 		BCE_PRINTF("%s(%d): Reset failed!\n",
3254 		    __FILE__, __LINE__);
3256 		goto bce_reset_exit;
/*
 * The SWAP_DIAG0 register reads 0x01020304 only when byte/word swapping
 * is configured correctly for this host's endianness.
 */
3259 	/* Make sure byte swapping is properly configured. */
3260 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3261 	if (val != 0x01020304) {
3262 		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
3263 		    __FILE__, __LINE__);
3265 		goto bce_reset_exit;
3268 	/* Just completed a reset, assume that firmware is running again. */
3269 	sc->bce_fw_timed_out = 0;
3271 	/* Wait for the firmware to finish its initialization. */
3272 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3274 		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
3275 		    __FILE__, __LINE__);
3283 bce_chipinit(struct bce_softc *sc)
3288 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3290 	/* Make sure the interrupt is not active. */
3291 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3294 	 * Initialize DMA byte/word swapping, configure the number of DMA
3295 	 * channels and PCI clock compensation delay.
3297 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3298 	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
3299 #if BYTE_ORDER == BIG_ENDIAN
/* Control-path swapping is only needed on big-endian hosts. */
3300 	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3302 	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3303 	    DMA_READ_CHANS << 12 |
3304 	    DMA_WRITE_CHANS << 16;
3306 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3308 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3309 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3312 	 * This setting resolves a problem observed on certain Intel PCI
3313 	 * chipsets that cannot handle multiple outstanding DMA operations.
3314 	 * See errata E9_5706A1_65.
3316 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3317 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3318 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3319 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3321 	REG_WR(sc, BCE_DMA_CONFIG, val);
3323 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3324 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3327 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3328 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3331 	/* Enable the RX_V2P and Context state machines before access. */
3332 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3333 	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3334 	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3335 	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3337 	/* Initialize context mapping and zero out the quick contexts. */
3338 	bce_init_context(sc);
3340 	/* Initialize the on-boards CPUs */
3343 	/* Prepare NVRAM for access. */
3344 	if (bce_init_nvram(sc)) {
3346 		goto bce_chipinit_exit;
3349 	/* Set the kernel bypass block size */
3350 	val = REG_RD(sc, BCE_MQ_CONFIG);
3351 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3352 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3353 	REG_WR(sc, BCE_MQ_CONFIG, val);
/* The bypass window starts and ends just past the kernel context area. */
3355 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3356 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3357 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3359 	/* Set the page size and clear the RV2P processor stall bits. */
3360 	val = (BCM_PAGE_BITS - 8) << 24;
3361 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3363 	/* Configure page size. */
3364 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3365 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3366 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3367 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3370 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3376 /****************************************************************************/
3377 /* Initialize the controller in preparation to send/receive traffic. */
3380 /* 0 for success, positive value for failure. */
3381 /****************************************************************************/
3383 bce_blockinit(struct bce_softc *sc)
3388 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3390 	/* Load the hardware default MAC address. */
3391 	bce_set_mac_addr(sc);
/*
 * Derive the Ethernet backoff seed from the MAC address.  This does not
 * need to be a faithful hash, only a per-station value.
 */
3393 	/* Set the Ethernet backoff seed value */
3394 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
3395 	    (sc->eaddr[2] << 16) + (sc->eaddr[3] ) +
3396 	    (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3397 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3399 	sc->last_status_idx = 0;
3400 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3402 	/* Set up link change interrupt generation. */
3403 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3405 	/* Program the physical address of the status block. */
3406 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3407 	    BCE_ADDR_LO(sc->status_block_paddr));
3408 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3409 	    BCE_ADDR_HI(sc->status_block_paddr));
3411 	/* Program the physical address of the statistics block. */
3412 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3413 	    BCE_ADDR_LO(sc->stats_block_paddr));
3414 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3415 	    BCE_ADDR_HI(sc->stats_block_paddr));
/*
 * Host coalescing: each register packs the interrupt-time value in the
 * upper 16 bits and the normal value in the lower 16 bits.
 */
3417 	/* Program various host coalescing parameters. */
3418 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3419 	    (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3420 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3421 	    (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3422 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3423 	    (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3424 	REG_WR(sc, BCE_HC_TX_TICKS,
3425 	    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3426 	REG_WR(sc, BCE_HC_RX_TICKS,
3427 	    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3428 	REG_WR(sc, BCE_HC_COM_TICKS,
3429 	    (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3430 	REG_WR(sc, BCE_HC_CMD_TICKS,
3431 	    (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3432 	REG_WR(sc, BCE_HC_STATS_TICKS,
3433 	    (sc->bce_stats_ticks & 0xffff00));
3434 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3436 	REG_WR(sc, BCE_HC_CONFIG,
3437 	    (BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3438 	    BCE_HC_CONFIG_COLLECT_STATS));
3440 	/* Clear the internal statistics counters. */
3441 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3443 	/* Verify that bootcode is running. */
3444 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3446 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3447 	    BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
3448 	    __FILE__, __LINE__);
3451 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3452 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
/* Fix: the expected value printed "08%08X" (a typo) instead of "0x%08X". */
3453 		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
3454 		    "Expected: 0x%08X\n", __FILE__, __LINE__,
3455 		    (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3456 		    BCE_DEV_INFO_SIGNATURE_MAGIC);
3458 		goto bce_blockinit_exit;
3461 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3462 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3464 	/* Enable link state change interrupt generation. */
3465 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3467 	/* Enable all remaining blocks in the MAC. */
3468 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3469 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3473 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3479 /****************************************************************************/
3480 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3482 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3483 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3486 /* Todo: Consider writing the hardware mailboxes here to make rx_bd's */
3487 /* available to the hardware as soon as possible. */
3490 /* 0 for success, positive value for failure. */
3491 /****************************************************************************/
3493 bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
3497 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
3498 	struct mbuf *m_new = NULL;
3500 	int i, nsegs, error, rc = 0;
3502 	u16 debug_chain_prod = *chain_prod;
3505 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
3508 	/* Make sure the inputs are valid. */
3509 	DBRUNIF((*chain_prod > MAX_RX_BD),
3510 	    BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
3511 	    __FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
3513 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3514 	    "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
/* A NULL 'm' means allocate a fresh mbuf+cluster; otherwise 'm' is recycled. */
3516 	/* Check whether this is a new mbuf allocation. */
3519 		/* Simulate an mbuf allocation failure. */
3520 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3521 		    sc->mbuf_alloc_failed++;
3522 		    sc->mbuf_sim_alloc_failed++;
3524 		    goto bce_get_buf_exit);
3526 		/* This is a new mbuf allocation. */
3527 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3528 		if (m_new == NULL) {
3530 			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
3531 			    __FILE__, __LINE__);
3533 			sc->mbuf_alloc_failed++;
3536 			goto bce_get_buf_exit;
3539 		DBRUNIF(1, sc->rx_mbuf_alloc++);
3541 		/* Simulate an mbuf cluster allocation failure. */
3542 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3544 		    sc->rx_mbuf_alloc--;
3545 		    sc->mbuf_alloc_failed++;
3546 		    sc->mbuf_sim_alloc_failed++;
3548 		    goto bce_get_buf_exit);
3550 		/* Attach a cluster to the mbuf. */
3551 		m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
/* m_cljget() failure is detected via M_EXT not being set on the mbuf. */
3552 		if (!(m_new->m_flags & M_EXT)) {
3554 			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
3555 			    __FILE__, __LINE__);
3558 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3560 			sc->mbuf_alloc_failed++;
3562 			goto bce_get_buf_exit;
3565 		/* Initialize the mbuf cluster. */
3566 		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3568 	/* Reuse an existing mbuf. */
/* Rewind the data pointer to the cluster start before reuse. */
3570 		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3571 		m_new->m_data = m_new->m_ext.ext_buf;
3574 	/* Map the mbuf cluster into device memory. */
3575 	map = sc->rx_mbuf_map[*chain_prod];
3576 	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
3577 	    segs, &nsegs, BUS_DMA_NOWAIT);
3579 	/* Handle any mapping errors. */
3581 		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain!\n",
3582 		    __FILE__, __LINE__);
3585 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3588 		goto bce_get_buf_exit;
/* Without enough free rx_bd entries the mapping must be undone and retried later. */
3591 	/* Make sure there is room in the receive chain. */
3592 	if (nsegs > sc->free_rx_bd) {
3593 		bus_dmamap_unload(sc->rx_mbuf_tag, map);
3596 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3599 		goto bce_get_buf_exit;
3603 	/* Track the distribution of buffer segments. */
3604 	sc->rx_mbuf_segs[nsegs]++;
3607 	/* Update some debug statistic counters */
3608 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3609 	    sc->rx_low_watermark = sc->free_rx_bd);
3610 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
/*
 * One rx_bd per DMA segment: the first carries RX_BD_FLAGS_START, the
 * last gets RX_BD_FLAGS_END, and prod_bseq accumulates total byte count.
 */
3612 	/* Setup the rx_bd for the first segment. */
3613 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3615 	rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
3616 	rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
3617 	rxbd->rx_bd_len = htole32(segs[0].ds_len);
3618 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3619 	*prod_bseq += segs[0].ds_len;
3621 	for (i = 1; i < nsegs; i++) {
3623 		*prod = NEXT_RX_BD(*prod);
3624 		*chain_prod = RX_CHAIN_IDX(*prod);
3626 		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3628 		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
3629 		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
3630 		rxbd->rx_bd_len = htole32(segs[i].ds_len);
3631 		rxbd->rx_bd_flags = 0;
3632 		*prod_bseq += segs[i].ds_len;
3635 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
/* The mbuf is stored at the *last* chain_prod index used for this buffer. */
3637 	/* Save the mbuf and update our counter. */
3638 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3639 	sc->free_rx_bd -= nsegs;
3641 	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
3644 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3645 	    "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3648 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
3655 /****************************************************************************/
3656 /* Allocate memory and initialize the TX data structures. */
3659 /* 0 for success, positive value for failure. */
3660 /****************************************************************************/
/*
 * Initialize the TX buffer descriptor (BD) chain: reset the driver's TX
 * producer state, link the chain pages together via their last BD entry,
 * and program the controller's L2 TX context so the hardware knows where
 * the chain begins.  Per the header comment above, returns 0 on success
 * and a positive value on failure (return statement elided in this view).
 */
3662 bce_init_tx_chain(struct bce_softc *sc)
3668 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3670 /* Set the initial TX producer/consumer indices. */
3673 sc->tx_prod_bseq = 0;
3675 sc->max_tx_bd = USABLE_TX_BD;
/* Debug-build watermark/counter tracking only; compiled out otherwise. */
3676 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3677 DBRUNIF(1, sc->tx_full_count = 0);
3680 * The NetXtreme II supports a linked-list structure called
3681 * a Buffer Descriptor Chain (or BD chain). A BD chain
3682 * consists of a series of 1 or more chain pages, each of which
3683 * consists of a fixed number of BD entries.
3684 * The last BD entry on each page is a pointer to the next page
3685 * in the chain, and the last pointer in the BD chain
3686 * points back to the beginning of the chain.
3689 /* Set the TX next pointer chain entries. */
3690 for (i = 0; i < TX_PAGES; i++) {
/* The final BD slot of each page holds the next-page pointer. */
3693 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3695 /* Check if we've reached the last page. */
3696 if (i == (TX_PAGES - 1))
/* 'j' is the next page index (wraps to 0 on the last page). */
3701 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3702 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3705 /* Initialize the context ID for an L2 TX chain. */
3706 val = BCE_L2CTX_TYPE_TYPE_L2;
3707 val |= BCE_L2CTX_TYPE_SIZE_L2;
3708 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
/* NOTE(review): (8 << 16) sets an upper field of the command type word;
 * presumably the BD pre-read count — confirm against NetXtreme II docs. */
3710 val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3711 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3713 /* Point the hardware to the first page in the chain. */
3714 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3715 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3716 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3717 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3719 DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3721 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3727 /****************************************************************************/
3728 /* Free memory and clear the TX data structures. */
3732 /****************************************************************************/
/*
 * Release all resources attached to the TX BD chain: free any mbufs still
 * queued for transmit and zero every TX chain page.  Called during stop/
 * detach; the chain pages themselves are not deallocated here.
 */
3734 bce_free_tx_chain(struct bce_softc *sc)
3738 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3740 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3741 for (i = 0; i < TOTAL_TX_BD; i++) {
3742 if (sc->tx_mbuf_ptr[i] != NULL) {
/* NOTE(review): this tests the map ARRAY pointer, not tx_mbuf_map[i];
 * the RX analogue (bce_free_rx_chain) checks the per-entry map.
 * Confirm which is intended.  Also no visible bus_dmamap_unload()
 * here before m_freem() — may be elided from this view. */
3743 if (sc->tx_mbuf_map != NULL)
3744 bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3745 BUS_DMASYNC_POSTWRITE);
3746 m_freem(sc->tx_mbuf_ptr[i]);
3747 sc->tx_mbuf_ptr[i] = NULL;
3748 DBRUNIF(1, sc->tx_mbuf_alloc--);
3752 /* Clear each TX chain page. */
3753 for (i = 0; i < TX_PAGES; i++)
3754 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3758 /* Check if we lost any mbufs in the process. */
3759 DBRUNIF((sc->tx_mbuf_alloc),
3760 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
3762 __FILE__, __LINE__, sc->tx_mbuf_alloc));
3764 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3768 /****************************************************************************/
3769 /* Add mbufs to the RX chain until it's full or an mbuf allocation error */
3774 /****************************************************************************/
/*
 * Top up the RX BD chain: repeatedly call bce_get_buf() to attach fresh
 * mbuf clusters until the chain is full or an allocation fails, then
 * publish the new producer index and byte sequence to the hardware via
 * the host mailbox registers.
 */
3776 bce_fill_rx_chain(struct bce_softc *sc)
3778 u16 prod, chain_prod;
3781 int rx_mbuf_alloc_before, free_rx_bd_before;
3784 DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
/* Work on local copies; committed back to the softc at the end. */
3787 prod_bseq = sc->rx_prod_bseq;
/* Snapshot counters so the debug print below can report the delta. */
3790 rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3791 free_rx_bd_before = sc->free_rx_bd;
3794 /* Keep filling the RX chain until it's full. */
3795 while (sc->free_rx_bd > 0) {
3796 chain_prod = RX_CHAIN_IDX(prod);
3797 if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3798 /* Bail out if we can't add an mbuf to the chain. */
3801 prod = NEXT_RX_BD(prod);
3805 DBRUNIF((sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
3806 BCE_PRINTF("%s(): Installed %d mbufs in %d rx_bd entries.\n",
3807 __FUNCTION__, (sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
3808 (free_rx_bd_before - sc->free_rx_bd)));
3811 /* Save the RX chain producer index. */
3813 sc->rx_prod_bseq = prod_bseq;
3815 /* Tell the chip about the waiting rx_bd's. */
3816 REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3817 REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3819 DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
3824 /****************************************************************************/
3825 /* Allocate memory and initialize the RX data structures. */
3828 /* 0 for success, positive value for failure. */
3829 /****************************************************************************/
/*
 * Initialize the RX BD chain: reset the driver's RX producer state, link
 * the chain pages together, program the L2 RX context in the controller,
 * fill the chain with mbufs, and sync the pages for device access.  Per
 * the header comment above, returns 0 on success, positive on failure.
 */
3831 bce_init_rx_chain(struct bce_softc *sc)
3837 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3839 /* Initialize the RX producer and consumer indices. */
3842 sc->rx_prod_bseq = 0;
3843 sc->free_rx_bd = USABLE_RX_BD;
3844 sc->max_rx_bd = USABLE_RX_BD;
/* Debug-build watermark/counter tracking only. */
3845 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3846 DBRUNIF(1, sc->rx_empty_count = 0);
3848 /* Initialize the RX next pointer chain entries. */
3849 for (i = 0; i < RX_PAGES; i++) {
/* The final BD slot of each page holds the next-page pointer. */
3852 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3854 /* Check if we've reached the last page. */
3855 if (i == (RX_PAGES - 1))
3860 /* Setup the chain page pointers ('j' = next page, wrapping to 0). */
3861 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3862 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3865 /* Initialize the context ID for an L2 RX chain. */
3866 val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3867 val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3869 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3871 /* Point the hardware to the first page in the chain. */
3872 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3873 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3874 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3875 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3878 /* Fill up the RX chain. */
3879 bce_fill_rx_chain(sc);
/* Hand the chain pages to the device before enabling RX DMA. */
3882 for (i = 0; i < RX_PAGES; i++) {
3884 sc->rx_bd_chain_tag,
3885 sc->rx_bd_chain_map[i],
3886 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3889 DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3891 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3897 /****************************************************************************/
3898 /* Free memory and clear the RX data structures. */
3902 /****************************************************************************/
/*
 * Release all resources attached to the RX BD chain: free any mbufs still
 * installed, zero every RX chain page, and reset the free-BD count.
 * Called during stop/detach; the chain pages themselves remain allocated.
 */
3904 bce_free_rx_chain(struct bce_softc *sc)
3908 int rx_mbuf_alloc_before;
3911 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
/* Snapshot the counter so the debug print can report how many we freed. */
3914 rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3917 /* Free any mbufs still in the RX mbuf chain. */
3918 for (i = 0; i < TOTAL_RX_BD; i++) {
3919 if (sc->rx_mbuf_ptr[i] != NULL) {
3920 if (sc->rx_mbuf_map[i] != NULL)
/* NOTE(review): only a POSTREAD sync is visible here, no
 * bus_dmamap_unload() before m_freem() — the unload may be on a
 * line elided from this view; confirm against the full source. */
3921 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3922 BUS_DMASYNC_POSTREAD);
3923 m_freem(sc->rx_mbuf_ptr[i]);
3924 sc->rx_mbuf_ptr[i] = NULL;
3925 DBRUNIF(1, sc->rx_mbuf_alloc--);
3929 DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
3930 BCE_PRINTF("%s(): Released %d mbufs.\n",
3931 __FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
3933 /* Clear each RX chain page. */
3934 for (i = 0; i < RX_PAGES; i++)
3935 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
/* All BDs are free again once the chain is emptied. */
3937 sc->free_rx_bd = sc->max_rx_bd;
3939 /* Check if we lost any mbufs in the process. */
3940 DBRUNIF((sc->rx_mbuf_alloc),
3941 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3942 __FILE__, __LINE__, sc->rx_mbuf_alloc));
3944 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3948 /****************************************************************************/
3949 /* Set media options. */
3952 /* 0 for success, positive value for failure. */
3953 /****************************************************************************/
/*
 * ifmedia "set media" entry point.  Thin unlocked wrapper: presumably
 * takes the softc lock around the call (lock lines elided from this
 * view — confirm) and defers to bce_ifmedia_upd_locked().
 */
3955 bce_ifmedia_upd(struct ifnet *ifp)
3957 struct bce_softc *sc;
3961 bce_ifmedia_upd_locked(ifp);
3967 /****************************************************************************/
3968 /* Set media options. */
3972 /****************************************************************************/
/*
 * Set media options with the softc lock held.  If more than one PHY
 * instance is attached, reset every PHY on the MII bus so only the
 * selected one negotiates.  The final mii_mediachg() call is presumably
 * on a line elided from this view — confirm against the full source.
 */
3974 bce_ifmedia_upd_locked(struct ifnet *ifp)
3976 struct bce_softc *sc;
3977 struct mii_data *mii;
3978 struct ifmedia *ifm;
3981 ifm = &sc->bce_ifmedia;
3982 BCE_LOCK_ASSERT(sc);
3984 mii = device_get_softc(sc->bce_miibus);
3986 /* Make sure the MII bus has been enumerated. */
/* Non-zero instance count means multiple PHYs: reset them all. */
3989 if (mii->mii_instance) {
3990 struct mii_softc *miisc;
3992 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3993 mii_phy_reset(miisc);
4000 /****************************************************************************/
4001 /* Reports current media status. */
4005 /****************************************************************************/
/*
 * ifmedia "get status" entry point: report the current media state by
 * copying the MII layer's active/status words into the request.  A
 * preceding mii_pollstat() and locking are presumably on lines elided
 * from this view — confirm against the full source.
 */
4007 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4009 struct bce_softc *sc;
4010 struct mii_data *mii;
4016 mii = device_get_softc(sc->bce_miibus);
4019 ifmr->ifm_active = mii->mii_media_active;
4020 ifmr->ifm_status = mii->mii_media_status;
4026 /****************************************************************************/
4027 /* Handles PHY generated interrupt events. */
4031 /****************************************************************************/
/*
 * Handle a PHY-generated (link state) interrupt.  Compares the link bit
 * in status_attn_bits against its acknowledged copy; on a change,
 * acknowledges the new state back to the controller via the PCICFG
 * set/clear command registers and then clears the EMAC link-change
 * interrupt source.
 */
4033 bce_phy_intr(struct bce_softc *sc)
4035 u32 new_link_state, old_link_state;
/* Current link state as reported by the hardware. */
4037 new_link_state = sc->status_block->status_attn_bits &
4038 STATUS_ATTN_BITS_LINK_STATE;
/* Link state the driver has already acknowledged. */
4039 old_link_state = sc->status_block->status_attn_bits_ack &
4040 STATUS_ATTN_BITS_LINK_STATE;
4042 /* Handle any changes if the link state has changed. */
4043 if (new_link_state != old_link_state) {
4045 DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
/* Stop the periodic tick; presumably restarted once the MII layer
 * has processed the change (restart line elided from this view). */
4048 callout_stop(&sc->bce_tick_callout);
4051 /* Update the status_attn_bits_ack field in the status block. */
4052 if (new_link_state) {
4053 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4054 STATUS_ATTN_BITS_LINK_STATE);
4055 DBPRINT(sc, BCE_INFO_MISC, "Link is now UP.\n");
4058 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4059 STATUS_ATTN_BITS_LINK_STATE);
4060 DBPRINT(sc, BCE_INFO_MISC, "Link is now DOWN.\n");
4065 /* Acknowledge the link change interrupt. */
4066 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4070 /****************************************************************************/
4071 /* Handles received frame interrupt events. */
4075 /****************************************************************************/
/*
 * Receive-completion interrupt handler.  Walks the RX BD chain from the
 * driver's consumer index up to the hardware's, and for each completed
 * frame: unmaps the mbuf, parses the prepended l2_fhdr for length/status,
 * drops errored frames, applies checksum-offload and VLAN results, and
 * hands good frames to the stack via if_input.  Finally refills the RX
 * chain and returns the pages to the device.  Runs with the softc lock
 * held (dropped around if_input — note the save/restore of rx_cons).
 */
4077 bce_rx_intr(struct bce_softc *sc)
4079 struct status_block *sblk = sc->status_block;
4080 struct ifnet *ifp = sc->bce_ifp;
4081 u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4083 struct l2_fhdr *l2fhdr;
4085 DBRUNIF(1, sc->rx_interrupts++);
4087 /* Prepare the RX chain pages to be accessed by the host CPU. */
4088 for (int i = 0; i < RX_PAGES; i++)
4089 bus_dmamap_sync(sc->rx_bd_chain_tag,
4090 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4092 /* Get the hardware's view of the RX consumer index. */
4093 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
/* Skip over the next-page pointer BD slot (advance elided in view). */
4094 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4097 /* Get working copies of the driver's view of the RX indices. */
4098 sw_cons = sc->rx_cons;
4099 sw_prod = sc->rx_prod;
4100 sw_prod_bseq = sc->rx_prod_bseq;
4102 DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4103 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4104 __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
4106 /* Prevent speculative reads from getting ahead of the status block. */
4107 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4108 BUS_SPACE_BARRIER_READ);
4110 /* Update some debug statistics counters */
4111 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4112 sc->rx_low_watermark = sc->free_rx_bd);
4113 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
4115 /* Scan through the receive chain as long as there is work to do */
4116 while (sw_cons != hw_cons) {
4122 /* Clear the mbuf pointer. */
4125 /* Convert the producer/consumer indices to an actual rx_bd index. */
4126 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4127 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4129 /* Get the used rx_bd. */
4130 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4133 DBRUN(BCE_VERBOSE_RECV,
4134 BCE_PRINTF("%s(): ", __FUNCTION__);
4135 bce_dump_rxbd(sc, sw_chain_cons, rxbd));
/* Under polling, stop once the cycle budget is exhausted. */
4137 #ifdef DEVICE_POLLING
4138 if (ifp->if_capenable & IFCAP_POLLING) {
4139 if (sc->bce_rxcycles <= 0)
4145 /* The mbuf is stored with the last rx_bd entry of a packet. */
4146 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4148 /* Validate that this is the last rx_bd. */
4149 DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4150 BCE_PRINTF("%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
4151 __FILE__, __LINE__, sw_chain_cons);
4152 bce_breakpoint(sc));
4155 * ToDo: If the received packet is small enough
4156 * to fit into a single, non-M_EXT mbuf,
4157 * allocate a new mbuf here, copy the data to
4158 * that mbuf, and recycle the mapped jumbo frame.
4161 /* Unmap the mbuf from DMA space. */
4162 bus_dmamap_sync(sc->rx_mbuf_tag,
4163 sc->rx_mbuf_map[sw_chain_cons],
4164 BUS_DMASYNC_POSTREAD);
4165 bus_dmamap_unload(sc->rx_mbuf_tag,
4166 sc->rx_mbuf_map[sw_chain_cons]);
4168 /* Remove the mbuf from the RX chain. */
4169 m = sc->rx_mbuf_ptr[sw_chain_cons];
4170 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4173 * Frames received on the NetXtreme II are prepended
4174 * with an l2_fhdr structure which provides status
4175 * information about the received frame (including
4176 * VLAN tags and checksum info). The frames are also
4177 * automatically adjusted to align the IP header
4178 * (i.e. two null bytes are inserted before the
4181 l2fhdr = mtod(m, struct l2_fhdr *);
4183 len = l2fhdr->l2_fhdr_pkt_len;
4184 status = l2fhdr->l2_fhdr_status;
/* Debug build: randomly inject an l2_fhdr error to test the path. */
4186 DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4187 BCE_PRINTF("Simulating l2_fhdr status error.\n");
4188 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4190 /* Watch for unusual sized frames. */
4191 DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
4192 BCE_PRINTF("%s(%d): Unusual frame size found. "
4193 "Min(%d), Actual(%d), Max(%d)\n",
4194 __FILE__, __LINE__, (int) BCE_MIN_MTU,
4195 len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
4196 bce_dump_mbuf(sc, m);
4197 bce_breakpoint(sc));
/* l2_fhdr_pkt_len includes the FCS; strip it. */
4199 len -= ETHER_CRC_LEN;
4201 /* Check the received frame for errors. */
4202 if (status & (L2_FHDR_ERRORS_BAD_CRC |
4203 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
4204 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
4206 /* Log the error and release the mbuf. */
4208 DBRUNIF(1, sc->l2fhdr_status_errors++);
4210 /* Todo: Reuse the mbuf to improve performance. */
4214 goto bce_rx_int_next_rx;
4217 /* Skip over the l2_fhdr when passing the data up the stack. */
4218 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4220 /* Adjust the packet length to match the received data. */
4221 m->m_pkthdr.len = m->m_len = len;
4223 /* Send the packet to the appropriate interface. */
4224 m->m_pkthdr.rcvif = ifp;
4226 DBRUN(BCE_VERBOSE_RECV,
4227 struct ether_header *eh;
4228 eh = mtod(m, struct ether_header *);
4229 BCE_PRINTF("%s(): to: %6D, from: %6D, type: 0x%04X\n",
4230 __FUNCTION__, eh->ether_dhost, ":",
4231 eh->ether_shost, ":", htons(eh->ether_type)));
4233 /* Validate the checksum if offload enabled. */
4234 if (ifp->if_capenable & IFCAP_RXCSUM) {
4236 /* Check for an IP datagram. */
4237 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4238 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4240 /* Check if the IP checksum is valid. */
4241 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4242 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* NOTE(review): BCE_WARN_SEND used on the RX path here and below —
 * BCE_WARN_RECV seems intended; confirm against the debug flag defs. */
4244 DBPRINT(sc, BCE_WARN_SEND,
4245 "%s(): Invalid IP checksum = 0x%04X!\n",
4246 __FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
4249 /* Check for a valid TCP/UDP frame. */
4250 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4251 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4253 /* Check for a good TCP/UDP checksum. */
4254 if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4255 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4256 m->m_pkthdr.csum_data =
4257 l2fhdr->l2_fhdr_tcp_udp_xsum;
4258 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
4261 DBPRINT(sc, BCE_WARN_SEND,
4262 "%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
4263 __FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
4269 * If we received a packet with a vlan tag,
4270 * attach that information to the packet.
4272 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4273 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
4274 __FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
4275 #if __FreeBSD_version < 700000
4276 VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
4278 m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
4279 m->m_flags |= M_VLANTAG;
4283 /* Pass the mbuf off to the upper layers. */
4287 sw_prod = NEXT_RX_BD(sw_prod);
4290 sw_cons = NEXT_RX_BD(sw_cons);
4292 /* If we have a packet, pass it up the stack */
4294 /* Make sure we don't lose our place when we release the lock. */
4295 sc->rx_cons = sw_cons;
4297 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
/* if_input is called with the softc lock dropped (unlock/relock
 * lines elided from this view); rx_cons was saved above for that. */
4300 (*ifp->if_input)(ifp, m);
4301 DBRUNIF(1, sc->rx_mbuf_alloc--);
4304 /* Recover our place. */
4305 sw_cons = sc->rx_cons;
4308 /* Refresh hw_cons to see if there's new work */
4309 if (sw_cons == hw_cons) {
4310 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4311 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4315 /* Prevent speculative reads from getting ahead of the status block. */
4316 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4317 BUS_SPACE_BARRIER_READ);
4320 /* No new packets to process. Refill the RX chain and exit. */
4321 sc->rx_cons = sw_cons;
4322 bce_fill_rx_chain(sc);
/* Return the chain pages to the device. */
4324 for (int i = 0; i < RX_PAGES; i++)
4325 bus_dmamap_sync(sc->rx_bd_chain_tag,
4326 sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4328 DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4329 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4330 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4334 /****************************************************************************/
4335 /* Handles transmit completion interrupt events. */
4339 /****************************************************************************/
/*
 * Transmit-completion interrupt handler.  Walks the TX BD chain from the
 * driver's consumer index up to the hardware's, unloading the DMA map
 * and freeing the mbuf stored at the last BD of each completed packet.
 * Clears the watchdog and the OACTIVE flag when descriptors free up.
 */
4341 bce_tx_intr(struct bce_softc *sc)
4343 struct status_block *sblk = sc->status_block;
4344 struct ifnet *ifp = sc->bce_ifp;
4345 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4347 BCE_LOCK_ASSERT(sc);
4349 DBRUNIF(1, sc->tx_interrupts++);
4351 /* Get the hardware's view of the TX consumer index. */
4352 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4354 /* Skip to the next entry if this is a chain page pointer. */
4355 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4358 sw_tx_cons = sc->tx_cons;
4360 /* Prevent speculative reads from getting ahead of the status block. */
4361 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4362 BUS_SPACE_BARRIER_READ);
4364 /* Cycle through any completed TX chain page entries. */
4365 while (sw_tx_cons != hw_tx_cons) {
4367 struct tx_bd *txbd = NULL;
4369 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4371 DBPRINT(sc, BCE_INFO_SEND,
4372 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4373 "sw_tx_chain_cons = 0x%04X\n",
4374 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4376 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4377 BCE_PRINTF("%s(%d): TX chain consumer out of range! "
4378 " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
4380 bce_breakpoint(sc));
/* txbd is only populated in debug builds, for the checks below. */
4382 DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4383 [TX_IDX(sw_tx_chain_cons)]);
4385 DBRUNIF((txbd == NULL),
4386 BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
4387 __FILE__, __LINE__, sw_tx_chain_cons);
4388 bce_breakpoint(sc));
4390 DBRUN(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
4391 bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4394 * Free the associated mbuf. Remember
4395 * that only the last tx_bd of a packet
4396 * has an mbuf pointer and DMA map.
4398 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4400 /* Validate that this is the last tx_bd. */
4401 DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4402 BCE_PRINTF("%s(%d): tx_bd END flag not set but "
4403 "txmbuf == NULL!\n", __FILE__, __LINE__);
4404 bce_breakpoint(sc));
4406 DBRUN(BCE_INFO_SEND,
4407 BCE_PRINTF("%s(): Unloading map/freeing mbuf "
4408 "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
4410 /* Unmap the mbuf. */
4411 bus_dmamap_unload(sc->tx_mbuf_tag,
4412 sc->tx_mbuf_map[sw_tx_chain_cons]);
4414 /* Free the mbuf. */
4415 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4416 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4417 DBRUNIF(1, sc->tx_mbuf_alloc--);
4423 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4425 /* Refresh hw_cons to see if there's new work. */
4426 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4427 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4430 /* Prevent speculative reads from getting ahead of the status block. */
4431 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4432 BUS_SPACE_BARRIER_READ);
4435 /* Clear the TX timeout timer. */
4436 sc->watchdog_timer = 0;
4438 /* Clear the tx hardware queue full flag. */
4439 if (sc->used_tx_bd < sc->max_tx_bd) {
4440 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
4441 DBPRINT(sc, BCE_INFO_SEND,
4442 "%s(): Open TX chain! %d/%d (used/total)\n",
4443 __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
4444 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4447 sc->tx_cons = sw_tx_cons;
4451 /****************************************************************************/
4452 /* Disables interrupt generation. */
4456 /****************************************************************************/
/*
 * Mask interrupt generation on the controller.  The read-back of the
 * same register flushes the posted PCI write so the mask takes effect
 * before this function returns.
 */
4458 bce_disable_intr(struct bce_softc *sc)
4460 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4461 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4462 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4466 /****************************************************************************/
4467 /* Enables interrupt generation. */
4471 /****************************************************************************/
/*
 * Re-enable interrupt generation.  Acknowledges the last seen status
 * index (first while still masked, then unmasked) and forces the host
 * coalescing block to generate an interrupt immediately via COAL_NOW,
 * so no completion that arrived while masked is lost.
 */
4473 bce_enable_intr(struct bce_softc *sc)
/* Ack the current index with interrupts still masked... */
4477 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4478 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4479 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
/* ...then unmask. */
4481 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4482 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
/* Kick the coalescing block to fire immediately if work is pending. */
4484 val = REG_RD(sc, BCE_HC_COMMAND);
4485 REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4489 /****************************************************************************/
4490 /* Handles controller initialization. */
4494 /****************************************************************************/
/*
 * Bring the interface up with the softc lock held: reset and initialize
 * the controller, program the MAC address and MTU, initialize the RX/TX
 * chains, enable interrupts (unless polling), and start the tick callout.
 * A no-op if the interface is already running.
 */
4496 bce_init_locked(struct bce_softc *sc)
4501 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4503 BCE_LOCK_ASSERT(sc);
4507 /* Check if the driver is still running and bail out if it is. */
4508 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4509 goto bce_init_locked_exit;
/* Full controller reset; on failure leave the interface down. */
4513 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
4514 BCE_PRINTF("%s(%d): Controller reset failed!\n",
4515 __FILE__, __LINE__);
4516 goto bce_init_locked_exit;
4519 if (bce_chipinit(sc)) {
4520 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
4521 __FILE__, __LINE__);
4522 goto bce_init_locked_exit;
4525 if (bce_blockinit(sc)) {
4526 BCE_PRINTF("%s(%d): Block initialization failed!\n",
4527 __FILE__, __LINE__);
4528 goto bce_init_locked_exit;
4531 /* Load our MAC address. */
4532 bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
4533 bce_set_mac_addr(sc);
4535 /* Calculate and program the Ethernet MTU size. */
4536 ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
4539 DBPRINT(sc, BCE_INFO_MISC, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);
4542 * Program the mtu, enabling jumbo frame
4543 * support if necessary. Also set the mbuf
4544 * allocation count for RX frames.
/* Jumbo path: cap the programmed MTU at the hardware maximum and use
 * 9k clusters; otherwise standard 2k clusters suffice. */
4546 if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
4547 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4548 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4549 sc->mbuf_alloc_size = MJUM9BYTES;
4551 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4552 sc->mbuf_alloc_size = MCLBYTES;
4555 /* Calculate the RX Ethernet frame size for rx_bd's. */
/* NOTE(review): the "+ 2" is the IP-header alignment pad inserted by
 * the hardware; the "+ 8" constant's meaning is not visible here —
 * confirm (likely FCS + slack) against the full source. */
4556 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4558 DBPRINT(sc, BCE_INFO_RECV,
4559 "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4560 "max_frame_size = %d\n",
4561 __FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);
4563 /* Program appropriate promiscuous/multicast filtering. */
4564 bce_set_rx_mode(sc);
4566 /* Init RX buffer descriptor chain. */
4567 bce_init_rx_chain(sc);
4569 /* Init TX buffer descriptor chain. */
4570 bce_init_tx_chain(sc);
4572 #ifdef DEVICE_POLLING
4573 /* Disable interrupts if we are polling. */
4574 if (ifp->if_capenable & IFCAP_POLLING) {
4575 bce_disable_intr(sc);
/* (1 << 16) programs the trip point's upper half-word; see the host
 * coalescing register layout in if_bcereg.h. */
4577 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4578 (1 << 16) | sc->bce_rx_quick_cons_trip);
4579 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4580 (1 << 16) | sc->bce_tx_quick_cons_trip);
4583 /* Enable host interrupts. */
4584 bce_enable_intr(sc);
4586 bce_ifmedia_upd_locked(ifp);
4588 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4589 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4591 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
4593 bce_init_locked_exit:
4594 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4600 /****************************************************************************/
4601 /* Initialize the controller just enough so that any management firmware */
4602 /* running on the device will continue to operate correctly. */
4606 /****************************************************************************/
/*
 * Minimal initialization so management firmware (IPMI/ASF) on the device
 * keeps working while the host interface is down: enable the MAC's
 * critical blocks and refresh the media selection.  A no-op when no
 * management firmware is running.
 */
4608 bce_mgmt_init_locked(struct bce_softc *sc)
4612 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4614 BCE_LOCK_ASSERT(sc);
4616 /* Bail out if management firmware is not running. */
4617 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
4618 DBPRINT(sc, BCE_VERBOSE_SPECIAL,
4619 "No management firmware running...\n");
4620 goto bce_mgmt_init_locked_exit;
4625 /* Enable all critical blocks in the MAC. */
/* The read-back flushes the posted write before continuing. */
4626 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
4627 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4630 bce_ifmedia_upd_locked(ifp);
4631 bce_mgmt_init_locked_exit:
4632 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4638 /****************************************************************************/
4639 /* Handles controller initialization when called from an unlocked routine. */
4643 /****************************************************************************/
/* Body of bce_init(void *xsc): the unlocked init callback registered
 * with the ifnet layer.  Recovers the softc from the opaque argument
 * and runs the locked init path; lock acquire/release lines are elided
 * from this view. */
4647 struct bce_softc *sc = xsc;
4650 bce_init_locked(sc);
4655 /****************************************************************************/
4656 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4657 /* memory visible to the controller. */
4660 /* 0 for success, positive value for failure. */
4662 /* m_head: May be set to NULL if MBUF is excessively fragmented. */
4663 /****************************************************************************/
/*
 * Encapsulate an mbuf chain into TX buffer descriptors and make the
 * memory visible to the controller.  Transfers checksum-offload and VLAN
 * metadata to the BD flags, DMA-maps the mbuf (defragmenting once on
 * EFBIG), fills one tx_bd per DMA segment, and records the mbuf at the
 * last BD's slot so bce_tx_intr() can free it on completion.
 * Per the header comment above: returns 0 on success, positive on
 * failure; *m_head may be set to NULL if the mbuf is too fragmented.
 */
4665 bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
4667 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4669 struct tx_bd *txbd = NULL;
4672 u16 vlan_tag = 0, flags = 0;
4673 u16 chain_prod, prod;
4679 int i, error, nsegs, rc = 0;
/* NOTE(review): m0 is used below without a visible initialization —
 * presumably m0 = *m_head on an elided line; confirm. */
4681 /* Transfer any checksum offload flags to the bd. */
4683 if (m0->m_pkthdr.csum_flags) {
4684 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4685 flags |= TX_BD_FLAGS_IP_CKSUM;
4686 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4687 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4690 /* Transfer any VLAN tags to the bd. */
4691 #if __FreeBSD_version > 700022
4692 if (m0->m_flags & M_VLANTAG) {
4693 flags |= TX_BD_FLAGS_VLAN_TAG;
4694 vlan_tag = m0->m_pkthdr.ether_vtag;
/* Pre-7.x path: VLAN tag travels in an mbuf tag, not the pkthdr. */
4697 mtag = VLAN_OUTPUT_TAG(sc->bce_ifp, m0);
4699 flags |= TX_BD_FLAGS_VLAN_TAG;
4700 vlan_tag = VLAN_TAG_VALUE(mtag);
4704 /* Map the mbuf into DMAable memory. */
4706 chain_prod = TX_CHAIN_IDX(prod);
4707 map = sc->tx_mbuf_map[chain_prod];
4709 /* Map the mbuf into our DMA address space. */
4710 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
4711 segs, &nsegs, BUS_DMA_NOWAIT);
4713 /* Check if the DMA mapping was successful */
4714 if (error == EFBIG) {
4716 /* The mbuf is too fragmented for our DMA mapping. */
4717 DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
4718 __FUNCTION__, nsegs);
4719 DBRUNIF(1, bce_dump_mbuf(sc, m0););
4721 /* Try to defrag the mbuf. */
4722 m0 = m_defrag(*m_head, M_DONTWAIT);
4724 /* Defrag was unsuccessful */
4727 sc->mbuf_alloc_failed++;
4731 /* Defrag was successful, try mapping again */
4733 error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
4734 segs, &nsegs, BUS_DMA_NOWAIT);
4736 /* Still getting an error after a defrag. */
4737 if (error == ENOMEM) {
4738 /* Insufficient DMA buffers available. */
4739 sc->tx_dma_map_failures++;
4741 } else if (error != 0) {
4742 /* Still can't map the mbuf, release it and return an error. */
4744 "%s(%d): Unknown error mapping mbuf into TX chain!\n",
4745 __FILE__, __LINE__);
4748 sc->tx_dma_map_failures++;
4751 } else if (error == ENOMEM) {
4752 /* Insufficient DMA buffers available. */
4753 sc->tx_dma_map_failures++;
4755 } else if (error != 0) {
4758 sc->tx_dma_map_failures++;
4762 /* Make sure there's room in the chain */
4763 if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
4764 bus_dmamap_unload(sc->tx_mbuf_tag, map);
4768 /* prod points to an empty tx_bd at this point. */
4769 prod_bseq = sc->tx_prod_bseq;
/* Remember where we started, for the debug chain dump below. */
4772 debug_prod = chain_prod;
4775 DBPRINT(sc, BCE_INFO_SEND,
4776 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4777 "prod_bseq = 0x%08X\n",
4778 __FUNCTION__, prod, chain_prod, prod_bseq);
4781 * Cycle through each mbuf segment that makes up
4782 * the outgoing frame, gathering the mapping info
4783 * for that segment and creating a tx_bd to for
4786 for (i = 0; i < nsegs ; i++) {
4788 chain_prod = TX_CHAIN_IDX(prod);
4789 txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4791 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4792 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4793 txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
4794 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4795 txbd->tx_bd_flags = htole16(flags);
4796 prod_bseq += segs[i].ds_len;
/* NOTE(review): this START-flag line is presumably guarded by an
 * elided "if (i == 0)" — confirm against the full source. */
4798 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4799 prod = NEXT_TX_BD(prod);
4802 /* Set the END flag on the last TX buffer descriptor. */
4803 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4805 DBRUN(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
4807 DBPRINT(sc, BCE_INFO_SEND,
4808 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
4809 "prod_bseq = 0x%08X\n",
4810 __FUNCTION__, prod, chain_prod, prod_bseq);
4813 * Ensure that the mbuf pointer for this transmission
4814 * is placed at the array index of the last
4815 * descriptor in this chain. This is done
4816 * because a single map is used for all
4817 * segments of the mbuf and we don't want to
4818 * unload the map before all of the segments
4821 sc->tx_mbuf_ptr[chain_prod] = m0;
4822 sc->used_tx_bd += nsegs;
4824 /* Update some debug statistic counters */
4825 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4826 sc->tx_hi_watermark = sc->used_tx_bd);
4827 DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
4828 DBRUNIF(1, sc->tx_mbuf_alloc++);
4830 DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, nsegs));
4832 /* prod points to the next free tx_bd at this point. */
4834 sc->tx_prod_bseq = prod_bseq;
4840 /****************************************************************************/
4841 /* Main transmit routine when called from another routine with a lock. */
4845 /****************************************************************************/
/*
 * Transmit dispatch; the caller holds the driver lock.  Dequeues frames
 * from the interface send queue, hands each to bce_tx_encap() to build
 * TX buffer descriptors, mirrors each sent frame to BPF listeners,
 * writes the TX host index/byte-sequence mailbox registers to start the
 * hardware, and arms the TX watchdog timer.
 * (NOTE(review): listing is a lossy extraction -- some physical lines
 * are missing and file line numbers are fused into the text.)
 */
4847 bce_start_locked(struct ifnet *ifp)
4849 struct bce_softc *sc = ifp->if_softc;
4850 struct mbuf *m_head = NULL;
4852 u16 tx_prod, tx_chain_prod;
4854 /* If there's no link or the transmit queue is empty then just exit. */
4855 if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
4856 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
4858 goto bce_start_locked_exit;
4861 /* prod points to the next free tx_bd. */
4862 tx_prod = sc->tx_prod;
4863 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4865 DBPRINT(sc, BCE_INFO_SEND,
4866 "%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4867 "tx_prod_bseq = 0x%08X\n",
4868 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4871 * Keep adding entries while there is space in the ring.
4873 while (sc->used_tx_bd < sc->max_tx_bd) {
4875 /* Check for any frames to send. */
4876 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4881 * Pack the data into the transmit ring. If we
4882 * don't have room, place the mbuf back at the
4883 * head of the queue and set the OACTIVE flag
4884 * to wait for the NIC to drain the chain.
4886 if (bce_tx_encap(sc, &m_head)) {
4888 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4889 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4890 DBPRINT(sc, BCE_INFO_SEND,
4891 "TX chain is closed for business! Total tx_bd used = %d\n",
4898 /* Send a copy of the frame to any BPF listeners. */
4899 ETHER_BPF_MTAP(ifp, m_head);
4903 /* no packets were dequeued */
4904 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
4906 goto bce_start_locked_exit;
4909 /* Update the driver's counters. */
4910 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4912 DBPRINT(sc, BCE_INFO_SEND,
4913 "%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4914 "tx_prod_bseq = 0x%08X\n",
4915 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4917 /* Start the transmit by writing the producer mailbox registers. */
4918 REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4919 REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4921 /* Set the tx timeout. */
4922 sc->watchdog_timer = BCE_TX_TIMEOUT;
4924 bce_start_locked_exit:
4929 /****************************************************************************/
4930 /* Main transmit routine when called from another routine without a lock. */
4934 /****************************************************************************/
/*
 * Unlocked transmit entry point: forwards to bce_start_locked().
 * (NOTE(review): the lock acquire/release lines appear to have been
 * dropped by the lossy extraction; file line numbers are fused into
 * the text.)
 */
4936 bce_start(struct ifnet *ifp)
4938 struct bce_softc *sc = ifp->if_softc;
4941 bce_start_locked(ifp);
4946 /****************************************************************************/
4947 /* Handles any IOCTL calls from the operating system. */
4950 /* 0 for success, positive value for failure. */
4951 /****************************************************************************/
/*
 * ioctl handler: services MTU changes (SIOCSIFMTU), interface flag
 * changes (SIOCSIFFLAGS), multicast list updates, media get/set via
 * the MII layer, and capability toggles (polling, TX/RX checksum,
 * VLAN); anything else is passed to ether_ioctl().  Returns 0 or an
 * errno value via 'error'.
 * (NOTE(review): listing is a lossy extraction -- case labels, braces
 * and break statements are missing and file line numbers are fused
 * into the text.)
 */
4953 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4955 struct bce_softc *sc = ifp->if_softc;
4956 struct ifreq *ifr = (struct ifreq *) data;
4957 struct mii_data *mii;
4958 int mask, error = 0;
4962 /* Set the interface MTU. */
4964 /* Check that the MTU setting is supported. */
4965 if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
4966 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4971 DBPRINT(sc, BCE_INFO_MISC,
4972 "SIOCSIFMTU: Changing MTU from %d to %d\n",
4973 (int) ifp->if_mtu, (int) ifr->ifr_mtu);
4976 ifp->if_mtu = ifr->ifr_mtu;
4977 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4978 bce_init_locked(sc);
4982 /* Set interface flags. */
4984 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
4988 /* Check if the interface is up. */
4989 if (ifp->if_flags & IFF_UP) {
4990 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4991 /* Change promiscuous/multicast flags as necessary. */
4992 bce_set_rx_mode(sc);
4995 bce_init_locked(sc);
4998 /* The interface is down, check if driver is running. */
4999 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5002 /* If MFW is running, restart the controller a bit. */
5003 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5004 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5006 bce_mgmt_init_locked(sc);
5016 /* Add/Delete multicast address */
5019 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
5022 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5023 bce_set_rx_mode(sc);
5030 /* Set/Get Interface media */
5033 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
5035 mii = device_get_softc(sc->bce_miibus);
5036 error = ifmedia_ioctl(ifp, ifr,
5037 &mii->mii_media, command);
5040 /* Set interface capability */
5042 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5043 DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
5045 #ifdef DEVICE_POLLING
5046 if (mask & IFCAP_POLLING) {
5047 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5049 /* Setup the poll routine to call. */
5050 error = ether_poll_register(bce_poll, ifp);
5052 BCE_PRINTF("%s(%d): Error registering poll function!\n",
5053 __FILE__, __LINE__);
5054 goto bce_ioctl_exit;
5057 /* Clear the interrupt. */
5059 bce_disable_intr(sc);
5061 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5062 (1 << 16) | sc->bce_rx_quick_cons_trip);
5063 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5064 (1 << 16) | sc->bce_tx_quick_cons_trip);
5066 ifp->if_capenable |= IFCAP_POLLING;
5069 /* Clear the poll routine. */
5070 error = ether_poll_deregister(ifp);
5072 /* Enable interrupt even in error case */
5074 bce_enable_intr(sc);
5076 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5077 (sc->bce_tx_quick_cons_trip_int << 16) |
5078 sc->bce_tx_quick_cons_trip);
5079 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5080 (sc->bce_rx_quick_cons_trip_int << 16) |
5081 sc->bce_rx_quick_cons_trip);
5083 ifp->if_capenable &= ~IFCAP_POLLING;
5087 #endif /*DEVICE_POLLING */
5089 /* Toggle the TX checksum capabilities enable flag. */
5090 if (mask & IFCAP_TXCSUM) {
5091 ifp->if_capenable ^= IFCAP_TXCSUM;
5092 if (IFCAP_TXCSUM & ifp->if_capenable)
5093 ifp->if_hwassist = BCE_IF_HWASSIST;
5095 ifp->if_hwassist = 0;
5098 /* Toggle the RX checksum capabilities enable flag. */
5099 if (mask & IFCAP_RXCSUM) {
5100 ifp->if_capenable ^= IFCAP_RXCSUM;
5101 if (IFCAP_RXCSUM & ifp->if_capenable)
5102 ifp->if_hwassist = BCE_IF_HWASSIST;
5104 ifp->if_hwassist = 0;
5107 /* Toggle VLAN_MTU capabilities enable flag. */
5108 if (mask & IFCAP_VLAN_MTU) {
5109 BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
5110 __FILE__, __LINE__);
5113 /* Toggle VLANHWTAG capabilities enabled flag. */
5114 if (mask & IFCAP_VLAN_HWTAGGING) {
5115 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
5116 BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
5117 "management firmware (ASF/IPMI/UMP) is running!\n",
5118 __FILE__, __LINE__);
5120 BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
5121 __FILE__, __LINE__);
5126 /* We don't know how to handle the IOCTL, pass it on. */
5127 error = ether_ioctl(ifp, command, data);
5131 #ifdef DEVICE_POLLING
5138 /****************************************************************************/
5139 /* Transmit timeout handler. */
5143 /****************************************************************************/
/*
 * TX watchdog: called with the driver lock held.  Decrements the
 * timer and, when it expires, resets/reinitializes the controller --
 * unless the stall was caused by received pause frames (XOFF state),
 * in which case the hardware is left alone.  Bumps if_oerrors on a
 * real timeout.
 * (NOTE(review): lossy extraction -- some lines, e.g. the early
 * returns, are missing; file line numbers are fused into the text.)
 */
5145 bce_watchdog(struct bce_softc *sc)
5148 BCE_LOCK_ASSERT(sc);
5150 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
5154 * If we are in this routine because of pause frames, then
5155 * don't reset the hardware.
5157 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5160 BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
5161 __FILE__, __LINE__);
5163 DBRUN(BCE_VERBOSE_SEND,
5164 bce_dump_driver_state(sc);
5165 bce_dump_status_block(sc));
5167 /* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5169 sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5171 bce_init_locked(sc);
5172 sc->bce_ifp->if_oerrors++;
5177 #ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING worker (lock held): records the RX cycle budget,
 * syncs the status block DMA map, services completed RX and TX frames
 * when the hardware consumer indices have advanced, and restarts
 * transmission if the send queue is non-empty.
 * (NOTE(review): lossy extraction -- the bce_rx_intr/bce_tx_intr call
 * lines appear to be missing; file line numbers are fused into text.)
 */
5179 bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
5181 struct bce_softc *sc = ifp->if_softc;
5183 BCE_LOCK_ASSERT(sc);
5185 sc->bce_rxcycles = count;
5187 bus_dmamap_sync(sc->status_tag, sc->status_map,
5188 BUS_DMASYNC_POSTWRITE);
5190 /* Check for any completed RX frames. */
5191 if (sc->status_block->status_rx_quick_consumer_index0 !=
5195 /* Check for any completed TX frames. */
5196 if (sc->status_block->status_tx_quick_consumer_index0 !=
5200 /* Check for new frames to transmit. */
5201 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5202 bce_start_locked(ifp);
/*
 * DEVICE_POLLING entry point: dispatches to bce_poll_locked() only
 * while the interface is marked running.
 * (NOTE(review): lock acquire/release lines appear to be missing from
 * this lossy extraction.)
 */
5208 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5210 struct bce_softc *sc = ifp->if_softc;
5213 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5214 bce_poll_locked(ifp, cmd, count);
5217 #endif /* DEVICE_POLLING */
/*
 * Reports whether the status block shows pending work: RX or TX
 * consumer indices that differ from the driver's cached values, or a
 * link-state attention change.
 * (NOTE(review): the tail of this function -- the second half of the
 * link-state comparison and the returns -- is missing from this lossy
 * extraction.)
 */
5222 bce_has_work(struct bce_softc *sc)
5224 struct status_block *stat = sc->status_block;
5226 if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
5227 (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
5230 if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
5240 * Interrupt handler.
5242 /****************************************************************************/
5243 /* Main interrupt entry point. Verifies that the controller generated the */
5244 /* interrupt and then calls a separate routine to handle the various */
5245 /* interrupt causes (PHY, TX, RX). */
5248 /* 0 for success, positive value for failure. */
5249 /****************************************************************************/
/*
 * Interrupt service routine body (function signature line missing from
 * this lossy extraction; the preceding header comment identifies it as
 * the main interrupt entry point).  Verifies via the status block index
 * and PCICFG_MISC_STATUS that this device raised the interrupt, acks
 * and masks further interrupts, handles link-state attentions, treats
 * any other attention bit as fatal (reinitializing the chip), services
 * RX/TX completions, then re-enables interrupts with the saved status
 * index and restarts transmission if frames queued up meanwhile.
 */
5253 struct bce_softc *sc;
5255 u32 status_attn_bits;
5260 DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5263 DBRUNIF(1, sc->interrupts_generated++);
5265 #ifdef DEVICE_POLLING
5266 if (ifp->if_capenable & IFCAP_POLLING) {
5267 DBPRINT(sc, BCE_INFO_MISC, "Polling enabled!\n");
5272 bus_dmamap_sync(sc->status_tag, sc->status_map,
5273 BUS_DMASYNC_POSTWRITE);
5276 * If the hardware status block index
5277 * matches the last value read by the
5278 * driver and we haven't asserted our
5279 * interrupt then there's nothing to do.
5281 if ((sc->status_block->status_idx == sc->last_status_idx) &&
5282 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5285 /* Ack the interrupt and stop others from occurring. */
5286 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5287 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5288 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5290 /* Keep processing data as long as there is work to do. */
5293 status_attn_bits = sc->status_block->status_attn_bits;
5295 DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
5296 BCE_PRINTF("Simulating unexpected status attention bit set.");
5297 status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
5299 /* Was it a link change interrupt? */
5300 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5301 (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5304 /* If any other attention is asserted then the chip is toast. */
5305 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5306 (sc->status_block->status_attn_bits_ack &
5307 ~STATUS_ATTN_BITS_LINK_STATE))) {
5309 DBRUN(1, sc->unexpected_attentions++);
5311 BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
5312 __FILE__, __LINE__, sc->status_block->status_attn_bits);
5315 if (bce_debug_unexpected_attention == 0)
5316 bce_breakpoint(sc));
5318 bce_init_locked(sc);
5322 /* Check for any completed RX frames. */
5323 if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
5326 /* Check for any completed TX frames. */
5327 if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
5330 /* Save the status block index value for use during the next interrupt. */
5331 sc->last_status_idx = sc->status_block->status_idx;
5333 /* Prevent speculative reads from getting ahead of the status block. */
5334 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
5335 BUS_SPACE_BARRIER_READ);
5337 /* If there's no work left then exit the interrupt service routine. */
5338 if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
5339 (sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
5344 bus_dmamap_sync(sc->status_tag, sc->status_map,
5345 BUS_DMASYNC_PREWRITE);
5347 /* Re-enable interrupts. */
5348 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5349 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5350 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5351 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5352 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5354 /* Handle any frames that arrived while handling the interrupt. */
5355 if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5356 bce_start_locked(ifp);
5363 /****************************************************************************/
5364 /* Programs the various packet receive modes (broadcast and multicast). */
5368 /****************************************************************************/
/*
 * Programs the receive filter (lock held): builds an EMAC rx_mode and
 * RPM sort mode from the interface flags -- promiscuous, all-multicast
 * (hash registers set to all ones), or a CRC32-based 256-bit multicast
 * hash built from the address list -- writes the rx_mode register only
 * when it changed, then disables, reloads and re-enables the sort.
 * (NOTE(review): lossy extraction -- some lines are missing and file
 * line numbers are fused into the text.)
 */
5370 bce_set_rx_mode(struct bce_softc *sc)
5373 struct ifmultiaddr *ifma;
5374 u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5375 u32 rx_mode, sort_mode;
5378 BCE_LOCK_ASSERT(sc);
5382 /* Initialize receive mode default settings. */
5383 rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5384 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5385 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5388 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5391 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5392 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5393 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5396 * Check for promiscuous, all multicast, or selected
5397 * multicast address filtering.
5399 if (ifp->if_flags & IFF_PROMISC) {
5400 DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
5402 /* Enable promiscuous mode. */
5403 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5404 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5405 } else if (ifp->if_flags & IFF_ALLMULTI) {
5406 DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
5408 /* Enable all multicast addresses. */
5409 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5410 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5412 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5414 /* Accept one or more multicast(s). */
5415 DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
5418 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5419 if (ifma->ifma_addr->sa_family != AF_LINK)
5421 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5422 ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
5423 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5425 IF_ADDR_UNLOCK(ifp);
5427 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5428 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5430 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5433 /* Only make changes if the receive mode has actually changed. */
5434 if (rx_mode != sc->rx_mode) {
5435 DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
5438 sc->rx_mode = rx_mode;
5439 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5442 /* Disable and clear the existing sort before enabling a new sort. */
5443 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5444 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5445 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5449 /****************************************************************************/
5450 /* Called periodically to update statistics from the controller's */
5451 /* statistics block. */
5455 /****************************************************************************/
/*
 * Copies the controller's DMA'd statistics block into the ifnet
 * counters (collisions, ierrors, oerrors) and into the per-softc
 * sysctl statistics fields, reassembling 64-bit counters from their
 * _hi/_lo halves.  Skips carrier-sense errors on 5706 and 5708 A0
 * parts per errata E11_5708CA0_1165.  com_no_buffers is read
 * indirectly from chip address 0x120084.
 * (NOTE(review): lossy extraction -- some lines are missing and file
 * line numbers are fused into the text.)
 */
5457 bce_stats_update(struct bce_softc *sc)
5460 struct statistics_block *stats;
5462 DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5466 stats = (struct statistics_block *) sc->stats_block;
5469 * Update the interface statistics from the
5470 * hardware statistics.
5472 ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
5474 ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
5475 (u_long) stats->stat_EtherStatsOverrsizePkts +
5476 (u_long) stats->stat_IfInMBUFDiscards +
5477 (u_long) stats->stat_Dot3StatsAlignmentErrors +
5478 (u_long) stats->stat_Dot3StatsFCSErrors;
5480 ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5481 (u_long) stats->stat_Dot3StatsExcessiveCollisions +
5482 (u_long) stats->stat_Dot3StatsLateCollisions;
5485 * Certain controllers don't report
5486 * carrier sense errors correctly.
5487 * See errata E11_5708CA0_1165.
5489 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5490 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
5491 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5494 * Update the sysctl statistics from the
5495 * hardware statistics.
5497 sc->stat_IfHCInOctets =
5498 ((u64) stats->stat_IfHCInOctets_hi << 32) +
5499 (u64) stats->stat_IfHCInOctets_lo;
5501 sc->stat_IfHCInBadOctets =
5502 ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
5503 (u64) stats->stat_IfHCInBadOctets_lo;
5505 sc->stat_IfHCOutOctets =
5506 ((u64) stats->stat_IfHCOutOctets_hi << 32) +
5507 (u64) stats->stat_IfHCOutOctets_lo;
5509 sc->stat_IfHCOutBadOctets =
5510 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
5511 (u64) stats->stat_IfHCOutBadOctets_lo;
5513 sc->stat_IfHCInUcastPkts =
5514 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
5515 (u64) stats->stat_IfHCInUcastPkts_lo;
5517 sc->stat_IfHCInMulticastPkts =
5518 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
5519 (u64) stats->stat_IfHCInMulticastPkts_lo;
5521 sc->stat_IfHCInBroadcastPkts =
5522 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5523 (u64) stats->stat_IfHCInBroadcastPkts_lo;
5525 sc->stat_IfHCOutUcastPkts =
5526 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
5527 (u64) stats->stat_IfHCOutUcastPkts_lo;
5529 sc->stat_IfHCOutMulticastPkts =
5530 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5531 (u64) stats->stat_IfHCOutMulticastPkts_lo;
5533 sc->stat_IfHCOutBroadcastPkts =
5534 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5535 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
5537 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5538 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5540 sc->stat_Dot3StatsCarrierSenseErrors =
5541 stats->stat_Dot3StatsCarrierSenseErrors;
5543 sc->stat_Dot3StatsFCSErrors =
5544 stats->stat_Dot3StatsFCSErrors;
5546 sc->stat_Dot3StatsAlignmentErrors =
5547 stats->stat_Dot3StatsAlignmentErrors;
5549 sc->stat_Dot3StatsSingleCollisionFrames =
5550 stats->stat_Dot3StatsSingleCollisionFrames;
5552 sc->stat_Dot3StatsMultipleCollisionFrames =
5553 stats->stat_Dot3StatsMultipleCollisionFrames;
5555 sc->stat_Dot3StatsDeferredTransmissions =
5556 stats->stat_Dot3StatsDeferredTransmissions;
5558 sc->stat_Dot3StatsExcessiveCollisions =
5559 stats->stat_Dot3StatsExcessiveCollisions;
5561 sc->stat_Dot3StatsLateCollisions =
5562 stats->stat_Dot3StatsLateCollisions;
5564 sc->stat_EtherStatsCollisions =
5565 stats->stat_EtherStatsCollisions;
5567 sc->stat_EtherStatsFragments =
5568 stats->stat_EtherStatsFragments;
5570 sc->stat_EtherStatsJabbers =
5571 stats->stat_EtherStatsJabbers;
5573 sc->stat_EtherStatsUndersizePkts =
5574 stats->stat_EtherStatsUndersizePkts;
5576 sc->stat_EtherStatsOverrsizePkts =
5577 stats->stat_EtherStatsOverrsizePkts;
5579 sc->stat_EtherStatsPktsRx64Octets =
5580 stats->stat_EtherStatsPktsRx64Octets;
5582 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5583 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5585 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5586 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5588 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5589 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5591 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5592 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5594 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5595 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5597 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5598 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5600 sc->stat_EtherStatsPktsTx64Octets =
5601 stats->stat_EtherStatsPktsTx64Octets;
5603 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5604 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5606 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5607 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5609 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5610 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5612 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5613 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5615 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5616 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5618 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5619 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5621 sc->stat_XonPauseFramesReceived =
5622 stats->stat_XonPauseFramesReceived;
5624 sc->stat_XoffPauseFramesReceived =
5625 stats->stat_XoffPauseFramesReceived;
5627 sc->stat_OutXonSent =
5628 stats->stat_OutXonSent;
5630 sc->stat_OutXoffSent =
5631 stats->stat_OutXoffSent;
5633 sc->stat_FlowControlDone =
5634 stats->stat_FlowControlDone;
5636 sc->stat_MacControlFramesReceived =
5637 stats->stat_MacControlFramesReceived;
5639 sc->stat_XoffStateEntered =
5640 stats->stat_XoffStateEntered;
5642 sc->stat_IfInFramesL2FilterDiscards =
5643 stats->stat_IfInFramesL2FilterDiscards;
5645 sc->stat_IfInRuleCheckerDiscards =
5646 stats->stat_IfInRuleCheckerDiscards;
5648 sc->stat_IfInFTQDiscards =
5649 stats->stat_IfInFTQDiscards;
5651 sc->stat_IfInMBUFDiscards =
5652 stats->stat_IfInMBUFDiscards;
5654 sc->stat_IfInRuleCheckerP4Hit =
5655 stats->stat_IfInRuleCheckerP4Hit;
5657 sc->stat_CatchupInRuleCheckerDiscards =
5658 stats->stat_CatchupInRuleCheckerDiscards;
5660 sc->stat_CatchupInFTQDiscards =
5661 stats->stat_CatchupInFTQDiscards;
5663 sc->stat_CatchupInMBUFDiscards =
5664 stats->stat_CatchupInMBUFDiscards;
5666 sc->stat_CatchupInRuleCheckerP4Hit =
5667 stats->stat_CatchupInRuleCheckerP4Hit;
5669 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5671 DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
5675 /****************************************************************************/
5676 /* Periodic function to notify the bootcode that the driver is still */
5681 /****************************************************************************/
/*
 * Driver-alive heartbeat: writes an incrementing sequence number to the
 * bootcode's DRV_PULSE mailbox in shared memory, then reschedules
 * itself one second (hz ticks) later via the pulse callout.
 * (NOTE(review): lossy extraction -- some lines, e.g. the 'msg'
 * declaration, are missing; file line numbers are fused into the text.)
 */
5683 bce_pulse(void *xsc)
5685 struct bce_softc *sc = xsc;
5688 DBPRINT(sc, BCE_EXCESSIVE_MISC, "pulse\n");
5690 BCE_LOCK_ASSERT(sc);
5692 /* Tell the firmware that the driver is still running. */
5693 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5694 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5696 /* Schedule the next pulse. */
5697 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
5703 /****************************************************************************/
5704 /* Periodic function to perform maintenance tasks. */
5708 /****************************************************************************/
/*
 * Periodic (one-second) maintenance body -- the signature line is
 * missing from this lossy extraction; the exit label suggests the name
 * is bce_tick_locked while callout_reset registers bce_tick (TODO
 * confirm against the full source).  Updates statistics, reschedules
 * the tick callout, and polls the MII layer to detect link-up; when a
 * link appears, flushes any frames waiting in the send queue.
 */
5712 struct bce_softc *sc = xsc;
5713 struct mii_data *mii;
5718 BCE_LOCK_ASSERT(sc);
5720 /* Update the statistics from the hardware statistics block. */
5721 bce_stats_update(sc);
5723 /* Check that chip hasn't hung. */
5726 /* Schedule the next tick. */
5727 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
5729 /* If link is already up then we're done. */
5731 goto bce_tick_locked_exit;
5733 mii = device_get_softc(sc->bce_miibus);
5736 /* Check if the link has come up. */
5737 if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5738 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5740 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5741 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5743 BCE_PRINTF("Gigabit link up\n");
5744 /* Now that link is up, handle any outstanding TX traffic. */
5745 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5746 bce_start_locked(ifp);
5749 bce_tick_locked_exit:
5755 /****************************************************************************/
5756 /* Allows the driver state to be dumped through the sysctl interface. */
5759 /* 0 for success, positive value for failure. */
5760 /****************************************************************************/
/*
 * Debug sysctl handler: any write triggers bce_dump_driver_state();
 * reads and failed writes fall through without dumping.
 */
5762 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5766 struct bce_softc *sc;
5769 error = sysctl_handle_int(oidp, &result, 0, req);
5771 if (error || !req->newptr)
5775 sc = (struct bce_softc *)arg1;
5776 bce_dump_driver_state(sc);
5783 /****************************************************************************/
5784 /* Allows the hardware state to be dumped through the sysctl interface. */
5787 /* 0 for success, positive value for failure. */
5788 /****************************************************************************/
/*
 * Debug sysctl handler: any write triggers bce_dump_hw_state();
 * reads and failed writes fall through without dumping.
 */
5790 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5794 struct bce_softc *sc;
5797 error = sysctl_handle_int(oidp, &result, 0, req);
5799 if (error || !req->newptr)
5803 sc = (struct bce_softc *)arg1;
5804 bce_dump_hw_state(sc);
5811 /****************************************************************************/
5812 /* Allows the bootcode state to be dumped through the sysctl interface. */
5815 /* 0 for success, positive value for failure. */
5816 /****************************************************************************/
/*
 * Debug sysctl handler: any write triggers bce_dump_bc_state();
 * reads and failed writes fall through without dumping.
 */
5818 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
5822 struct bce_softc *sc;
5825 error = sysctl_handle_int(oidp, &result, 0, req);
5827 if (error || !req->newptr)
5831 sc = (struct bce_softc *)arg1;
5832 bce_dump_bc_state(sc);
5839 /****************************************************************************/
5840 /* Provides a sysctl interface to allow dumping the RX chain. */
5843 /* 0 for success, positive value for failure. */
5844 /****************************************************************************/
/*
 * Debug sysctl handler: any write dumps the entire RX chain
 * (descriptors 0 through sc->max_rx_bd).
 */
5846 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5850 struct bce_softc *sc;
5853 error = sysctl_handle_int(oidp, &result, 0, req);
5855 if (error || !req->newptr)
5859 sc = (struct bce_softc *)arg1;
5860 bce_dump_rx_chain(sc, 0, sc->max_rx_bd);
5867 /****************************************************************************/
5868 /* Provides a sysctl interface to allow dumping the TX chain. */
5871 /* 0 for success, positive value for failure. */
5872 /****************************************************************************/
/*
 * Debug sysctl handler: any write dumps the entire TX chain
 * (descriptors 0 through USABLE_TX_BD).
 */
5874 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
5878 struct bce_softc *sc;
5881 error = sysctl_handle_int(oidp, &result, 0, req);
5883 if (error || !req->newptr)
5887 sc = (struct bce_softc *)arg1;
5888 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
5895 /****************************************************************************/
5896 /* Provides a sysctl interface to allow reading arbitrary registers in the */
5897 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
5900 /* 0 for success, positive value for failure. */
5901 /****************************************************************************/
/*
 * Debug sysctl handler: writing a register offset prints its value --
 * direct (memory-mapped) access for offsets below 0x8000, indirect
 * access for offsets below 0x0280000; anything larger is ignored.
 * Marked in the header as not for production systems.
 */
5903 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5905 struct bce_softc *sc;
5910 error = sysctl_handle_int(oidp, &result, 0, req);
5911 if (error || (req->newptr == NULL))
5914 /* Make sure the register is accessible. */
5915 if (result < 0x8000) {
5916 sc = (struct bce_softc *)arg1;
5917 val = REG_RD(sc, result);
5918 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
5919 } else if (result < 0x0280000) {
5920 sc = (struct bce_softc *)arg1;
5921 val = REG_RD_IND(sc, result);
5922 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
5929 /****************************************************************************/
5930 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */
5931 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
5934 /* 0 for success, positive value for failure. */
5935 /****************************************************************************/
/*
 * Debug sysctl handler: writing a PHY register number (< 0x20) reads
 * that register through the MII bus and prints its value.  Marked in
 * the header as not for production systems.
 */
5937 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
5939 struct bce_softc *sc;
5945 error = sysctl_handle_int(oidp, &result, 0, req);
5946 if (error || (req->newptr == NULL))
5949 /* Make sure the register is accessible. */
5950 if (result < 0x20) {
5951 sc = (struct bce_softc *)arg1;
5953 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
5954 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
5960 /****************************************************************************/
5961 /* Provides a sysctl interface to forcing the driver to dump state and */
5962 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
5965 /* 0 for success, positive value for failure. */
5966 /****************************************************************************/
/*
 * Debug sysctl handler: a write forces the driver into the debugger
 * (the bce_breakpoint() call line is missing from this lossy
 * extraction).  Marked in the header as not for production systems.
 */
5968 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5972 struct bce_softc *sc;
5975 error = sysctl_handle_int(oidp, &result, 0, req);
5977 if (error || !req->newptr)
5981 sc = (struct bce_softc *)arg1;
5990 /****************************************************************************/
5991 /* Adds any sysctl parameters for tuning or debugging purposes. */
5994 /* 0 for success, positive value for failure. */
5995 /****************************************************************************/
/*
 * Registers the driver's statistics and debug sysctl nodes under the
 * device's sysctl tree (bce_dev).  Counters are exposed read-only; the
 * trailing SYSCTL_ADD_PROC nodes hook debug handlers (state dumps,
 * chain dumps, breakpoint, register/PHY reads).
 *
 * NOTE(review): this listing lost interleaved lines in extraction — the
 * function prologue/braces and several SYSCTL_ADD_* OID name-string
 * arguments are missing (e.g. the calls at original lines 6006, 6011,
 * 6016, 6021, 6046..6081, 6298, 6303, 6368).  Confirm against pristine
 * if_bce.c before relying on the exact node names.
 */
5997 bce_add_sysctls(struct bce_softc *sc)
5999 	struct sysctl_ctx_list *ctx;
6000 	struct sysctl_oid_list *children;
6002 	ctx = device_get_sysctl_ctx(sc->bce_dev);
6003 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
/* Driver-maintained debug counters (watermarks, failure counts). */
6006 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6008 	    CTLFLAG_RD, &sc->rx_low_watermark,
6009 	    0, "Lowest level of free rx_bd's");
6011 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6013 	    CTLFLAG_RD, &sc->rx_empty_count,
6014 	    0, "Number of times the RX chain was empty");
6016 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6018 	    CTLFLAG_RD, &sc->tx_hi_watermark,
6019 	    0, "Highest level of used tx_bd's");
6021 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6023 	    CTLFLAG_RD, &sc->tx_full_count,
6024 	    0, "Number of times the TX chain was full");
6026 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6027 	    "l2fhdr_status_errors",
6028 	    CTLFLAG_RD, &sc->l2fhdr_status_errors,
6029 	    0, "l2_fhdr status errors");
6031 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6032 	    "unexpected_attentions",
6033 	    CTLFLAG_RD, &sc->unexpected_attentions,
6034 	    0, "unexpected attentions");
6036 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6037 	    "lost_status_block_updates",
6038 	    CTLFLAG_RD, &sc->lost_status_block_updates,
6039 	    0, "lost status block updates");
6041 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6042 	    "mbuf_sim_alloc_failed",
6043 	    CTLFLAG_RD, &sc->mbuf_sim_alloc_failed,
6044 	    0, "mbuf cluster simulated allocation failures");
/* Per-segment-count histogram of received mbuf clusters (1..8 segments). */
6046 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6048 	    CTLFLAG_RD, &sc->rx_mbuf_segs[1],
6049 	    0, "mbuf cluster with 1 segment");
6051 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6053 	    CTLFLAG_RD, &sc->rx_mbuf_segs[2],
6054 	    0, "mbuf cluster with 2 segments");
6056 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6058 	    CTLFLAG_RD, &sc->rx_mbuf_segs[3],
6059 	    0, "mbuf cluster with 3 segments");
6061 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6063 	    CTLFLAG_RD, &sc->rx_mbuf_segs[4],
6064 	    0, "mbuf cluster with 4 segments");
6066 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6068 	    CTLFLAG_RD, &sc->rx_mbuf_segs[5],
6069 	    0, "mbuf cluster with 5 segments");
6071 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6073 	    CTLFLAG_RD, &sc->rx_mbuf_segs[6],
6074 	    0, "mbuf cluster with 6 segments");
6076 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6078 	    CTLFLAG_RD, &sc->rx_mbuf_segs[7],
6079 	    0, "mbuf cluster with 7 segments");
6081 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6083 	    CTLFLAG_RD, &sc->rx_mbuf_segs[8],
6084 	    0, "mbuf cluster with 8 segments");
6088 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6089 	    "mbuf_alloc_failed",
6090 	    CTLFLAG_RD, &sc->mbuf_alloc_failed,
6091 	    0, "mbuf cluster allocation failures");
6093 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6094 	    "tx_dma_map_failures",
6095 	    CTLFLAG_RD, &sc->tx_dma_map_failures,
6096 	    0, "tx dma mapping failures");
/* Hardware-maintained 64-bit statistics, mirrored into the softc. */
6098 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6099 	    "stat_IfHcInOctets",
6100 	    CTLFLAG_RD, &sc->stat_IfHCInOctets,
6103 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6104 	    "stat_IfHCInBadOctets",
6105 	    CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6106 	    "Bad bytes received");
6108 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6109 	    "stat_IfHCOutOctets",
6110 	    CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6113 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6114 	    "stat_IfHCOutBadOctets",
6115 	    CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6118 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6119 	    "stat_IfHCInUcastPkts",
6120 	    CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6121 	    "Unicast packets received");
6123 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6124 	    "stat_IfHCInMulticastPkts",
6125 	    CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6126 	    "Multicast packets received");
6128 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6129 	    "stat_IfHCInBroadcastPkts",
6130 	    CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6131 	    "Broadcast packets received");
6133 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6134 	    "stat_IfHCOutUcastPkts",
6135 	    CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6136 	    "Unicast packets sent");
6138 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6139 	    "stat_IfHCOutMulticastPkts",
6140 	    CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6141 	    "Multicast packets sent");
6143 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6144 	    "stat_IfHCOutBroadcastPkts",
6145 	    CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6146 	    "Broadcast packets sent");
/* Hardware-maintained 32-bit statistics. */
6148 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6149 	    "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6150 	    CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6151 	    0, "Internal MAC transmit errors");
6153 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6154 	    "stat_Dot3StatsCarrierSenseErrors",
6155 	    CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6156 	    0, "Carrier sense errors");
6158 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6159 	    "stat_Dot3StatsFCSErrors",
6160 	    CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6161 	    0, "Frame check sequence errors");
6163 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6164 	    "stat_Dot3StatsAlignmentErrors",
6165 	    CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6166 	    0, "Alignment errors");
6168 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6169 	    "stat_Dot3StatsSingleCollisionFrames",
6170 	    CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6171 	    0, "Single Collision Frames");
6173 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6174 	    "stat_Dot3StatsMultipleCollisionFrames",
6175 	    CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6176 	    0, "Multiple Collision Frames");
6178 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6179 	    "stat_Dot3StatsDeferredTransmissions",
6180 	    CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6181 	    0, "Deferred Transmissions");
6183 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6184 	    "stat_Dot3StatsExcessiveCollisions",
6185 	    CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6186 	    0, "Excessive Collisions");
6188 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6189 	    "stat_Dot3StatsLateCollisions",
6190 	    CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6191 	    0, "Late Collisions");
6193 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6194 	    "stat_EtherStatsCollisions",
6195 	    CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6198 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6199 	    "stat_EtherStatsFragments",
6200 	    CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6203 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6204 	    "stat_EtherStatsJabbers",
6205 	    CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6208 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6209 	    "stat_EtherStatsUndersizePkts",
6210 	    CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6211 	    0, "Undersize packets");
6213 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6214 	    "stat_EtherStatsOverrsizePkts",
6215 	    CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6216 	    0, "stat_EtherStatsOverrsizePkts");
6218 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6219 	    "stat_EtherStatsPktsRx64Octets",
6220 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6221 	    0, "Bytes received in 64 byte packets");
6223 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6224 	    "stat_EtherStatsPktsRx65Octetsto127Octets",
6225 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6226 	    0, "Bytes received in 65 to 127 byte packets");
6228 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6229 	    "stat_EtherStatsPktsRx128Octetsto255Octets",
6230 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6231 	    0, "Bytes received in 128 to 255 byte packets");
6233 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6234 	    "stat_EtherStatsPktsRx256Octetsto511Octets",
6235 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6236 	    0, "Bytes received in 256 to 511 byte packets");
6238 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6239 	    "stat_EtherStatsPktsRx512Octetsto1023Octets",
6240 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6241 	    0, "Bytes received in 512 to 1023 byte packets");
6243 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6244 	    "stat_EtherStatsPktsRx1024Octetsto1522Octets",
6245 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6246 	    0, "Bytes received in 1024 t0 1522 byte packets");
6248 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6249 	    "stat_EtherStatsPktsRx1523Octetsto9022Octets",
6250 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6251 	    0, "Bytes received in 1523 to 9022 byte packets");
6253 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6254 	    "stat_EtherStatsPktsTx64Octets",
6255 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6256 	    0, "Bytes sent in 64 byte packets");
6258 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6259 	    "stat_EtherStatsPktsTx65Octetsto127Octets",
6260 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6261 	    0, "Bytes sent in 65 to 127 byte packets");
6263 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6264 	    "stat_EtherStatsPktsTx128Octetsto255Octets",
6265 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6266 	    0, "Bytes sent in 128 to 255 byte packets");
6268 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6269 	    "stat_EtherStatsPktsTx256Octetsto511Octets",
6270 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6271 	    0, "Bytes sent in 256 to 511 byte packets");
6273 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6274 	    "stat_EtherStatsPktsTx512Octetsto1023Octets",
6275 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6276 	    0, "Bytes sent in 512 to 1023 byte packets");
6278 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6279 	    "stat_EtherStatsPktsTx1024Octetsto1522Octets",
6280 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6281 	    0, "Bytes sent in 1024 to 1522 byte packets");
6283 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6284 	    "stat_EtherStatsPktsTx1523Octetsto9022Octets",
6285 	    CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6286 	    0, "Bytes sent in 1523 to 9022 byte packets");
/* Flow-control statistics. */
6288 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6289 	    "stat_XonPauseFramesReceived",
6290 	    CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6291 	    0, "XON pause frames receved");
6293 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6294 	    "stat_XoffPauseFramesReceived",
6295 	    CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6296 	    0, "XOFF pause frames received");
6298 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6300 	    CTLFLAG_RD, &sc->stat_OutXonSent,
6301 	    0, "XON pause frames sent");
6303 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6305 	    CTLFLAG_RD, &sc->stat_OutXoffSent,
6306 	    0, "XOFF pause frames sent");
6308 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6309 	    "stat_FlowControlDone",
6310 	    CTLFLAG_RD, &sc->stat_FlowControlDone,
6311 	    0, "Flow control done");
6313 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6314 	    "stat_MacControlFramesReceived",
6315 	    CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6316 	    0, "MAC control frames received");
6318 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6319 	    "stat_XoffStateEntered",
6320 	    CTLFLAG_RD, &sc->stat_XoffStateEntered,
6321 	    0, "XOFF state entered");
/* Receive-path discard / rule-checker statistics. */
6323 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6324 	    "stat_IfInFramesL2FilterDiscards",
6325 	    CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6326 	    0, "Received L2 packets discarded");
6328 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6329 	    "stat_IfInRuleCheckerDiscards",
6330 	    CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6331 	    0, "Received packets discarded by rule");
6333 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6334 	    "stat_IfInFTQDiscards",
6335 	    CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6336 	    0, "Received packet FTQ discards");
6338 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6339 	    "stat_IfInMBUFDiscards",
6340 	    CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6341 	    0, "Received packets discarded due to lack of controller buffer memory");
6343 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6344 	    "stat_IfInRuleCheckerP4Hit",
6345 	    CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6346 	    0, "Received packets rule checker hits");
6348 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6349 	    "stat_CatchupInRuleCheckerDiscards",
6350 	    CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6351 	    0, "Received packets discarded in Catchup path");
6353 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6354 	    "stat_CatchupInFTQDiscards",
6355 	    CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6356 	    0, "Received packets discarded in FTQ in Catchup path");
6358 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6359 	    "stat_CatchupInMBUFDiscards",
6360 	    CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6361 	    0, "Received packets discarded in controller buffer memory in Catchup path");
6363 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6364 	    "stat_CatchupInRuleCheckerP4Hit",
6365 	    CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6366 	    0, "Received packets rule checker hits in Catchup path");
6368 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6370 	    CTLFLAG_RD, &sc->com_no_buffers,
6371 	    0, "Valid packets received but no RX buffers available");
/* Debug/diagnostic procedure nodes — each invokes a bce_sysctl_* handler. */
6374 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6375 	    "driver_state", CTLTYPE_INT | CTLFLAG_RW,
6377 	    bce_sysctl_driver_state, "I", "Drive state information");
6379 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6380 	    "hw_state", CTLTYPE_INT | CTLFLAG_RW,
6382 	    bce_sysctl_hw_state, "I", "Hardware state information");
6384 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6385 	    "bc_state", CTLTYPE_INT | CTLFLAG_RW,
6387 	    bce_sysctl_bc_state, "I", "Bootcode state information");
6389 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6390 	    "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6392 	    bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6394 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6395 	    "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
6397 	    bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
6399 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6400 	    "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6402 	    bce_sysctl_breakpoint, "I", "Driver breakpoint");
6404 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6405 	    "reg_read", CTLTYPE_INT | CTLFLAG_RW,
6407 	    bce_sysctl_reg_read, "I", "Register read");
6409 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6410 	    "phy_read", CTLTYPE_INT | CTLFLAG_RW,
6412 	    bce_sysctl_phy_read, "I", "PHY register read");
6419 /****************************************************************************/
6420 /* BCE Debug Routines */
6421 /****************************************************************************/
6424 /****************************************************************************/
6425 /* Freezes the controller to allow for a cohesive state dump. */
6429 /****************************************************************************/
/*
 * Halts all controller activity by setting the DISABLE_ALL bits in the
 * MISC command register, so that a cohesive state dump can be taken
 * (see the banner above; recovery is via bce_unfreeze_controller()).
 *
 * NOTE(review): the declaration of `val` and the function braces are not
 * visible in this listing — presumably lost in extraction; confirm
 * against the pristine source.
 */
6431 bce_freeze_controller(struct bce_softc *sc)
/* Read-modify-write: preserve other MISC_COMMAND bits. */
6434 	val = REG_RD(sc, BCE_MISC_COMMAND);
6435 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
6436 	REG_WR(sc, BCE_MISC_COMMAND, val);
6441 /****************************************************************************/
6442 /* Unfreezes the controller after a freeze operation. This may not always */
6443 /* work and the controller will require a reset! */
6447 /****************************************************************************/
/*
 * Attempts to resume controller activity after bce_freeze_controller()
 * by setting the ENABLE_ALL bits in the MISC command register.  Per the
 * banner above, this may not always succeed and a full reset may be
 * required.
 *
 * NOTE(review): the declaration of `val` and the function braces are not
 * visible in this listing — presumably lost in extraction.
 */
6449 bce_unfreeze_controller(struct bce_softc *sc)
/* Read-modify-write: preserve other MISC_COMMAND bits. */
6452 	val = REG_RD(sc, BCE_MISC_COMMAND);
6453 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
6454 	REG_WR(sc, BCE_MISC_COMMAND, val);
6458 /****************************************************************************/
6459 /* Prints out information about an mbuf. */
6463 /****************************************************************************/
/*
 * Debug helper: prints an mbuf's virtual address, length, m_flags,
 * packet-header flags and checksum flags, and (for M_EXT mbufs) the
 * external buffer address, size and type.  A NULL mbuf is reported and
 * presumably causes an early return (the return statement is not
 * visible in this listing).
 *
 * NOTE(review): several lines (braces, val_hi/val_lo declarations, the
 * printf calls paired with some flag tests, and the mbuf-chain walk)
 * appear to have been lost in extraction.
 */
6465 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6468 	struct mbuf *mp = m;
6471 		BCE_PRINTF("mbuf: null pointer\n");
/* Split the mbuf's virtual address into hi:lo 32-bit halves for printing. */
6476 		val_hi = BCE_ADDR_HI(mp);
6477 		val_lo = BCE_ADDR_LO(mp);
6478 		BCE_PRINTF("mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ( ",
6479 			val_hi, val_lo, mp->m_len);
/* Decode m_flags one bit at a time. */
6481 		if (mp->m_flags & M_EXT)
6483 		if (mp->m_flags & M_PKTHDR)
6484 			printf("M_PKTHDR ");
6485 		if (mp->m_flags & M_EOR)
6487 		if (mp->m_flags & M_RDONLY)
6488 			printf("M_RDONLY ");
6490 		val_hi = BCE_ADDR_HI(mp->m_data);
6491 		val_lo = BCE_ADDR_LO(mp->m_data);
6492 		printf(") m_data = 0x%08X:%08X\n",
/* Packet-header mbufs: decode pkthdr flags and csum_flags. */
6495 		if (mp->m_flags & M_PKTHDR) {
6496 			BCE_PRINTF("- m_pkthdr: flags = ( ");
6497 			if (mp->m_flags & M_BCAST)
6499 			if (mp->m_flags & M_MCAST)
6501 			if (mp->m_flags & M_FRAG)
6503 			if (mp->m_flags & M_FIRSTFRAG)
6504 				printf("M_FIRSTFRAG ");
6505 			if (mp->m_flags & M_LASTFRAG)
6506 				printf("M_LASTFRAG ");
6507 			if (mp->m_flags & M_VLANTAG)
6508 				printf("M_VLANTAG ");
6509 			printf(") csum_flags = ( ");
6510 			if (mp->m_pkthdr.csum_flags & CSUM_IP)
6512 			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
6513 				printf("CSUM_TCP ");
6514 			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
6515 				printf("CSUM_UDP ");
6516 			if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
6517 				printf("CSUM_IP_FRAGS ");
6518 			if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
6519 				printf("CSUM_FRAGMENT ");
6520 			if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
6521 				printf("CSUM_IP_CHECKED ");
6522 			if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
6523 				printf("CSUM_IP_VALID ");
6524 			if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
6525 				printf("CSUM_DATA_VALID ");
/* External-storage mbufs: report buffer address, size and storage type. */
6529 		if (mp->m_flags & M_EXT) {
6530 			val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6531 			val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6532 			BCE_PRINTF("- m_ext: vaddr = 0x%08X:%08X, ext_size = %d, type = ",
6533 				val_hi, val_lo, mp->m_ext.ext_size);
6534 			switch (mp->m_ext.ext_type) {
6535 			case EXT_CLUSTER: printf("EXT_CLUSTER\n"); break;
6536 			case EXT_SFBUF: printf("EXT_SFBUF\n"); break;
6537 			case EXT_JUMBO9: printf("EXT_JUMBO9\n"); break;
6538 			case EXT_JUMBO16: printf("EXT_JUMBO16\n"); break;
6539 			case EXT_PACKET: printf("EXT_PACKET\n"); break;
6540 			case EXT_NET_DRV: printf("EXT_NET_DRV\n"); break;
/* NOTE(review): label string "EXT_MDD_TYPE" looks like a typo for
 * "EXT_MOD_TYPE"; it is a runtime string so it is left untouched here. */
6541 			case EXT_MOD_TYPE: printf("EXT_MDD_TYPE\n"); break;
6542 			case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
6543 			case EXT_EXTREF: printf("EXT_EXTREF\n"); break;
6544 			default: printf("UNKNOWN\n");
6553 /****************************************************************************/
6554 /* Prints out the mbufs in the TX mbuf chain. */
6558 /****************************************************************************/
/*
 * Debug helper: dumps `count` TX mbufs starting at ring index
 * `chain_prod`, advancing with NEXT_TX_BD()/TX_CHAIN_IDX() so the walk
 * wraps correctly around the tx_bd ring.
 */
6560 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6565 	    "----------------------------"
6567 	    "----------------------------\n");
6569 	for (int i = 0; i < count; i++) {
6570 		m = sc->tx_mbuf_ptr[chain_prod];
6571 		BCE_PRINTF("txmbuf[%d]\n", chain_prod);
6572 		bce_dump_mbuf(sc, m);
/* Advance to the next tx_bd slot, wrapping at the end of the chain. */
6573 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6577 	    "----------------------------"
6579 	    "----------------------------\n");
6583 /****************************************************************************/
6584 /* Prints out the mbufs in the RX mbuf chain. */
6588 /****************************************************************************/
/*
 * Debug helper: dumps `count` RX mbufs starting at ring index
 * `chain_prod`, advancing with NEXT_RX_BD()/RX_CHAIN_IDX() so the walk
 * wraps correctly around the rx_bd ring.
 */
6590 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6595 	    "----------------------------"
6597 	    "----------------------------\n");
6599 	for (int i = 0; i < count; i++) {
6600 		m = sc->rx_mbuf_ptr[chain_prod];
6601 		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
6602 		bce_dump_mbuf(sc, m);
/* Advance to the next rx_bd slot, wrapping at the end of the chain. */
6603 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6608 	    "----------------------------"
6610 	    "----------------------------\n");
6614 /****************************************************************************/
6615 /* Prints out a tx_bd structure. */
6619 /****************************************************************************/
/*
 * Debug helper: prints a single tx_bd.  Three cases: an out-of-range
 * index, the last descriptor of a page (which holds the pointer to the
 * next chain page rather than packet data), or a normal descriptor
 * whose flag bits are decoded individually.
 */
6621 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6623 	if (idx > MAX_TX_BD)
6624 		/* Index out of range. */
6625 		BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6626 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6627 		/* TX Chain page pointer. */
6628 		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6629 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6631 		/* Normal tx_bd entry. */
6632 		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6633 			"vlan tag= 0x%04X, flags = 0x%04X (", idx,
6634 			txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6635 			txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
/* Decode each tx_bd flag bit into its symbolic name. */
6638 		if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
6639 			printf(" CONN_FAULT");
6641 		if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
6642 			printf(" TCP_UDP_CKSUM");
6644 		if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
6645 			printf(" IP_CKSUM");
6647 		if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
6650 		if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
6651 			printf(" COAL_NOW");
6653 		if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
6654 			printf(" DONT_GEN_CRC");
6656 		if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
6659 		if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
6662 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
6665 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
6666 			printf(" OPTION_WORD");
6668 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
6671 		if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
6680 /****************************************************************************/
6681 /* Prints out a rx_bd structure. */
6685 /****************************************************************************/
/*
 * Debug helper: prints a single rx_bd.  Three cases: an out-of-range
 * index, the last descriptor of a page (chain page pointer), or a
 * normal descriptor (address, length and flags).
 */
6687 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6689 	if (idx > MAX_RX_BD)
6690 		/* Index out of range. */
6691 		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6692 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
6693 		/* RX chain page pointer. */
6694 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6695 			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6697 		/* Normal rx_bd entry. */
6698 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6699 			"flags = 0x%08X\n", idx,
6700 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6701 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
6705 /****************************************************************************/
6706 /* Prints out a l2_fhdr structure. */
6710 /****************************************************************************/
/*
 * Debug helper: prints the fields of an l2_fhdr (the frame header the
 * controller prepends to each received packet): status, packet length,
 * VLAN tag, IP checksum and TCP/UDP checksum.
 */
6712 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6714 	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%08X, "
6715 		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6716 		"tcp_udp_xsum = 0x%04X\n", idx,
6717 		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6718 		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6719 		l2fhdr->l2_fhdr_tcp_udp_xsum);
6723 /****************************************************************************/
6724 /* Prints out the TX chain. */
6728 /****************************************************************************/
/*
 * Debug helper: prints tx_bd chain geometry (page size, page count,
 * descriptors per page) and then dumps `count` tx_bd entries starting
 * at `tx_prod`, wrapping around the ring via NEXT_TX_BD()/TX_CHAIN_IDX().
 */
6730 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6734 	/* First some info about the tx_bd chain structure. */
6736 	    "----------------------------"
6738 	    "----------------------------\n");
6740 	BCE_PRINTF("page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6741 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6743 	BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6744 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6746 	BCE_PRINTF("total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6749 	    "----------------------------"
6751 	    "----------------------------\n");
6753 	/* Now print out the tx_bd's themselves. */
6754 	for (int i = 0; i < count; i++) {
/* Translate the ring index into its page and within-page offset. */
6755 		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6756 		bce_dump_txbd(sc, tx_prod, txbd);
6757 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6761 	    "----------------------------"
6763 	    "----------------------------\n");
6767 /****************************************************************************/
6768 /* Prints out the RX chain. */
6772 /****************************************************************************/
/*
 * Debug helper: prints rx_bd chain geometry (page size, page count,
 * descriptors per page) and then dumps `count` rx_bd entries starting
 * at `rx_prod`, wrapping around the ring via NEXT_RX_BD()/RX_CHAIN_IDX().
 */
6774 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6778 	/* First some info about the rx_bd chain structure. */
6780 	    "----------------------------"
6782 	    "----------------------------\n");
6784 	BCE_PRINTF("page size      = 0x%08X, rx chain pages        = 0x%08X\n",
6785 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6787 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6788 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6790 	BCE_PRINTF("total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
6793 	    "----------------------------"
6795 	    "----------------------------\n");
6797 	/* Now print out the rx_bd's themselves. */
6798 	for (int i = 0; i < count; i++) {
/* Translate the ring index into its page and within-page offset. */
6799 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6800 		bce_dump_rxbd(sc, rx_prod, rxbd);
6801 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6805 	    "----------------------------"
6807 	    "----------------------------\n");
6811 /****************************************************************************/
6812 /* Prints out the status block from host memory. */
6816 /****************************************************************************/
/*
 * Debug helper: prints the host-memory status block — attention bits,
 * the L2 RX/TX quick consumer indices (both raw and masked to a chain
 * index), and any nonzero secondary indices, which are only used by
 * non-L2 (offload) firmware paths.
 */
6818 bce_dump_status_block(struct bce_softc *sc)
6820 	struct status_block *sblk;
6822 	sblk = sc->status_block;
6825 	    "----------------------------"
6827 	    "----------------------------\n");
6829 	BCE_PRINTF("    0x%08X - attn_bits\n",
6830 		sblk->status_attn_bits);
6832 	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
6833 		sblk->status_attn_bits_ack);
/* Raw index, then the same value masked down to a valid chain index. */
6835 	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
6836 		sblk->status_rx_quick_consumer_index0,
6837 		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
6839 	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
6840 		sblk->status_tx_quick_consumer_index0,
6841 		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
6843 	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);
6845 	/* These indices are not used for normal L2 drivers. */
6846 	if (sblk->status_rx_quick_consumer_index1)
6847 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
6848 			sblk->status_rx_quick_consumer_index1,
6849 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
6851 	if (sblk->status_tx_quick_consumer_index1)
6852 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
6853 			sblk->status_tx_quick_consumer_index1,
6854 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
6856 	if (sblk->status_rx_quick_consumer_index2)
6857 		BCE_PRINTF("0x%04X(0x%04X)- rx_cons2\n",
6858 			sblk->status_rx_quick_consumer_index2,
6859 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
6861 	if (sblk->status_tx_quick_consumer_index2)
6862 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
6863 			sblk->status_tx_quick_consumer_index2,
6864 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
6866 	if (sblk->status_rx_quick_consumer_index3)
6867 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
6868 			sblk->status_rx_quick_consumer_index3,
6869 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
6871 	if (sblk->status_tx_quick_consumer_index3)
6872 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
6873 			sblk->status_tx_quick_consumer_index3,
6874 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
/* Indices 4..15 are printed in pairs, only when nonzero. */
6876 	if (sblk->status_rx_quick_consumer_index4 ||
6877 		sblk->status_rx_quick_consumer_index5)
6878 		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5  = 0x%08X\n",
6879 			sblk->status_rx_quick_consumer_index4,
6880 			sblk->status_rx_quick_consumer_index5);
6882 	if (sblk->status_rx_quick_consumer_index6 ||
6883 		sblk->status_rx_quick_consumer_index7)
6884 		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7  = 0x%08X\n",
6885 			sblk->status_rx_quick_consumer_index6,
6886 			sblk->status_rx_quick_consumer_index7);
6888 	if (sblk->status_rx_quick_consumer_index8 ||
6889 		sblk->status_rx_quick_consumer_index9)
6890 		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9  = 0x%08X\n",
6891 			sblk->status_rx_quick_consumer_index8,
6892 			sblk->status_rx_quick_consumer_index9);
6894 	if (sblk->status_rx_quick_consumer_index10 ||
6895 		sblk->status_rx_quick_consumer_index11)
6896 		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
6897 			sblk->status_rx_quick_consumer_index10,
6898 			sblk->status_rx_quick_consumer_index11);
6900 	if (sblk->status_rx_quick_consumer_index12 ||
6901 		sblk->status_rx_quick_consumer_index13)
6902 		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
6903 			sblk->status_rx_quick_consumer_index12,
6904 			sblk->status_rx_quick_consumer_index13);
6906 	if (sblk->status_rx_quick_consumer_index14 ||
6907 		sblk->status_rx_quick_consumer_index15)
6908 		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
6909 			sblk->status_rx_quick_consumer_index14,
6910 			sblk->status_rx_quick_consumer_index15);
6912 	if (sblk->status_completion_producer_index ||
6913 		sblk->status_cmd_consumer_index)
6914 		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons  = 0x%08X\n",
6915 			sblk->status_completion_producer_index,
6916 			sblk->status_cmd_consumer_index);
6919 	    "----------------------------"
6921 	    "----------------------------\n");
6925 /****************************************************************************/
6926 /* Prints out the statistics block from host memory. */
6930 /****************************************************************************/
6932 bce_dump_stats_block(struct bce_softc *sc)
6934 struct statistics_block *sblk;
6936 sblk = sc->stats_block;
6940 " Stats Block (All Stats Not Shown Are 0) "
6941 "---------------\n");
6943 if (sblk->stat_IfHCInOctets_hi
6944 || sblk->stat_IfHCInOctets_lo)
6945 BCE_PRINTF("0x%08X:%08X : "
6947 sblk->stat_IfHCInOctets_hi,
6948 sblk->stat_IfHCInOctets_lo);
6950 if (sblk->stat_IfHCInBadOctets_hi
6951 || sblk->stat_IfHCInBadOctets_lo)
6952 BCE_PRINTF("0x%08X:%08X : "
6953 "IfHcInBadOctets\n",
6954 sblk->stat_IfHCInBadOctets_hi,
6955 sblk->stat_IfHCInBadOctets_lo);
6957 if (sblk->stat_IfHCOutOctets_hi
6958 || sblk->stat_IfHCOutOctets_lo)
6959 BCE_PRINTF("0x%08X:%08X : "
6961 sblk->stat_IfHCOutOctets_hi,
6962 sblk->stat_IfHCOutOctets_lo);
6964 if (sblk->stat_IfHCOutBadOctets_hi
6965 || sblk->stat_IfHCOutBadOctets_lo)
6966 BCE_PRINTF("0x%08X:%08X : "
6967 "IfHcOutBadOctets\n",
6968 sblk->stat_IfHCOutBadOctets_hi,
6969 sblk->stat_IfHCOutBadOctets_lo);
6971 if (sblk->stat_IfHCInUcastPkts_hi
6972 || sblk->stat_IfHCInUcastPkts_lo)
6973 BCE_PRINTF("0x%08X:%08X : "
6974 "IfHcInUcastPkts\n",
6975 sblk->stat_IfHCInUcastPkts_hi,
6976 sblk->stat_IfHCInUcastPkts_lo);
6978 if (sblk->stat_IfHCInBroadcastPkts_hi
6979 || sblk->stat_IfHCInBroadcastPkts_lo)
6980 BCE_PRINTF("0x%08X:%08X : "
6981 "IfHcInBroadcastPkts\n",
6982 sblk->stat_IfHCInBroadcastPkts_hi,
6983 sblk->stat_IfHCInBroadcastPkts_lo);
6985 if (sblk->stat_IfHCInMulticastPkts_hi
6986 || sblk->stat_IfHCInMulticastPkts_lo)
6987 BCE_PRINTF("0x%08X:%08X : "
6988 "IfHcInMulticastPkts\n",
6989 sblk->stat_IfHCInMulticastPkts_hi,
6990 sblk->stat_IfHCInMulticastPkts_lo);
6992 if (sblk->stat_IfHCOutUcastPkts_hi
6993 || sblk->stat_IfHCOutUcastPkts_lo)
6994 BCE_PRINTF("0x%08X:%08X : "
6995 "IfHcOutUcastPkts\n",
6996 sblk->stat_IfHCOutUcastPkts_hi,
6997 sblk->stat_IfHCOutUcastPkts_lo);
6999 if (sblk->stat_IfHCOutBroadcastPkts_hi
7000 || sblk->stat_IfHCOutBroadcastPkts_lo)
7001 BCE_PRINTF("0x%08X:%08X : "
7002 "IfHcOutBroadcastPkts\n",
7003 sblk->stat_IfHCOutBroadcastPkts_hi,
7004 sblk->stat_IfHCOutBroadcastPkts_lo);
7006 if (sblk->stat_IfHCOutMulticastPkts_hi
7007 || sblk->stat_IfHCOutMulticastPkts_lo)
7008 BCE_PRINTF("0x%08X:%08X : "
7009 "IfHcOutMulticastPkts\n",
7010 sblk->stat_IfHCOutMulticastPkts_hi,
7011 sblk->stat_IfHCOutMulticastPkts_lo);
7013 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
7014 BCE_PRINTF(" 0x%08X : "
7015 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
7016 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
7018 if (sblk->stat_Dot3StatsCarrierSenseErrors)
7019 BCE_PRINTF(" 0x%08X : Dot3StatsCarrierSenseErrors\n",
7020 sblk->stat_Dot3StatsCarrierSenseErrors);
7022 if (sblk->stat_Dot3StatsFCSErrors)
7023 BCE_PRINTF(" 0x%08X : Dot3StatsFCSErrors\n",
7024 sblk->stat_Dot3StatsFCSErrors);
7026 if (sblk->stat_Dot3StatsAlignmentErrors)
7027 BCE_PRINTF(" 0x%08X : Dot3StatsAlignmentErrors\n",
7028 sblk->stat_Dot3StatsAlignmentErrors);
7030 if (sblk->stat_Dot3StatsSingleCollisionFrames)
7031 BCE_PRINTF(" 0x%08X : Dot3StatsSingleCollisionFrames\n",
7032 sblk->stat_Dot3StatsSingleCollisionFrames);
7034 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
7035 BCE_PRINTF(" 0x%08X : Dot3StatsMultipleCollisionFrames\n",
7036 sblk->stat_Dot3StatsMultipleCollisionFrames);
7038 if (sblk->stat_Dot3StatsDeferredTransmissions)
7039 BCE_PRINTF(" 0x%08X : Dot3StatsDeferredTransmissions\n",
7040 sblk->stat_Dot3StatsDeferredTransmissions);
7042 if (sblk->stat_Dot3StatsExcessiveCollisions)
7043 BCE_PRINTF(" 0x%08X : Dot3StatsExcessiveCollisions\n",
7044 sblk->stat_Dot3StatsExcessiveCollisions);
7046 if (sblk->stat_Dot3StatsLateCollisions)
7047 BCE_PRINTF(" 0x%08X : Dot3StatsLateCollisions\n",
7048 sblk->stat_Dot3StatsLateCollisions);
7050 if (sblk->stat_EtherStatsCollisions)
7051 BCE_PRINTF(" 0x%08X : EtherStatsCollisions\n",
7052 sblk->stat_EtherStatsCollisions);
7054 if (sblk->stat_EtherStatsFragments)
7055 BCE_PRINTF(" 0x%08X : EtherStatsFragments\n",
7056 sblk->stat_EtherStatsFragments);
7058 if (sblk->stat_EtherStatsJabbers)
7059 BCE_PRINTF(" 0x%08X : EtherStatsJabbers\n",
7060 sblk->stat_EtherStatsJabbers);
7062 if (sblk->stat_EtherStatsUndersizePkts)
7063 BCE_PRINTF(" 0x%08X : EtherStatsUndersizePkts\n",
7064 sblk->stat_EtherStatsUndersizePkts);
7066 if (sblk->stat_EtherStatsOverrsizePkts)
7067 BCE_PRINTF(" 0x%08X : EtherStatsOverrsizePkts\n",
7068 sblk->stat_EtherStatsOverrsizePkts);
7070 if (sblk->stat_EtherStatsPktsRx64Octets)
7071 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx64Octets\n",
7072 sblk->stat_EtherStatsPktsRx64Octets);
7074 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
7075 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
7076 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
7078 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
7079 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
7080 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
7082 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
7083 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
7084 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
7086 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
7087 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
7088 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
7090 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
7091 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
7092 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
7094 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
7095 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
7096 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
7098 if (sblk->stat_EtherStatsPktsTx64Octets)
7099 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx64Octets\n",
7100 sblk->stat_EtherStatsPktsTx64Octets);
7102 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
7103 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
7104 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
7106 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
7107 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
7108 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
7110 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
7111 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
7112 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
7114 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
7115 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
7116 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
7118 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
7119 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
7120 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
7122 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
7123 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
7124 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
7126 if (sblk->stat_XonPauseFramesReceived)
7127 BCE_PRINTF(" 0x%08X : XonPauseFramesReceived\n",
7128 sblk->stat_XonPauseFramesReceived);
7130 if (sblk->stat_XoffPauseFramesReceived)
7131 BCE_PRINTF(" 0x%08X : XoffPauseFramesReceived\n",
7132 sblk->stat_XoffPauseFramesReceived);
7134 if (sblk->stat_OutXonSent)
7135 BCE_PRINTF(" 0x%08X : OutXonSent\n",
7136 sblk->stat_OutXonSent);
7138 if (sblk->stat_OutXoffSent)
7139 BCE_PRINTF(" 0x%08X : OutXoffSent\n",
7140 sblk->stat_OutXoffSent);
7142 if (sblk->stat_FlowControlDone)
7143 BCE_PRINTF(" 0x%08X : FlowControlDone\n",
7144 sblk->stat_FlowControlDone);
7146 if (sblk->stat_MacControlFramesReceived)
7147 BCE_PRINTF(" 0x%08X : MacControlFramesReceived\n",
7148 sblk->stat_MacControlFramesReceived);
7150 if (sblk->stat_XoffStateEntered)
7151 BCE_PRINTF(" 0x%08X : XoffStateEntered\n",
7152 sblk->stat_XoffStateEntered);
7154 if (sblk->stat_IfInFramesL2FilterDiscards)
7155 BCE_PRINTF(" 0x%08X : IfInFramesL2FilterDiscards\n",
7156 sblk->stat_IfInFramesL2FilterDiscards);
7158 if (sblk->stat_IfInRuleCheckerDiscards)
7159 BCE_PRINTF(" 0x%08X : IfInRuleCheckerDiscards\n",
7160 sblk->stat_IfInRuleCheckerDiscards);
7162 if (sblk->stat_IfInFTQDiscards)
7163 BCE_PRINTF(" 0x%08X : IfInFTQDiscards\n",
7164 sblk->stat_IfInFTQDiscards);
7166 if (sblk->stat_IfInMBUFDiscards)
7167 BCE_PRINTF(" 0x%08X : IfInMBUFDiscards\n",
7168 sblk->stat_IfInMBUFDiscards);
7170 if (sblk->stat_IfInRuleCheckerP4Hit)
7171 BCE_PRINTF(" 0x%08X : IfInRuleCheckerP4Hit\n",
7172 sblk->stat_IfInRuleCheckerP4Hit);
7174 if (sblk->stat_CatchupInRuleCheckerDiscards)
7175 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerDiscards\n",
7176 sblk->stat_CatchupInRuleCheckerDiscards);
7178 if (sblk->stat_CatchupInFTQDiscards)
7179 BCE_PRINTF(" 0x%08X : CatchupInFTQDiscards\n",
7180 sblk->stat_CatchupInFTQDiscards);
7182 if (sblk->stat_CatchupInMBUFDiscards)
7183 BCE_PRINTF(" 0x%08X : CatchupInMBUFDiscards\n",
7184 sblk->stat_CatchupInMBUFDiscards);
7186 if (sblk->stat_CatchupInRuleCheckerP4Hit)
7187 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerP4Hit\n",
7188 sblk->stat_CatchupInRuleCheckerP4Hit);
7191 "----------------------------"
7193 "----------------------------\n");
7197 /****************************************************************************/
7198 /* Prints out a summary of the driver state. */
7202 /****************************************************************************/
7204 bce_dump_driver_state(struct bce_softc *sc)
7209 "-----------------------------"
7211 "-----------------------------\n");
7213 val_hi = BCE_ADDR_HI(sc);
7214 val_lo = BCE_ADDR_LO(sc);
7215 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
7218 val_hi = BCE_ADDR_HI(sc->bce_vhandle);
7219 val_lo = BCE_ADDR_LO(sc->bce_vhandle);
7220 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
7223 val_hi = BCE_ADDR_HI(sc->status_block);
7224 val_lo = BCE_ADDR_LO(sc->status_block);
7225 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
7228 val_hi = BCE_ADDR_HI(sc->stats_block);
7229 val_lo = BCE_ADDR_LO(sc->stats_block);
7230 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
7233 val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
7234 val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
7236 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
7239 val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
7240 val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
7242 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
7245 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
7246 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
7248 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
7251 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
7252 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
7254 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
7257 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) h/w intrs\n",
7258 sc->interrupts_generated);
7260 BCE_PRINTF(" 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
7263 BCE_PRINTF(" 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
7266 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) status block index\n",
7267 sc->last_status_idx);
7269 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
7270 sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
7272 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
7273 sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
7275 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
7278 BCE_PRINTF(" 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
7281 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
7284 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
7285 sc->tx_hi_watermark, sc->max_tx_bd);
7287 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
7288 sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
7290 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
7291 sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
7293 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
7296 BCE_PRINTF(" 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
7299 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
7302 BCE_PRINTF("0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
7303 sc->rx_low_watermark, sc->max_rx_bd);
7305 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed) "
7306 "mbuf alloc failures\n",
7307 sc->mbuf_alloc_failed);
7309 BCE_PRINTF(" 0x%08X - (sc->mbuf_sim_alloc_failed) "
7310 "simulated mbuf alloc failures\n",
7311 sc->mbuf_sim_alloc_failed);
7314 "----------------------------"
7316 "----------------------------\n");
7320 /****************************************************************************/
7321 /* Prints out the hardware state through a summary of important registers, */
7322 /* followed by a complete register dump. */
7326 /****************************************************************************/
/*
 * Prints out the hardware state: bootcode version, a summary of block
 * status registers (read directly with REG_RD) and per-processor CPU
 * states (read indirectly with REG_RD_IND), followed by a raw dump of
 * register space 0x400-0x7fff, four registers per line.
 *
 * NOTE(review): the function prologue, local declarations, banner
 * caption strings and closing brace appear elided in this extract;
 * the visible code is left byte-identical.
 */
7328 bce_dump_hw_state(struct bce_softc *sc)
7333 "----------------------------"
7335 "----------------------------\n");
7337 BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
/* Block status registers, read through the direct register window. */
7339 val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
7340 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
7341 val1, BCE_MISC_ENABLE_STATUS_BITS);
7343 val1 = REG_RD(sc, BCE_DMA_STATUS);
7344 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val1, BCE_DMA_STATUS);
7346 val1 = REG_RD(sc, BCE_CTX_STATUS);
7347 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val1, BCE_CTX_STATUS);
7349 val1 = REG_RD(sc, BCE_EMAC_STATUS);
7350 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val1, BCE_EMAC_STATUS);
7352 val1 = REG_RD(sc, BCE_RPM_STATUS);
7353 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val1, BCE_RPM_STATUS);
7355 val1 = REG_RD(sc, BCE_TBDR_STATUS);
7356 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val1, BCE_TBDR_STATUS);
7358 val1 = REG_RD(sc, BCE_TDMA_STATUS);
7359 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val1, BCE_TDMA_STATUS);
7361 val1 = REG_RD(sc, BCE_HC_STATUS);
7362 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
/* On-chip processor states require indirect register access. */
7364 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
7365 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
7367 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7368 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
7370 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7371 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
7373 val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
7374 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val1, BCE_COM_CPU_STATE);
7376 val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
7377 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val1, BCE_MCP_CPU_STATE);
7379 val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
7380 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val1, BCE_CP_CPU_STATE);
7383 "----------------------------"
7385 "----------------------------\n");
7388 "----------------------------"
7390 "----------------------------\n");
/* Raw register dump: each iteration reads and prints 4 consecutive
 * 32-bit registers (i, i+4, i+8, i+0xC). */
7392 for (int i = 0x400; i < 0x8000; i += 0x10)
7393 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7394 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
7395 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
7398 "----------------------------"
7400 "----------------------------\n");
7404 /****************************************************************************/
7405 /* Prints out the bootcode state. */
7409 /****************************************************************************/
/*
 * Prints out the bootcode state: the firmware version and the
 * reset_type/state/condition/debug_cmd words, read indirectly from the
 * shared-memory region at sc->bce_shmem_base used for driver/bootcode
 * handshaking.
 *
 * NOTE(review): the prologue and several continuation lines appear
 * elided in this extract (e.g. the argument line after line 7427);
 * the visible code is left byte-identical.
 */
7411 bce_dump_bc_state(struct bce_softc *sc)
7416 "----------------------------"
7418 "----------------------------\n");
7420 BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
/* Each word lives at a fixed offset from the shared-memory base. */
7422 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE);
7423 BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
7424 val, BCE_BC_RESET_TYPE);
7426 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE);
7427 BCE_PRINTF("0x%08X - (0x%06X) state\n",
7430 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION);
7431 BCE_PRINTF("0x%08X - (0x%06X) condition\n",
7432 val, BCE_BC_CONDITION);
7434 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD);
7435 BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
7436 val, BCE_BC_STATE_DEBUG_CMD);
7439 "----------------------------"
7441 "----------------------------\n");
7445 /****************************************************************************/
7446 /* Prints out the TXP state. */
7450 /****************************************************************************/
7452 bce_dump_txp_state(struct bce_softc *sc)
7457 "----------------------------"
7459 "----------------------------\n");
7461 val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
7462 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val1, BCE_TXP_CPU_MODE);
7464 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
7465 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
7467 val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
7468 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val1, BCE_TXP_CPU_EVENT_MASK);
7471 "----------------------------"
7473 "----------------------------\n");
7475 for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
7476 /* Skip the big blank spaces */
7477 if (i < 0x454000 && i > 0x5ffff)
7478 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7479 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
7480 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
7484 "----------------------------"
7486 "----------------------------\n");
7490 /****************************************************************************/
7491 /* Prints out the RXP state. */
7495 /****************************************************************************/
7497 bce_dump_rxp_state(struct bce_softc *sc)
7502 "----------------------------"
7504 "----------------------------\n");
7506 val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
7507 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val1, BCE_RXP_CPU_MODE);
7509 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7510 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
7512 val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
7513 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val1, BCE_RXP_CPU_EVENT_MASK);
7516 "----------------------------"
7518 "----------------------------\n");
7520 for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
7521 /* Skip the big blank sapces */
7522 if (i < 0xc5400 && i > 0xdffff)
7523 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7524 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
7525 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
7529 "----------------------------"
7531 "----------------------------\n");
7535 /****************************************************************************/
7536 /* Prints out the TPAT state. */
7540 /****************************************************************************/
7542 bce_dump_tpat_state(struct bce_softc *sc)
7547 "----------------------------"
7549 "----------------------------\n");
7551 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
7552 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val1, BCE_TPAT_CPU_MODE);
7554 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7555 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
7557 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
7558 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val1, BCE_TPAT_CPU_EVENT_MASK);
7561 "----------------------------"
7563 "----------------------------\n");
7565 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
7566 /* Skip the big blank spaces */
7567 if (i < 0x854000 && i > 0x9ffff)
7568 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7569 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
7570 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
7574 "----------------------------"
7576 "----------------------------\n");
7580 /****************************************************************************/
7581 /* Prints out the driver state and then enters the debugger. */
7585 /****************************************************************************/
7587 bce_breakpoint(struct bce_softc *sc)
7590 /* Unreachable code to shut the compiler up about unused functions. */
7592 bce_freeze_controller(sc);
7593 bce_unfreeze_controller(sc);
7594 bce_dump_txbd(sc, 0, NULL);
7595 bce_dump_rxbd(sc, 0, NULL);
7596 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
7597 bce_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
7598 bce_dump_l2fhdr(sc, 0, NULL);
7599 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
7600 bce_dump_rx_chain(sc, 0, sc->max_rx_bd);
7601 bce_dump_status_block(sc);
7602 bce_dump_stats_block(sc);
7603 bce_dump_driver_state(sc);
7604 bce_dump_hw_state(sc);
7605 bce_dump_bc_state(sc);
7606 bce_dump_txp_state(sc);
7607 bce_dump_rxp_state(sc);
7608 bce_dump_tpat_state(sc);
7611 /* bce_freeze_controller(sc); */
7612 bce_dump_driver_state(sc);
7613 bce_dump_status_block(sc);
7614 bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
7615 bce_dump_hw_state(sc);
7616 bce_dump_txp_state(sc);
7617 /* bce_unfreeze_controller(sc); */
7619 /* Call the debugger. */