2 * Copyright (c) 2006 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * The following controllers are supported by this driver:
39 * The following controllers are not supported by this driver:
40 * (These are not "Production" versions of the controller.)
43 * BCM5706S A0, A1, A2, A3
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
53 /****************************************************************************/
54 /* BCE Driver Version */
55 /****************************************************************************/
/* Driver version string; non-static, so presumably referenced from other
 * compilation units or sysctl reporting — confirm before changing linkage. */
56 char bce_driver_version[] = "v0.9.6";
59 /****************************************************************************/
60 /* BCE Debug Options */
61 /****************************************************************************/
/* Default debug verbosity used by the DBPRINT() calls throughout the file. */
63 u32 bce_debug = BCE_WARN;
/* Magnitude-to-odds table for the simulated-failure knobs below:
 * the knob value sets how often the corresponding fault is injected. */
66 /* 1 = 1 in 2,147,483,648 */
67 /* 256 = 1 in 8,388,608 */
68 /* 2048 = 1 in 1,048,576 */
69 /* 65536 = 1 in 32,768 */
70 /* 1048576 = 1 in 2,048 */
71 /* 268435456 = 1 in 8 */
72 /* 536870912 = 1 in 4 */
73 /* 1073741824 = 1 in 2 */
/* All knobs default to 0 = fault injection disabled. */
75 /* Controls how often the l2_fhdr frame error check will fail. */
76 int bce_debug_l2fhdr_status_check = 0;
78 /* Controls how often the unexpected attention check will fail. */
79 int bce_debug_unexpected_attention = 0;
81 /* Controls how often to simulate an mbuf allocation failure. */
82 int bce_debug_mbuf_allocation_failure = 0;
84 /* Controls how often to simulate a DMA mapping failure. */
85 int bce_debug_dma_map_addr_failure = 0;
87 /* Controls how often to simulate a bootcode failure. */
88 int bce_debug_bootcode_running_failure = 0;
92 /****************************************************************************/
93 /* PCI Device ID Table */
95 /* Used by bce_probe() to identify the devices supported by this driver. */
96 /****************************************************************************/
97 #define BCE_DEVDESC_MAX 64
99 static struct bce_type bce_devs[] = {
100 /* BCM5706C Controllers and OEM boards. */
101 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
102 "HP NC370T Multifunction Gigabit Server Adapter" },
103 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
104 "HP NC370i Multifunction Gigabit Server Adapter" },
105 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
106 "Broadcom NetXtreme II BCM5706 1000Base-T" },
108 /* BCM5706S controllers and OEM boards. */
109 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
110 "HP NC370F Multifunction Gigabit Server Adapter" },
111 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
112 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 /* BCM5708C controllers and OEM boards. */
115 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
116 "Broadcom NetXtreme II BCM5708 1000Base-T" },
118 /* BCM5708S controllers and OEM boards. */
/* FIX(review): this entry previously duplicated the BCM5708C line verbatim
 * (copper device ID and "1000Base-T" label), so a BCM5708S part could never
 * match its own table entry. Use the SerDes device ID and SX description,
 * mirroring the BCM5706/BCM5706S pairing above. */
119 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
120 "Broadcom NetXtreme II BCM5708 1000Base-SX" },
125 /****************************************************************************/
126 /* Supported Flash NVRAM device data. */
127 /****************************************************************************/
/* NVRAM device table indexed by the flash strapping bits read at init time.
 * NOTE(review): field order follows struct flash_spec in if_bcereg.h
 * (strap/config words, buffered flag, page geometry, address mask, total
 * size, name) — confirm against the header. */
128 static struct flash_spec flash_table[] =
131 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
132 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
133 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
135 /* Expansion entry 0001 */
136 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
137 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
140 /* Saifun SA25F010 (non-buffered flash) */
141 /* strap, cfg1, & write1 need updates */
142 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
143 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
145 "Non-buffered flash (128kB)"},
146 /* Saifun SA25F020 (non-buffered flash) */
147 /* strap, cfg1, & write1 need updates */
148 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
149 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
151 "Non-buffered flash (256kB)"},
152 /* Expansion entry 0100 */
153 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
154 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
155 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
157 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
158 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
159 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
160 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
161 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
162 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
163 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
164 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
165 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
166 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
167 /* Saifun SA25F005 (non-buffered flash) */
168 /* strap, cfg1, & write1 need updates */
169 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
170 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
171 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
172 "Non-buffered flash (64kB)"},
174 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
175 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
176 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
178 /* Expansion entry 1001 */
179 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
180 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
183 /* Expansion entry 1010 */
184 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
185 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
186 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
188 /* ATMEL AT45DB011B (buffered flash) */
189 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
190 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
191 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
192 "Buffered flash (128kB)"},
193 /* Expansion entry 1100 */
194 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
195 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
198 /* Expansion entry 1101 */
199 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
200 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
201 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
203 /* Atmel Expansion entry 1110 */
204 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
205 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
207 "Entry 1110 (Atmel)"},
208 /* ATMEL AT45DB021B (buffered flash) */
209 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
210 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
211 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
212 "Buffered flash (256kB)"},
216 /****************************************************************************/
217 /* FreeBSD device entry points. */
218 /****************************************************************************/
219 static int bce_probe (device_t);
220 static int bce_attach (device_t);
221 static int bce_detach (device_t);
222 static void bce_shutdown (device_t);
225 /****************************************************************************/
226 /* BCE Debug Data Structure Dump Routines */
227 /****************************************************************************/
229 static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
230 static void bce_dump_tx_mbuf_chain (struct bce_softc *, int, int);
231 static void bce_dump_rx_mbuf_chain (struct bce_softc *, int, int);
232 static void bce_dump_txbd (struct bce_softc *, int, struct tx_bd *);
233 static void bce_dump_rxbd (struct bce_softc *, int, struct rx_bd *);
234 static void bce_dump_l2fhdr (struct bce_softc *, int, struct l2_fhdr *);
235 static void bce_dump_tx_chain (struct bce_softc *, int, int);
236 static void bce_dump_rx_chain (struct bce_softc *, int, int);
237 static void bce_dump_status_block (struct bce_softc *);
238 static void bce_dump_stats_block (struct bce_softc *);
239 static void bce_dump_driver_state (struct bce_softc *);
240 static void bce_dump_hw_state (struct bce_softc *);
241 static void bce_breakpoint (struct bce_softc *);
245 /****************************************************************************/
246 /* BCE Register/Memory Access Routines */
247 /****************************************************************************/
248 static u32 bce_reg_rd_ind (struct bce_softc *, u32);
249 static void bce_reg_wr_ind (struct bce_softc *, u32, u32);
250 static void bce_ctx_wr (struct bce_softc *, u32, u32, u32);
251 static int bce_miibus_read_reg (device_t, int, int);
252 static int bce_miibus_write_reg (device_t, int, int, int);
253 static void bce_miibus_statchg (device_t);
256 /****************************************************************************/
257 /* BCE NVRAM Access Routines */
258 /****************************************************************************/
259 static int bce_acquire_nvram_lock (struct bce_softc *);
260 static int bce_release_nvram_lock (struct bce_softc *);
261 static void bce_enable_nvram_access (struct bce_softc *);
262 static void bce_disable_nvram_access(struct bce_softc *);
263 static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
264 static int bce_init_nvram (struct bce_softc *);
265 static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
266 static int bce_nvram_test (struct bce_softc *);
267 #ifdef BCE_NVRAM_WRITE_SUPPORT
268 static int bce_enable_nvram_write (struct bce_softc *);
269 static void bce_disable_nvram_write (struct bce_softc *);
270 static int bce_nvram_erase_page (struct bce_softc *, u32);
271 static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
272 static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
275 /****************************************************************************/
277 /****************************************************************************/
278 static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int);
279 static void bce_dma_map_tx_desc (void *, bus_dma_segment_t *, int, bus_size_t, int);
280 static int bce_dma_alloc (device_t);
281 static void bce_dma_free (struct bce_softc *);
282 static void bce_release_resources (struct bce_softc *);
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load */
286 /****************************************************************************/
287 static int bce_fw_sync (struct bce_softc *, u32);
288 static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw (struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus (struct bce_softc *);
292 static void bce_stop (struct bce_softc *);
293 static int bce_reset (struct bce_softc *, u32);
294 static int bce_chipinit (struct bce_softc *);
295 static int bce_blockinit (struct bce_softc *);
296 static int bce_get_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
298 static int bce_init_tx_chain (struct bce_softc *);
299 static int bce_init_rx_chain (struct bce_softc *);
300 static void bce_free_rx_chain (struct bce_softc *);
301 static void bce_free_tx_chain (struct bce_softc *);
303 static int bce_tx_encap (struct bce_softc *, struct mbuf *, u16 *);
304 static void bce_start_locked (struct ifnet *);
305 static void bce_start (struct ifnet *);
306 static int bce_ioctl (struct ifnet *, u_long, caddr_t);
307 static void bce_watchdog (struct ifnet *);
308 static int bce_ifmedia_upd (struct ifnet *);
309 static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *);
310 static void bce_init_locked (struct bce_softc *);
311 static void bce_init (void *);
312 static void bce_mgmt_init_locked(struct bce_softc *sc);
314 static void bce_init_context (struct bce_softc *);
315 static void bce_get_mac_addr (struct bce_softc *);
316 static void bce_set_mac_addr (struct bce_softc *);
317 static void bce_phy_intr (struct bce_softc *);
318 static void bce_rx_intr (struct bce_softc *);
319 static void bce_tx_intr (struct bce_softc *);
320 static void bce_disable_intr (struct bce_softc *);
321 static void bce_enable_intr (struct bce_softc *);
323 #ifdef DEVICE_POLLING
324 static void bce_poll_locked (struct ifnet *, enum poll_cmd, int);
325 static void bce_poll (struct ifnet *, enum poll_cmd, int);
327 static void bce_intr (void *);
328 static void bce_set_rx_mode (struct bce_softc *);
329 static void bce_stats_update (struct bce_softc *);
330 static void bce_tick_locked (struct bce_softc *);
331 static void bce_tick (void *);
332 static void bce_add_sysctls (struct bce_softc *);
335 /****************************************************************************/
336 /* FreeBSD device dispatch table. */
337 /****************************************************************************/
338 static device_method_t bce_methods[] = {
339 /* Device interface */
340 DEVMETHOD(device_probe, bce_probe),
341 DEVMETHOD(device_attach, bce_attach),
342 DEVMETHOD(device_detach, bce_detach),
343 DEVMETHOD(device_shutdown, bce_shutdown),
/* Bus interface (generic pass-throughs). */
346 DEVMETHOD(bus_print_child, bus_generic_print_child),
347 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface — lets the miibus child drive the PHY via this device. */
350 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
351 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
352 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
/* Driver descriptor handed to DRIVER_MODULE() below. */
357 static driver_t bce_driver = {
360 sizeof(struct bce_softc)
363 static devclass_t bce_devclass;
/* Module dependencies: bce needs the PCI bus, the Ethernet layer, and
 * the MII bus framework loaded before it can attach. */
365 MODULE_DEPEND(bce, pci, 1, 1, 1);
366 MODULE_DEPEND(bce, ether, 1, 1, 1);
367 MODULE_DEPEND(bce, miibus, 1, 1, 1);
/* Register bce on the PCI bus and attach a miibus child under bce. */
369 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
370 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
373 /****************************************************************************/
374 /* Device probe function. */
376 /* Compares the device to the driver's list of supported devices and */
377 /* reports back to the OS whether this is the right driver for the device. */
380 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
381 /****************************************************************************/
383 bce_probe(device_t dev)
386 struct bce_softc *sc;
388 u16 vid = 0, did = 0, svid = 0, sdid = 0;
392 sc = device_get_softc(dev);
393 bzero(sc, sizeof(struct bce_softc));
394 sc->bce_unit = device_get_unit(dev);
397 /* Get the data for the device to be probed. */
398 vid = pci_get_vendor(dev);
399 did = pci_get_device(dev);
400 svid = pci_get_subvendor(dev);
401 sdid = pci_get_subdevice(dev);
403 DBPRINT(sc, BCE_VERBOSE_LOAD,
404 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
405 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
407 /* Look through the list of known devices for a match. */
408 while(t->bce_name != NULL) {
/* Match vendor/device exactly; subvendor/subdevice may be wildcarded. */
410 if ((vid == t->bce_vid) && (did == t->bce_did) &&
411 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
412 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
414 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
419 /* Print out the device identity. */
/* Revision byte: high nibble is the letter rev (A, B, ...), low nibble
 * the numeric stepping. */
420 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s",
422 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
423 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
426 device_set_desc_copy(dev, descbuf);
427 free(descbuf, M_TEMP);
428 return(BUS_PROBE_DEFAULT);
/* FIX(review): message previously read "No IOCTL match found!" — this is
 * the PCI probe path, not an ioctl handler; reword the diagnostic. */
433 DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No PCI device match found!\n",
440 /****************************************************************************/
441 /* Device attach function. */
443 /* Allocates device resources, performs secondary chip identification, */
444 /* resets and initializes the hardware, and initializes driver instance */
448 /* 0 on success, positive value on failure. */
449 /****************************************************************************/
451 bce_attach(device_t dev)
453 struct bce_softc *sc;
456 int mbuf, rid, rc = 0;
458 sc = device_get_softc(dev);
461 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
463 mbuf = device_get_unit(dev);
/* Enable bus mastering so the NIC can DMA. */
466 pci_enable_busmaster(dev);
468 /* Allocate PCI memory resources. */
470 sc->bce_res = bus_alloc_resource_any(
472 SYS_RES_MEMORY, /* type */
474 RF_ACTIVE | PCI_RF_DENSE); /* flags */
476 if (sc->bce_res == NULL) {
477 BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n",
480 goto bce_attach_fail;
483 /* Get various resource handles. */
484 sc->bce_btag = rman_get_bustag(sc->bce_res);
485 sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
486 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
488 /* Allocate PCI IRQ resources. */
490 sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
491 RF_SHAREABLE | RF_ACTIVE);
493 if (sc->bce_irq == NULL) {
494 BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n",
497 goto bce_attach_fail;
500 /* Initialize mutex for the current device instance. */
501 BCE_LOCK_INIT(sc, device_get_nameunit(dev));
504 * Configure byte swap and enable indirect register access.
505 * Rely on CPU to do target byte swapping on big endian systems.
506 * Access to registers outside of PCI configuration space are not
507 * valid until this is done.
509 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
510 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
511 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
513 /* Save ASIC revision info. */
514 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
516 /* Weed out any non-production controller revisions. */
517 switch(BCE_CHIP_ID(sc)) {
518 case BCE_CHIP_ID_5706_A0:
519 case BCE_CHIP_ID_5706_A1:
520 case BCE_CHIP_ID_5708_A0:
521 case BCE_CHIP_ID_5708_B0:
522 BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
524 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
525 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
527 goto bce_attach_fail;
/* SerDes (fiber) bond option is rejected at attach time. */
530 if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
531 BCE_PRINTF(sc, "%s(%d): SerDes controllers are not supported!\n",
534 goto bce_attach_fail;
538 * The embedded PCIe to PCI-X bridge (EPB)
539 * in the 5708 cannot address memory above
540 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
542 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
543 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
545 sc->max_bus_addr = BUS_SPACE_MAXADDR;
548 * Find the base address for shared memory access.
549 * Newer versions of bootcode use a signature and offset
550 * while older versions use a fixed address.
552 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
553 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
554 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
556 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
558 DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
560 /* Set initial device and PHY flags */
562 sc->bce_phy_flags = 0;
564 /* Get PCI bus information (speed and type). */
565 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
566 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
569 sc->bce_flags |= BCE_PCIX_FLAG;
/* Decode the detected PCI-X clock speed into MHz. */
571 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
573 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
575 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
576 sc->bus_speed_mhz = 133;
579 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
580 sc->bus_speed_mhz = 100;
583 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
584 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
585 sc->bus_speed_mhz = 66;
588 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
589 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
590 sc->bus_speed_mhz = 50;
593 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
594 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
595 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
596 sc->bus_speed_mhz = 33;
/* Conventional PCI: M66EN pin distinguishes 66MHz from 33MHz. */
600 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
601 sc->bus_speed_mhz = 66;
603 sc->bus_speed_mhz = 33;
606 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
607 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
609 BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
611 ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
612 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
613 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
614 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
617 /* Reset the controller. */
618 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
620 goto bce_attach_fail;
623 /* Initialize the controller. */
624 if (bce_chipinit(sc)) {
625 BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
628 goto bce_attach_fail;
631 /* Perform NVRAM test. */
632 if (bce_nvram_test(sc)) {
633 BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
636 goto bce_attach_fail;
639 /* Fetch the permanent Ethernet MAC address. */
640 bce_get_mac_addr(sc);
643 * Trip points control how many BDs
644 * should be ready before generating an
645 * interrupt while ticks control how long
646 * a BD can sit in the chain before
647 * generating an interrupt. Set the default
648 * values for the RX and TX rings.
652 /* Force more frequent interrupts. */
653 sc->bce_tx_quick_cons_trip_int = 1;
654 sc->bce_tx_quick_cons_trip = 1;
655 sc->bce_tx_ticks_int = 0;
656 sc->bce_tx_ticks = 0;
658 sc->bce_rx_quick_cons_trip_int = 1;
659 sc->bce_rx_quick_cons_trip = 1;
660 sc->bce_rx_ticks_int = 0;
661 sc->bce_rx_ticks = 0;
663 sc->bce_tx_quick_cons_trip_int = 20;
664 sc->bce_tx_quick_cons_trip = 20;
665 sc->bce_tx_ticks_int = 80;
666 sc->bce_tx_ticks = 80;
668 sc->bce_rx_quick_cons_trip_int = 6;
669 sc->bce_rx_quick_cons_trip = 6;
670 sc->bce_rx_ticks_int = 18;
671 sc->bce_rx_ticks = 18;
674 /* Update statistics once every second. */
/* NOTE(review): mask clears the low byte of 1,000,000us — presumably the
 * hardware ignores/requires zero low-order bits in this field; confirm
 * against the register description. */
675 sc->bce_stats_ticks = 1000000 & 0xffff00;
678 * The copper based NetXtreme II controllers
679 * use an integrated PHY at address 1 while
680 * the SerDes controllers use a PHY at
683 sc->bce_phy_addr = 1;
685 if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
686 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
687 sc->bce_flags |= BCE_NO_WOL_FLAG;
688 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
689 sc->bce_phy_addr = 2;
690 val = REG_RD_IND(sc, sc->bce_shmem_base +
691 BCE_SHARED_HW_CFG_CONFIG);
692 if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
693 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
697 /* Allocate DMA memory resources. */
698 if (bce_dma_alloc(dev)) {
699 BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
702 goto bce_attach_fail;
705 /* Allocate an ifnet structure. */
706 ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
708 BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n",
711 goto bce_attach_fail;
714 /* Initialize the ifnet interface. */
716 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
717 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
718 ifp->if_ioctl = bce_ioctl;
719 ifp->if_start = bce_start;
721 ifp->if_watchdog = bce_watchdog;
722 ifp->if_init = bce_init;
723 ifp->if_mtu = ETHERMTU;
724 ifp->if_hwassist = BCE_IF_HWASSIST;
725 ifp->if_capabilities = BCE_IF_CAPABILITIES;
726 ifp->if_capenable = ifp->if_capabilities;
728 /* Assume a standard 1500 byte MTU size for mbuf allocations. */
729 sc->mbuf_alloc_size = MCLBYTES;
730 #ifdef DEVICE_POLLING
731 ifp->if_capabilities |= IFCAP_POLLING;
734 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
/* Advertise the appropriate link speed for media reporting. */
735 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
736 ifp->if_baudrate = IF_Gbps(2.5);
738 ifp->if_baudrate = IF_Gbps(1);
740 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
741 IFQ_SET_READY(&ifp->if_snd);
743 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
744 BCE_PRINTF(sc, "%s(%d): SerDes is not supported by this driver!\n",
747 goto bce_attach_fail;
749 /* Look for our PHY. */
750 if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
752 BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n",
755 goto bce_attach_fail;
759 /* Attach to the Ethernet interface list. */
760 ether_ifattach(ifp, sc->eaddr);
762 #if __FreeBSD_version < 500000
763 callout_init(&sc->bce_stat_ch);
765 callout_init(&sc->bce_stat_ch, CALLOUT_MPSAFE);
768 /* Hookup IRQ last. */
769 rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
770 bce_intr, sc, &sc->bce_intrhand);
773 BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n",
776 goto bce_attach_exit;
779 /* Print some important debugging info. */
780 DBRUN(BCE_INFO, bce_dump_driver_state(sc));
782 /* Add the supported sysctls to the kernel. */
785 /* Get the firmware running so IPMI still works */
787 bce_mgmt_init_locked(sc);
790 goto bce_attach_exit;
/* Failure path: undo everything acquired above. */
793 bce_release_resources(sc);
797 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
803 /****************************************************************************/
804 /* Device detach function. */
806 /* Stops the controller, resets the controller, and releases resources. */
809 /* 0 on success, positive value on failure. */
810 /****************************************************************************/
812 bce_detach(device_t dev)
814 struct bce_softc *sc;
817 sc = device_get_softc(dev);
819 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
/* Turn off polling before the interface goes away. */
823 #ifdef DEVICE_POLLING
824 if (ifp->if_capenable & IFCAP_POLLING)
825 ether_poll_deregister(ifp)
831 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
836 /* If we have a child device on the MII bus remove it too. */
837 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
838 ifmedia_removeall(&sc->bce_ifmedia);
840 bus_generic_detach(dev);
841 device_delete_child(dev, sc->bce_miibus);
844 /* Release all remaining resources. */
845 bce_release_resources(sc);
847 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
853 /****************************************************************************/
854 /* Device shutdown function. */
856 /* Stops and resets the controller. */
860 /****************************************************************************/
862 bce_shutdown(device_t dev)
864 struct bce_softc *sc = device_get_softc(dev);
/* Reset the chip so it is quiescent across reboot. */
868 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
873 /****************************************************************************/
874 /* Indirect register read. */
876 /* Reads NetXtreme II registers using an index/data register pair in PCI */
877 /* configuration space. Using this mechanism avoids issues with posted */
878 /* reads but is much slower than memory-mapped I/O. */
881 /* The value of the register. */
882 /****************************************************************************/
884 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
/* Select the target register through the config-space window... */
889 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
/* NOTE(review): the traced read below and the direct return presumably sit
 * in debug/non-debug conditional branches — confirm; as written they would
 * read the window register twice. */
893 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
894 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
895 __FUNCTION__, offset, val);
899 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
904 /****************************************************************************/
905 /* Indirect register write. */
907 /* Writes NetXtreme II registers using an index/data register pair in PCI */
908 /* configuration space. Using this mechanism avoids issues with posted */
909 /* writes but is much slower than memory-mapped I/O. */
913 /****************************************************************************/
915 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
920 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
921 __FUNCTION__, offset, val);
/* Select the target register, then write the value through the window. */
923 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
924 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
928 /****************************************************************************/
929 /* Context memory write. */
931 /* The NetXtreme II controller uses context memory to track connection */
932 /* information for L2 and higher network protocols. */
936 /****************************************************************************/
938 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
941 DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
942 "val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
/* Set the context-memory address, then write the data word. */
945 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
946 REG_WR(sc, BCE_CTX_DATA, val);
950 /****************************************************************************/
951 /* PHY register read. */
953 /* Implements register reads on the MII bus. */
956 /* The value of the register. */
957 /****************************************************************************/
959 bce_miibus_read_reg(device_t dev, int phy, int reg)
961 struct bce_softc *sc;
965 sc = device_get_softc(dev);
967 /* Make sure we are accessing the correct PHY address. */
968 if (phy != sc->bce_phy_addr) {
969 DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
/* Auto-polling and manual MDIO access share the bus: pause auto-poll
 * before issuing the command (re-enabled at the end). */
973 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
974 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
975 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
977 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
/* Read back to post the write before continuing. */
978 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* Build and issue the MDIO read command. */
983 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
984 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
985 BCE_EMAC_MDIO_COMM_START_BUSY;
986 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
/* Poll for command completion (START_BUSY clears when done). */
988 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
991 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
992 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
995 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
996 val &= BCE_EMAC_MDIO_COMM_DATA;
1002 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1003 BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1004 __FILE__, __LINE__, phy, reg);
1007 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1010 DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1011 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
/* Restore auto-polling if it was disabled above. */
1013 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1014 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1015 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1017 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1018 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* Return only the 16-bit MII register value. */
1023 return (val & 0xffff);
1028 /****************************************************************************/
1029 /* PHY register write. */
1031 /* Implements register writes on the MII bus. */
1034 /* The value of the register. */
1035 /****************************************************************************/
1037 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1039 struct bce_softc *sc;
1043 sc = device_get_softc(dev);
1045 /* Make sure we are accessing the correct PHY address. */
1046 if (phy != sc->bce_phy_addr) {
1047 DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
1051 DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1052 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
/* Disable hardware autopolling so it can't collide with this MDIO write. */
1054 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1055 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1056 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
/* Read back to flush the mode change to the chip. */
1058 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1059 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* Compose the MDIO write command; 'val' occupies the data field. */
1064 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1065 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1066 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1067 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
/* Busy-wait for the hardware to clear START_BUSY (write complete). */
1069 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1072 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1073 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1079 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1080 BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n",
1081 __FILE__, __LINE__);
/* Restore autopolling if it was enabled on entry. */
1083 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1084 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1085 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1087 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1088 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1097 /****************************************************************************/
1098 /* MII bus status change. */
1100 /* Called by the MII bus driver when the PHY establishes link to set the */
1101 /* MAC interface registers. */
1105 /****************************************************************************/
1107 bce_miibus_statchg(device_t dev)
1109 struct bce_softc *sc;
1110 struct mii_data *mii;
1112 sc = device_get_softc(dev);
1114 mii = device_get_softc(sc->bce_miibus);
/* Clear the port mode bits before selecting the new mode. */
1116 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1118 /* Set MII or GMII interface based on the speed negotiated by the PHY. */
1119 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
1120 DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1121 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1123 DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1124 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1127 /* Set half or full duplex based on the duplex setting negotiated by the PHY. */
1128 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1129 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1130 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1132 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1133 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1138 /****************************************************************************/
1139 /* Acquire NVRAM lock. */
1141 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1142 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1143 /* for use by the driver. */
/* NOTE(review): the two lines above are self-contradictory about lock 2; */
/* the code below clearly requests and polls arbitration lock 2            */
/* (ARB_REQ_SET2 / ARB_ARB2), so lock 2 is the driver's lock — confirm     */
/* which locks are actually reserved against the chip documentation.       */
1146 /* 0 on success, positive value on failure. */
1147 /****************************************************************************/
1149 bce_acquire_nvram_lock(struct bce_softc *sc)
1154 DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1156 /* Request access to the flash interface. */
1157 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
/* Poll until the arbiter grants lock 2 or the timeout expires. */
1158 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1159 val = REG_RD(sc, BCE_NVM_SW_ARB);
1160 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1166 if (j >= NVRAM_TIMEOUT_COUNT) {
1167 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1175 /****************************************************************************/
1176 /* Release NVRAM lock. */
1178 /* When the caller is finished accessing NVRAM the lock must be released. */
1179 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1180 /* for use by the driver. */
1183 /* 0 on success, positive value on failure. */
1184 /****************************************************************************/
1186 bce_release_nvram_lock(struct bce_softc *sc)
1191 DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1194 * Relinquish nvram interface.
/* Ask the arbiter to clear the driver's lock (lock 2). */
1196 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
/* Poll until the arbiter reports lock 2 released or the timeout expires. */
1198 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1199 val = REG_RD(sc, BCE_NVM_SW_ARB);
1200 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1206 if (j >= NVRAM_TIMEOUT_COUNT) {
/* Fixed typo in the debug message: "reeasing" -> "releasing". */
1207 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1215 #ifdef BCE_NVRAM_WRITE_SUPPORT
1216 /****************************************************************************/
1217 /* Enable NVRAM write access. */
1219 /* Before writing to NVRAM the caller must enable NVRAM writes. */
1222 /* 0 on success, positive value on failure. */
1223 /****************************************************************************/
1225 bce_enable_nvram_write(struct bce_softc *sc)
1229 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
/* Allow PCI-initiated writes to the NVRAM interface. */
1231 val = REG_RD(sc, BCE_MISC_CFG);
1232 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
/* Non-buffered flash additionally needs an explicit WREN command. */
1234 if (!sc->bce_flash_info->buffered) {
1237 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1238 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
/* Poll for command completion. */
1240 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1243 val = REG_RD(sc, BCE_NVM_COMMAND);
1244 if (val & BCE_NVM_COMMAND_DONE)
1248 if (j >= NVRAM_TIMEOUT_COUNT) {
/* NOTE(review): message is misleading — this is a write-ENABLE timeout, */
/* not a data-write timeout.                                             */
1249 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1257 /****************************************************************************/
1258 /* Disable NVRAM write access. */
1260 /* When the caller is finished writing to NVRAM write access must be */
1265 /****************************************************************************/
1267 bce_disable_nvram_write(struct bce_softc *sc)
1271 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
/* Clear the NVRAM write-enable bit in the misc config register. */
1273 val = REG_RD(sc, BCE_MISC_CFG);
1274 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1279 /****************************************************************************/
1280 /* Enable NVRAM access. */
1282 /* Before accessing NVRAM for read or write operations the caller must */
1283 /* enable NVRAM access. */
1287 /****************************************************************************/
1289 bce_enable_nvram_access(struct bce_softc *sc)
1293 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1295 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1296 /* Enable both bits, even on read. */
1297 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1298 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1302 /****************************************************************************/
1303 /* Disable NVRAM access. */
1305 /* When the caller is finished accessing NVRAM access must be disabled. */
1309 /****************************************************************************/
1311 bce_disable_nvram_access(struct bce_softc *sc)
1315 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1317 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1319 /* Disable both bits, even after read. */
1320 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1321 val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1322 BCE_NVM_ACCESS_ENABLE_WR_EN));
1326 #ifdef BCE_NVRAM_WRITE_SUPPORT
1327 /****************************************************************************/
1328 /* Erase NVRAM page before writing. */
1330 /* Non-buffered flash parts require that a page be erased before it is */
1334 /* 0 on success, positive value on failure. */
1335 /****************************************************************************/
1337 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1342 /* Buffered flash doesn't require an erase. */
1343 if (sc->bce_flash_info->buffered)
1346 DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1348 /* Build an erase command. */
1349 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1350 BCE_NVM_COMMAND_DOIT;
1353 * Clear the DONE bit separately, set the NVRAM address to erase,
1354 * and issue the erase command.
1356 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1357 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1358 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1360 /* Wait for completion. */
1361 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1366 val = REG_RD(sc, BCE_NVM_COMMAND);
1367 if (val & BCE_NVM_COMMAND_DONE)
1371 if (j >= NVRAM_TIMEOUT_COUNT) {
1372 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1378 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1381 /****************************************************************************/
1382 /* Read a dword (32 bits) from NVRAM. */
1384 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1385 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1388 /* 0 on success and the 32 bit value read, positive value on failure. */
1389 /****************************************************************************/
1391 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1397 /* Build the command word. */
1398 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1400 /* Calculate the offset for buffered flash. */
/* Buffered parts address by (page number << page_bits) + page offset. */
1401 if (sc->bce_flash_info->buffered) {
1402 offset = ((offset / sc->bce_flash_info->page_size) <<
1403 sc->bce_flash_info->page_bits) +
1404 (offset % sc->bce_flash_info->page_size);
1408 * Clear the DONE bit separately, set the address to read,
1409 * and issue the read.
1411 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1412 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1413 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1415 /* Wait for completion. */
1416 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1421 val = REG_RD(sc, BCE_NVM_COMMAND);
1422 if (val & BCE_NVM_COMMAND_DONE) {
/* Convert from NVRAM (big-endian) byte order and copy the 4 */
/* result bytes out through ret_val.                          */
1423 val = REG_RD(sc, BCE_NVM_READ);
1425 val = bce_be32toh(val);
1426 memcpy(ret_val, &val, 4);
1431 /* Check for errors. */
1432 if (i >= NVRAM_TIMEOUT_COUNT) {
1433 BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1434 __FILE__, __LINE__, offset);
1442 #ifdef BCE_NVRAM_WRITE_SUPPORT
1443 /****************************************************************************/
1444 /* Write a dword (32 bits) to NVRAM. */
1446 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1447 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1448 /* enabled NVRAM write access. */
1451 /* 0 on success, positive value on failure. */
1452 /****************************************************************************/
1454 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1460 /* Build the command word. */
1461 cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1463 /* Calculate the offset for buffered flash. */
/* Buffered parts address by (page number << page_bits) + page offset. */
1464 if (sc->bce_flash_info->buffered) {
1465 offset = ((offset / sc->bce_flash_info->page_size) <<
1466 sc->bce_flash_info->page_bits) +
1467 (offset % sc->bce_flash_info->page_size);
1471 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1472 * set the NVRAM address to write, and issue the write command
1474 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1475 memcpy(&val32, val, 4);
1476 val32 = htobe32(val32);
1477 REG_WR(sc, BCE_NVM_WRITE, val32);
1478 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1479 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1481 /* Wait for completion. */
1482 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1485 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1488 if (j >= NVRAM_TIMEOUT_COUNT) {
1489 BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1490 __FILE__, __LINE__, offset);
1496 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1499 /****************************************************************************/
1500 /* Initialize NVRAM access. */
1502 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1503 /* access that device. */
1506 /* 0 on success, positive value on failure. */
1507 /****************************************************************************/
1509 bce_init_nvram(struct bce_softc *sc)
1512 int j, entry_count, rc;
1513 struct flash_spec *flash;
1515 DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1517 /* Determine the selected interface. */
1518 val = REG_RD(sc, BCE_NVM_CFG1);
/* Number of entries in the known-device table. */
1520 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1525 * Flash reconfiguration is required to support additional
1526 * NVRAM devices not directly supported in hardware.
1527 * Check if the flash interface was reconfigured
/* Bit 30 of NVM_CFG1 indicates the bootcode already reconfigured the */
/* flash interface; presumably set by firmware — confirm against the  */
/* chip documentation.                                                */
1531 if (val & 0x40000000) {
1532 /* Flash interface reconfigured by bootcode. */
1534 DBPRINT(sc,BCE_INFO_LOAD,
1535 "bce_init_nvram(): Flash WAS reconfigured.\n");
/* Match only on the backup strapping bits in that case. */
1537 for (j = 0, flash = &flash_table[0]; j < entry_count;
1539 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1540 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1541 sc->bce_flash_info = flash;
1546 /* Flash interface not yet reconfigured. */
1549 DBPRINT(sc,BCE_INFO_LOAD,
1550 "bce_init_nvram(): Flash was NOT reconfigured.\n");
/* Bit 23 selects which strapping mask applies to this board. */
1552 if (val & (1 << 23))
1553 mask = FLASH_BACKUP_STRAP_MASK;
1555 mask = FLASH_STRAP_MASK;
1557 /* Look for the matching NVRAM device configuration data. */
1558 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1560 /* Check if the device matches any of the known devices. */
1561 if ((val & mask) == (flash->strapping & mask)) {
1562 /* Found a device match. */
1563 sc->bce_flash_info = flash;
1565 /* Request access to the flash interface. */
1566 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1569 /* Reconfigure the flash interface. */
1570 bce_enable_nvram_access(sc);
1571 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1572 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1573 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1574 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1575 bce_disable_nvram_access(sc);
1576 bce_release_nvram_lock(sc);
1583 /* Check if a matching device was found. */
1584 if (j == entry_count) {
1585 sc->bce_flash_info = NULL;
1586 BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1587 __FILE__, __LINE__);
/* Read the flash size advertised in shared memory; fall back to the */
/* table's total_size when shared memory doesn't provide one.         */
1591 /* Write the flash config data to the shared memory interface. */
1592 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1593 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1595 sc->bce_flash_size = val;
1597 sc->bce_flash_size = sc->bce_flash_info->total_size;
1599 DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1600 sc->bce_flash_info->total_size);
1602 DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1608 /****************************************************************************/
1609 /* Read an arbitrary range of data from NVRAM. */
1611 /* Prepares the NVRAM interface for access and reads the requested data */
1612 /* into the supplied buffer. */
1615 /* 0 on success and the data read, positive value on failure. */
1616 /****************************************************************************/
1618 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1622 u32 cmd_flags, offset32, len32, extra;
1627 /* Request access to the flash interface. */
1628 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1631 /* Enable access to flash interface */
1632 bce_enable_nvram_access(sc);
/*
 * NVRAM is accessed a 32-bit dword at a time; an unaligned request is
 * handled by reading a leading partial dword, whole dwords in the
 * middle, and a trailing partial dword.
 */
/* Bytes of the first dword that belong to the caller's range. */
1645 pre_len = 4 - (offset & 3);
1647 if (pre_len >= len32) {
/* Entire request fits in one dword: first and last command in one. */
1649 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1652 cmd_flags = BCE_NVM_COMMAND_FIRST;
1655 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
/* Copy only the caller's bytes out of the aligned dword. */
1660 memcpy(ret_buf, buf + (offset & 3), pre_len);
/* Round the remaining length up to a dword multiple; 'extra' is the */
/* number of pad bytes in the final dword that must not be copied.    */
1668 extra = 4 - (len32 & 3);
1669 len32 = (len32 + 4) & ~3;
1676 cmd_flags = BCE_NVM_COMMAND_LAST;
1678 cmd_flags = BCE_NVM_COMMAND_FIRST |
1679 BCE_NVM_COMMAND_LAST;
1681 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1683 memcpy(ret_buf, buf, 4 - extra);
1685 else if (len32 > 0) {
1688 /* Read the first word. */
1692 cmd_flags = BCE_NVM_COMMAND_FIRST;
1694 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1696 /* Advance to the next dword. */
/* Read the aligned middle of the range one dword at a time. */
1701 while (len32 > 4 && rc == 0) {
1702 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1704 /* Advance to the next dword. */
/* Final dword: read into the scratch buffer and copy the valid bytes. */
1713 cmd_flags = BCE_NVM_COMMAND_LAST;
1714 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1716 memcpy(ret_buf, buf, 4 - extra);
1719 /* Disable access to flash interface and release the lock. */
1720 bce_disable_nvram_access(sc);
1721 bce_release_nvram_lock(sc);
1727 #ifdef BCE_NVRAM_WRITE_SUPPORT
1728 /****************************************************************************/
1729 /* Write an arbitrary range of data to NVRAM. */
1731 /* Prepares the NVRAM interface for write access and writes the requested */
1732 /* data from the supplied buffer. The caller is responsible for */
1733 /* calculating any appropriate CRCs. */
1736 /* 0 on success, positive value on failure. */
1737 /****************************************************************************/
1739 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1742 u32 written, offset32, len32;
1743 u8 *buf, start[4], end[4];
1745 int align_start, align_end;
1750 align_start = align_end = 0;
/* Widen an unaligned start down to a dword boundary, preserving the */
/* existing NVRAM bytes before the caller's data.                     */
1752 if ((align_start = (offset32 & 3))) {
1754 len32 += align_start;
1755 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
/* Likewise preserve the existing bytes after an unaligned end. */
1760 if ((len32 > 4) || !align_start) {
1761 align_end = 4 - (len32 & 3);
1763 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
/* Build a dword-aligned staging buffer: saved head + caller data + */
/* saved tail.                                                       */
1770 if (align_start || align_end) {
1771 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1775 memcpy(buf, start, 4);
1778 memcpy(buf + len32 - 4, end, 4);
1780 memcpy(buf + align_start, data_buf, buf_size);
/* Write the staged data one flash page per iteration. */
1784 while ((written < len32) && (rc == 0)) {
1785 u32 page_start, page_end, data_start, data_end;
1786 u32 addr, cmd_flags;
1788 u8 flash_buffer[264];
1790 /* Find the page_start addr */
1791 page_start = offset32 + written;
1792 page_start -= (page_start % sc->bce_flash_info->page_size);
1793 /* Find the page_end addr */
1794 page_end = page_start + sc->bce_flash_info->page_size;
1795 /* Find the data_start addr */
1796 data_start = (written == 0) ? offset32 : page_start;
1797 /* Find the data_end addr */
1798 data_end = (page_end > offset32 + len32) ?
1799 (offset32 + len32) : page_end;
1801 /* Request access to the flash interface. */
1802 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1803 goto nvram_write_end;
1805 /* Enable access to flash interface */
1806 bce_enable_nvram_access(sc);
1808 cmd_flags = BCE_NVM_COMMAND_FIRST;
1809 if (sc->bce_flash_info->buffered == 0) {
1812 /* Read the whole page into the buffer
1813 * (non-buffer flash only) */
1814 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1815 if (j == (sc->bce_flash_info->page_size - 4)) {
1816 cmd_flags |= BCE_NVM_COMMAND_LAST;
1818 rc = bce_nvram_read_dword(sc,
1824 goto nvram_write_end;
1830 /* Enable writes to flash interface (unlock write-protect) */
1831 if ((rc = bce_enable_nvram_write(sc)) != 0)
1832 goto nvram_write_end;
1834 /* Erase the page */
1835 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1836 goto nvram_write_end;
1838 /* Re-enable the write again for the actual write */
1839 bce_enable_nvram_write(sc);
1841 /* Loop to write back the buffer data from page_start to
1844 if (sc->bce_flash_info->buffered == 0) {
1845 for (addr = page_start; addr < data_start;
1846 addr += 4, i += 4) {
1848 rc = bce_nvram_write_dword(sc, addr,
1849 &flash_buffer[i], cmd_flags);
1852 goto nvram_write_end;
1858 /* Loop to write the new data from data_start to data_end */
1859 for (addr = data_start; addr < data_end; addr += 4, i++) {
1860 if ((addr == page_end - 4) ||
1861 ((sc->bce_flash_info->buffered) &&
1862 (addr == data_end - 4))) {
1864 cmd_flags |= BCE_NVM_COMMAND_LAST;
1866 rc = bce_nvram_write_dword(sc, addr, buf,
1870 goto nvram_write_end;
1876 /* Loop to write back the buffer data from data_end
1878 if (sc->bce_flash_info->buffered == 0) {
1879 for (addr = data_end; addr < page_end;
1880 addr += 4, i += 4) {
1882 if (addr == page_end-4) {
1883 cmd_flags = BCE_NVM_COMMAND_LAST;
1885 rc = bce_nvram_write_dword(sc, addr,
1886 &flash_buffer[i], cmd_flags);
1889 goto nvram_write_end;
1895 /* Disable writes to flash interface (lock write-protect) */
1896 bce_disable_nvram_write(sc);
1898 /* Disable access to flash interface */
1899 bce_disable_nvram_access(sc);
1900 bce_release_nvram_lock(sc);
1902 /* Increment written */
1903 written += data_end - data_start;
/* Free the staging buffer only when one was allocated above. */
1907 if (align_start || align_end)
1908 free(buf, M_DEVBUF);
1912 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1915 /****************************************************************************/
1916 /* Verifies that NVRAM is accessible and contains valid data. */
1918 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1922 /* 0 on success, positive value on failure. */
1923 /****************************************************************************/
1925 bce_nvram_test(struct bce_softc *sc)
1927 u32 buf[BCE_NVRAM_SIZE / 4];
1928 u8 *data = (u8 *) buf;
1934 * Check that the device NVRAM is valid by reading
1935 * the magic value at offset 0.
1937 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1938 goto bce_nvram_test_done;
/* NVRAM stores the magic in big-endian byte order. */
1941 magic = bce_be32toh(buf[0]);
1942 if (magic != BCE_NVRAM_MAGIC) {
1944 BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1946 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1947 goto bce_nvram_test_done;
1951 * Verify that the device NVRAM includes valid
1952 * configuration data.
1954 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1955 goto bce_nvram_test_done;
/* CRC over each 0x100-byte region must equal the standard residual. */
1957 csum = ether_crc32_le(data, 0x100);
1958 if (csum != BCE_CRC32_RESIDUAL) {
1960 BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1961 "Expected: 0x%08X, Found: 0x%08X\n",
1962 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1963 goto bce_nvram_test_done;
1966 csum = ether_crc32_le(data + 0x100, 0x100);
1967 if (csum != BCE_CRC32_RESIDUAL) {
1968 BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
/* Fixed broken format specifier: "08%08X" -> "0x%08X" (matches the */
/* other CRC message above).                                        */
1969 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1970 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1974 bce_nvram_test_done:
1979 /****************************************************************************/
1980 /* Free any DMA memory owned by the driver. */
1982 /* Scans through each data structure that requires DMA memory and frees */
1983 /* the memory if allocated. */
1987 /****************************************************************************/
1989 bce_dma_free(struct bce_softc *sc)
1993 DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
/* Each resource is torn down in memory -> map -> tag order, with a */
/* NULL check so the function is safe to call from a partially      */
/* completed bce_dma_alloc().                                       */
1995 /* Destroy the status block. */
1996 if (sc->status_block != NULL)
2002 if (sc->status_map != NULL) {
2006 bus_dmamap_destroy(sc->status_tag,
2010 if (sc->status_tag != NULL)
2011 bus_dma_tag_destroy(sc->status_tag);
2014 /* Destroy the statistics block. */
2015 if (sc->stats_block != NULL)
2021 if (sc->stats_map != NULL) {
2025 bus_dmamap_destroy(sc->stats_tag,
2029 if (sc->stats_tag != NULL)
2030 bus_dma_tag_destroy(sc->stats_tag);
2033 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2034 for (i = 0; i < TX_PAGES; i++ ) {
2035 if (sc->tx_bd_chain[i] != NULL)
2037 sc->tx_bd_chain_tag,
2039 sc->tx_bd_chain_map[i]);
2041 if (sc->tx_bd_chain_map[i] != NULL) {
2043 sc->tx_bd_chain_tag,
2044 sc->tx_bd_chain_map[i]);
2046 sc->tx_bd_chain_tag,
2047 sc->tx_bd_chain_map[i]);
2052 /* Destroy the TX buffer descriptor tag. */
2053 if (sc->tx_bd_chain_tag != NULL)
2054 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2057 /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2058 for (i = 0; i < RX_PAGES; i++ ) {
2059 if (sc->rx_bd_chain[i] != NULL)
2061 sc->rx_bd_chain_tag,
2063 sc->rx_bd_chain_map[i]);
2065 if (sc->rx_bd_chain_map[i] != NULL) {
2067 sc->rx_bd_chain_tag,
2068 sc->rx_bd_chain_map[i]);
2070 sc->rx_bd_chain_tag,
2071 sc->rx_bd_chain_map[i]);
2075 /* Destroy the RX buffer descriptor tag. */
2076 if (sc->rx_bd_chain_tag != NULL)
2077 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2080 /* Unload and destroy the TX mbuf maps. */
2081 for (i = 0; i < TOTAL_TX_BD; i++) {
2082 if (sc->tx_mbuf_map[i] != NULL) {
2083 bus_dmamap_unload(sc->tx_mbuf_tag,
2084 sc->tx_mbuf_map[i]);
2085 bus_dmamap_destroy(sc->tx_mbuf_tag,
2086 sc->tx_mbuf_map[i]);
2090 /* Destroy the TX mbuf tag. */
2091 if (sc->tx_mbuf_tag != NULL)
2092 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2095 /* Unload and destroy the RX mbuf maps. */
2096 for (i = 0; i < TOTAL_RX_BD; i++) {
2097 if (sc->rx_mbuf_map[i] != NULL) {
2098 bus_dmamap_unload(sc->rx_mbuf_tag,
2099 sc->rx_mbuf_map[i]);
2100 bus_dmamap_destroy(sc->rx_mbuf_tag,
2101 sc->rx_mbuf_map[i]);
2105 /* Destroy the RX mbuf tag. */
2106 if (sc->rx_mbuf_tag != NULL)
2107 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2110 /* Destroy the parent tag */
2111 if (sc->parent_tag != NULL)
2112 bus_dma_tag_destroy(sc->parent_tag);
2114 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2119 /****************************************************************************/
2120 /* Get DMA memory from the OS. */
2122 /* Validates that the OS has provided DMA buffers in response to a */
2123 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2124 /* When the callback is used the OS will return 0 for the mapping function */
2125 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
2126 /* failures back to the caller. */
2130 /****************************************************************************/
2132 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2134 struct bce_dmamap_arg *map_arg = arg;
2135 struct bce_softc *sc = map_arg->sc;
2137 /* Simulate a mapping failure. */
/* Debug-build fault injection only; compiled out in production. */
2138 DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2139 BCE_PRINTF(sc, "%s(%d): Simulating DMA mapping error.\n",
2140 __FILE__, __LINE__);
2143 /* Check for an error and signal the caller that an error occurred. */
2144 if (error || (nseg > map_arg->maxsegs)) {
2145 BCE_PRINTF(sc, "%s(%d): DMA mapping error! error = %d, "
2146 "nseg = %d, maxsegs = %d\n",
2147 __FILE__, __LINE__, error, nseg, map_arg->maxsegs);
/* maxsegs == 0 is the out-of-band failure signal to the caller. */
2148 map_arg->maxsegs = 0;
2149 goto bce_dma_map_addr_exit;
/* Single-segment mapping: record the bus address of segment 0. */
2152 map_arg->busaddr = segs->ds_addr;
2154 bce_dma_map_addr_exit:
2159 /****************************************************************************/
2160 /* Map TX buffers into TX buffer descriptors. */
2162 /* Given a series of DMA memory containing an outgoing frame, map the */
2163 /* segments into the tx_bd structure used by the hardware. */
2167 /****************************************************************************/
2169 bce_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs,
2170 int nseg, bus_size_t mapsize, int error)
2172 struct bce_dmamap_arg *map_arg;
2173 struct bce_softc *sc;
2174 struct tx_bd *txbd = NULL;
2176 u16 prod, chain_prod;
2186 DBPRINT(sc, BCE_WARN, "%s(): Called with error = %d\n",
2187 __FUNCTION__, error);
2191 /* Signal error to caller if there's too many segments */
2192 if (nseg > map_arg->maxsegs) {
2193 DBPRINT(sc, BCE_WARN,
2194 "%s(): Mapped TX descriptors: max segs = %d, "
2195 "actual segs = %d\n",
2196 __FUNCTION__, map_arg->maxsegs, nseg);
/* maxsegs == 0 is the out-of-band failure signal to the caller. */
2198 map_arg->maxsegs = 0;
2202 /* prod points to an empty tx_bd at this point. */
2203 prod = map_arg->prod;
2204 chain_prod = map_arg->chain_prod;
2205 prod_bseq = map_arg->prod_bseq;
2208 debug_prod = chain_prod;
2211 DBPRINT(sc, BCE_INFO_SEND,
2212 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
2213 "prod_bseq = 0x%08X\n",
2214 __FUNCTION__, prod, chain_prod, prod_bseq);
2217 * Cycle through each mbuf segment that makes up
2218 * the outgoing frame, gathering the mapping info
2219 * for that segment and creating a tx_bd to for
/* Descriptors are little-endian; addresses are split into hi/lo */
/* 32-bit halves for the hardware.                               */
2223 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2225 /* Setup the first tx_bd for the first segment. */
2226 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2227 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2228 txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
/* Only the first descriptor of the frame carries the TX flags/VLAN tag. */
2229 txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags |
2231 prod_bseq += segs[i].ds_len;
2233 /* Setup any remaining segments. */
2234 for (i = 1; i < nseg; i++) {
2235 prod = NEXT_TX_BD(prod);
2236 chain_prod = TX_CHAIN_IDX(prod);
2238 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2240 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2241 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2242 txbd->tx_bd_mss_nbytes = htole16(segs[i].ds_len);
2243 txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags);
2245 prod_bseq += segs[i].ds_len;
2248 /* Set the END flag on the last TX buffer descriptor. */
2249 txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END);
2251 DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nseg));
2253 DBPRINT(sc, BCE_INFO_SEND,
2254 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
2255 "prod_bseq = 0x%08X\n",
2256 __FUNCTION__, prod, chain_prod, prod_bseq);
2258 /* prod points to the last tx_bd at this point. */
/* Return the updated producer indices/byte sequence to the caller. */
2259 map_arg->maxsegs = nseg;
2260 map_arg->prod = prod;
2261 map_arg->chain_prod = chain_prod;
2262 map_arg->prod_bseq = prod_bseq;
2266 /****************************************************************************/
2267 /* Allocate any DMA memory needed by the driver. */
2269 /* Allocates DMA memory needed for the various global structures needed by */
2273 /* 0 for success, positive value for failure. */
2274 /****************************************************************************/
2276 bce_dma_alloc(device_t dev)
2278 struct bce_softc *sc;
2279 int i, error, rc = 0;
2280 struct bce_dmamap_arg map_arg;
2282 sc = device_get_softc(dev);
2284 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2287 * Allocate the parent bus DMA tag appropriate for PCI.
2289 if (bus_dma_tag_create(NULL, /* parent */
2290 BCE_DMA_ALIGN, /* alignment */
2291 BCE_DMA_BOUNDARY, /* boundary */
2292 sc->max_bus_addr, /* lowaddr */
2293 BUS_SPACE_MAXADDR, /* highaddr */
2294 NULL, /* filterfunc */
2295 NULL, /* filterarg */
2296 MAXBSIZE, /* maxsize */
2297 BUS_SPACE_UNRESTRICTED, /* nsegments */
2298 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
2303 BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2304 __FILE__, __LINE__);
2306 goto bce_dma_alloc_exit;
2310 * Create a DMA tag for the status block, allocate and clear the
2311 * memory, map the memory into DMA space, and fetch the physical
2312 * address of the block.
2314 if (bus_dma_tag_create(
2315 sc->parent_tag, /* parent */
2316 BCE_DMA_ALIGN, /* alignment */
2317 BCE_DMA_BOUNDARY, /* boundary */
2318 sc->max_bus_addr, /* lowaddr */
2319 BUS_SPACE_MAXADDR, /* highaddr */
2320 NULL, /* filterfunc */
2321 NULL, /* filterarg */
2322 BCE_STATUS_BLK_SZ, /* maxsize */
2324 BCE_STATUS_BLK_SZ, /* maxsegsize */
2326 NULL, /* lockfunc */
2329 BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2330 __FILE__, __LINE__);
2332 goto bce_dma_alloc_exit;
2335 if(bus_dmamem_alloc(
2336 sc->status_tag, /* dmat */
2337 (void **)&sc->status_block, /* vaddr */
2338 BUS_DMA_NOWAIT, /* flags */
2340 BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2341 __FILE__, __LINE__);
2343 goto bce_dma_alloc_exit;
2346 bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2349 map_arg.maxsegs = 1;
2351 error = bus_dmamap_load(
2352 sc->status_tag, /* dmat */
2353 sc->status_map, /* map */
2354 sc->status_block, /* buf */
2355 BCE_STATUS_BLK_SZ, /* buflen */
2356 bce_dma_map_addr, /* callback */
2357 &map_arg, /* callbackarg */
2358 BUS_DMA_NOWAIT); /* flags */
2360 if(error || (map_arg.maxsegs == 0)) {
2361 BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2362 __FILE__, __LINE__);
2364 goto bce_dma_alloc_exit;
2367 sc->status_block_paddr = map_arg.busaddr;
2368 /* DRC - Fix for 64 bit addresses. */
2369 DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2370 (u32) sc->status_block_paddr);
2373 * Create a DMA tag for the statistics block, allocate and clear the
2374 * memory, map the memory into DMA space, and fetch the physical
2375 * address of the block.
2377 if (bus_dma_tag_create(
2378 sc->parent_tag, /* parent */
2379 BCE_DMA_ALIGN, /* alignment */
2380 BCE_DMA_BOUNDARY, /* boundary */
2381 sc->max_bus_addr, /* lowaddr */
2382 BUS_SPACE_MAXADDR, /* highaddr */
2383 NULL, /* filterfunc */
2384 NULL, /* filterarg */
2385 BCE_STATS_BLK_SZ, /* maxsize */
2387 BCE_STATS_BLK_SZ, /* maxsegsize */
2389 NULL, /* lockfunc */
2392 BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2393 __FILE__, __LINE__);
2395 goto bce_dma_alloc_exit;
2398 if (bus_dmamem_alloc(
2399 sc->stats_tag, /* dmat */
2400 (void **)&sc->stats_block, /* vaddr */
2401 BUS_DMA_NOWAIT, /* flags */
2403 BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2404 __FILE__, __LINE__);
2406 goto bce_dma_alloc_exit;
2409 bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2412 map_arg.maxsegs = 1;
2414 error = bus_dmamap_load(
2415 sc->stats_tag, /* dmat */
2416 sc->stats_map, /* map */
2417 sc->stats_block, /* buf */
2418 BCE_STATS_BLK_SZ, /* buflen */
2419 bce_dma_map_addr, /* callback */
2420 &map_arg, /* callbackarg */
2421 BUS_DMA_NOWAIT); /* flags */
2423 if(error || (map_arg.maxsegs == 0)) {
2424 BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2425 __FILE__, __LINE__);
2427 goto bce_dma_alloc_exit;
2430 sc->stats_block_paddr = map_arg.busaddr;
2431 /* DRC - Fix for 64 bit address. */
2432 DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2433 (u32) sc->stats_block_paddr);
2436 * Create a DMA tag for the TX buffer descriptor chain,
2437 * allocate and clear the memory, and fetch the
2438 * physical address of the block.
2440 if(bus_dma_tag_create(
2441 sc->parent_tag, /* parent */
2442 BCM_PAGE_SIZE, /* alignment */
2443 BCE_DMA_BOUNDARY, /* boundary */
2444 sc->max_bus_addr, /* lowaddr */
2445 BUS_SPACE_MAXADDR, /* highaddr */
2446 NULL, /* filterfunc */
2447 NULL, /* filterarg */
2448 BCE_TX_CHAIN_PAGE_SZ, /* maxsize */
2450 BCE_TX_CHAIN_PAGE_SZ, /* maxsegsize */
2452 NULL, /* lockfunc */
2454 &sc->tx_bd_chain_tag)) {
2455 BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2456 __FILE__, __LINE__);
2458 goto bce_dma_alloc_exit;
2461 for (i = 0; i < TX_PAGES; i++) {
2463 if(bus_dmamem_alloc(
2464 sc->tx_bd_chain_tag, /* tag */
2465 (void **)&sc->tx_bd_chain[i], /* vaddr */
2466 BUS_DMA_NOWAIT, /* flags */
2467 &sc->tx_bd_chain_map[i])) {
2468 BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2469 "chain DMA memory!\n", __FILE__, __LINE__);
2471 goto bce_dma_alloc_exit;
2474 map_arg.maxsegs = 1;
2477 error = bus_dmamap_load(
2478 sc->tx_bd_chain_tag, /* dmat */
2479 sc->tx_bd_chain_map[i], /* map */
2480 sc->tx_bd_chain[i], /* buf */
2481 BCE_TX_CHAIN_PAGE_SZ, /* buflen */
2482 bce_dma_map_addr, /* callback */
2483 &map_arg, /* callbackarg */
2484 BUS_DMA_NOWAIT); /* flags */
2486 if(error || (map_arg.maxsegs == 0)) {
2487 BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2488 __FILE__, __LINE__);
2490 goto bce_dma_alloc_exit;
2493 sc->tx_bd_chain_paddr[i] = map_arg.busaddr;
2494 /* DRC - Fix for 64 bit systems. */
2495 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2496 i, (u32) sc->tx_bd_chain_paddr[i]);
2499 /* Create a DMA tag for TX mbufs. */
2500 if (bus_dma_tag_create(
2501 sc->parent_tag, /* parent */
2502 BCE_DMA_ALIGN, /* alignment */
2503 BCE_DMA_BOUNDARY, /* boundary */
2504 sc->max_bus_addr, /* lowaddr */
2505 BUS_SPACE_MAXADDR, /* highaddr */
2506 NULL, /* filterfunc */
2507 NULL, /* filterarg */
2508 MCLBYTES * BCE_MAX_SEGMENTS, /* maxsize */
2509 BCE_MAX_SEGMENTS, /* nsegments */
2510 MCLBYTES, /* maxsegsize */
2512 NULL, /* lockfunc */
2514 &sc->tx_mbuf_tag)) {
2515 BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2516 __FILE__, __LINE__);
2518 goto bce_dma_alloc_exit;
2521 /* Create DMA maps for the TX mbufs clusters. */
2522 for (i = 0; i < TOTAL_TX_BD; i++) {
2523 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2524 &sc->tx_mbuf_map[i])) {
2525 BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2526 __FILE__, __LINE__);
2528 goto bce_dma_alloc_exit;
2533 	 * Create a DMA tag for the RX buffer descriptor chain,
2534 	 * allocate and clear the memory, and fetch the physical
2535 	 * address of the blocks.
2537 	if (bus_dma_tag_create(
2538 			sc->parent_tag,		/* parent */
2539 			BCM_PAGE_SIZE,		/* alignment */
2540 			BCE_DMA_BOUNDARY,	/* boundary */
			/*
			 * Fix: lowaddr and highaddr were swapped here (and the
			 * "lowaddr" annotation was duplicated), unlike every
			 * other bus_dma_tag_create() call in this function.
			 * The device's addressing limit (sc->max_bus_addr)
			 * belongs in lowaddr; highaddr is unrestricted.
			 */
2541 			sc->max_bus_addr,	/* lowaddr */
2542 			BUS_SPACE_MAXADDR,	/* highaddr */
2544 			NULL, /* filterarg */
2545 			BCE_RX_CHAIN_PAGE_SZ, /* maxsize */
2547 			BCE_RX_CHAIN_PAGE_SZ, /* maxsegsize */
2549 			NULL, /* lockfunc */
2551 			&sc->rx_bd_chain_tag)) {
2552 		BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2553 			__FILE__, __LINE__);
2555 		goto bce_dma_alloc_exit;
2558 for (i = 0; i < RX_PAGES; i++) {
2560 if (bus_dmamem_alloc(
2561 sc->rx_bd_chain_tag, /* tag */
2562 (void **)&sc->rx_bd_chain[i], /* vaddr */
2563 BUS_DMA_NOWAIT, /* flags */
2564 &sc->rx_bd_chain_map[i])) {
2565 BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2566 "DMA memory!\n", __FILE__, __LINE__);
2568 goto bce_dma_alloc_exit;
2571 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2573 map_arg.maxsegs = 1;
2576 error = bus_dmamap_load(
2577 sc->rx_bd_chain_tag, /* dmat */
2578 sc->rx_bd_chain_map[i], /* map */
2579 sc->rx_bd_chain[i], /* buf */
2580 BCE_RX_CHAIN_PAGE_SZ, /* buflen */
2581 bce_dma_map_addr, /* callback */
2582 &map_arg, /* callbackarg */
2583 BUS_DMA_NOWAIT); /* flags */
2585 if(error || (map_arg.maxsegs == 0)) {
2586 BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2587 __FILE__, __LINE__);
2589 goto bce_dma_alloc_exit;
2592 sc->rx_bd_chain_paddr[i] = map_arg.busaddr;
2593 /* DRC - Fix for 64 bit systems. */
2594 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2595 i, (u32) sc->rx_bd_chain_paddr[i]);
2599 * Create a DMA tag for RX mbufs.
2601 if (bus_dma_tag_create(
2602 sc->parent_tag, /* parent */
2603 BCE_DMA_ALIGN, /* alignment */
2604 BCE_DMA_BOUNDARY, /* boundary */
2605 sc->max_bus_addr, /* lowaddr */
2606 BUS_SPACE_MAXADDR, /* highaddr */
2607 NULL, /* filterfunc */
2608 NULL, /* filterarg */
2609 MJUM9BYTES, /* maxsize */
2610 BCE_MAX_SEGMENTS, /* nsegments */
2611 MJUM9BYTES, /* maxsegsize */
2613 NULL, /* lockfunc */
2615 &sc->rx_mbuf_tag)) {
2616 BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2617 __FILE__, __LINE__);
2619 goto bce_dma_alloc_exit;
2622 /* Create DMA maps for the RX mbuf clusters. */
2623 for (i = 0; i < TOTAL_RX_BD; i++) {
2624 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2625 &sc->rx_mbuf_map[i])) {
2626 BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2627 __FILE__, __LINE__);
2629 goto bce_dma_alloc_exit;
2634 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2640 /****************************************************************************/
2641 /* Release all resources used by the driver. */
2643 /* Releases all resources acquired by the driver including interrupts, */
2644 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2648 /****************************************************************************/
2650 bce_release_resources(struct bce_softc *sc)
2654 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
	/* Each teardown step is guarded so the routine is safe to call
	 * from a partially-failed attach. */
2660 	if (sc->bce_intrhand != NULL)
2661 		bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2663 	if (sc->bce_irq != NULL)
2664 		bus_release_resource(dev,
2669 	if (sc->bce_res != NULL)
2670 		bus_release_resource(dev,
2675 	if (sc->bce_ifp != NULL)
2676 		if_free(sc->bce_ifp);
	/* Destroy the mutex last; only if it was actually initialized. */
2679 	if (mtx_initialized(&sc->bce_mtx))
2680 		BCE_LOCK_DESTROY(sc);
2682 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2687 /****************************************************************************/
2688 /* Firmware synchronization. */
2690 /* Before performing certain events such as a chip reset, synchronize with */
2691 /* the firmware first. */
2694 /* 0 for success, positive value for failure. */
2695 /****************************************************************************/
2697 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2702 	/* Don't waste any time if we've timed out before. */
2703 	if (sc->bce_fw_timed_out) {
2705 		goto bce_fw_sync_exit;
	/* The sequence number lives in the low bits of msg_data; callers
	 * are expected to pass only the message code/data bits. */
2708 	/* Increment the message sequence number. */
2709 	sc->bce_fw_wr_seq++;
2710 	msg_data |= sc->bce_fw_wr_seq;
2712 	DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2714 	/* Send the message to the bootcode driver mailbox. */
2715 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2717 	/* Wait for the bootcode to acknowledge the message. */
2718 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2719 		/* Check for a response in the bootcode firmware mailbox. */
2720 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
		/* Bootcode echoes the sequence number in the ACK field. */
2721 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2726 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	/* WAIT0 messages are exempt from the timeout complaint because the
	 * bootcode may legitimately not be running yet at that stage. */
2727 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2728 	    ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2730 		BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2731 			"msg_data = 0x%08X\n",
2732 			__FILE__, __LINE__, msg_data);
		/* Replace the message code with the FW_TIMEOUT code and
		 * re-post, then latch the failure so later calls bail early. */
2734 		msg_data &= ~BCE_DRV_MSG_CODE;
2735 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2737 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2739 		sc->bce_fw_timed_out = 1;
2748 /****************************************************************************/
2749 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2753 /****************************************************************************/
2755 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2756 	u32 rv2p_code_len, u32 rv2p_proc)
	/* Each RV2P instruction is 8 bytes, written as two 32-bit halves;
	 * rv2p_code is presumably advanced between the two writes — the
	 * increment is not visible in this excerpt, verify in full source. */
2761 	for (i = 0; i < rv2p_code_len; i += 8) {
2762 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2764 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
		/* Commit the instruction at word index i/8 to the selected
		 * processor's instruction memory. */
2767 		if (rv2p_proc == RV2P_PROC1) {
2768 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2769 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2772 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2773 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2777 	/* Reset the processor, un-stall is done later. */
2778 	if (rv2p_proc == RV2P_PROC1) {
2779 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2782 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2787 /****************************************************************************/
2788 /* Load RISC processor firmware. */
2790 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2791 /* associated with a particular processor. */
2795 /****************************************************************************/
2797 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
	/* Halt the CPU and clear its state before touching scratchpad
	 * memory; the CPU is restarted at fw->start_addr at the end. */
2804 	val = REG_RD_IND(sc, cpu_reg->mode);
2805 	val |= cpu_reg->mode_value_halt;
2806 	REG_WR_IND(sc, cpu_reg->mode, val);
2807 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	/* All section addresses are MIPS virtual addresses; subtracting
	 * mips_view_base rebases them onto the scratchpad window. */
2809 	/* Load the Text area. */
2810 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2814 	for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2815 		REG_WR_IND(sc, offset, fw->text[j]);
2819 	/* Load the Data area. */
2820 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2824 	for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2825 		REG_WR_IND(sc, offset, fw->data[j]);
2829 	/* Load the SBSS area. */
2830 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2834 	for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2835 		REG_WR_IND(sc, offset, fw->sbss[j]);
2839 	/* Load the BSS area. */
2840 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2844 	for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2845 		REG_WR_IND(sc, offset, fw->bss[j]);
2849 	/* Load the Read-Only area. */
2850 	offset = cpu_reg->spad_base +
2851 		(fw->rodata_addr - cpu_reg->mips_view_base);
2855 	for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2856 		REG_WR_IND(sc, offset, fw->rodata[j]);
2860 	/* Clear the pre-fetch instruction. */
2861 	REG_WR_IND(sc, cpu_reg->inst, 0);
2862 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2864 	/* Start the CPU. */
	/* Re-read mode, drop the halt bit, clear state again, then write
	 * mode last so the CPU starts with a clean state register. */
2865 	val = REG_RD_IND(sc, cpu_reg->mode);
2866 	val &= ~cpu_reg->mode_value_halt;
2867 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2868 	REG_WR_IND(sc, cpu_reg->mode, val);
2872 /****************************************************************************/
2873 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2875 /* Loads the firmware for each CPU and starts the CPU. */
2879 /****************************************************************************/
2881 bce_init_cpus(struct bce_softc *sc)
2883 	struct cpu_reg cpu_reg;
	/*
	 * Load and start firmware on each on-chip processor.  The same
	 * cpu_reg/fw scratch structures are re-filled for each processor
	 * (RXP, TXP, TPAT, COM) and handed to bce_load_cpu_fw(); the
	 * firmware images and symbols come from if_bcefw.h.
	 */
2886 	/* Initialize the RV2P processor. */
2887 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2888 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2890 	/* Initialize the RX Processor. */
2891 	cpu_reg.mode = BCE_RXP_CPU_MODE;
2892 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2893 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2894 	cpu_reg.state = BCE_RXP_CPU_STATE;
2895 	cpu_reg.state_value_clear = 0xffffff;
2896 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2897 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2898 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2899 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2900 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2901 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
2902 	cpu_reg.mips_view_base = 0x8000000;
2904 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
2905 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2906 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
2907 	fw.start_addr = bce_RXP_b06FwStartAddr;
2909 	fw.text_addr = bce_RXP_b06FwTextAddr;
2910 	fw.text_len = bce_RXP_b06FwTextLen;
2912 	fw.text = bce_RXP_b06FwText;
2914 	fw.data_addr = bce_RXP_b06FwDataAddr;
2915 	fw.data_len = bce_RXP_b06FwDataLen;
2917 	fw.data = bce_RXP_b06FwData;
2919 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2920 	fw.sbss_len = bce_RXP_b06FwSbssLen;
2922 	fw.sbss = bce_RXP_b06FwSbss;
2924 	fw.bss_addr = bce_RXP_b06FwBssAddr;
2925 	fw.bss_len = bce_RXP_b06FwBssLen;
2927 	fw.bss = bce_RXP_b06FwBss;
2929 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2930 	fw.rodata_len = bce_RXP_b06FwRodataLen;
2931 	fw.rodata_index = 0;
2932 	fw.rodata = bce_RXP_b06FwRodata;
2934 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2935 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2937 	/* Initialize the TX Processor. */
2938 	cpu_reg.mode = BCE_TXP_CPU_MODE;
2939 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2940 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2941 	cpu_reg.state = BCE_TXP_CPU_STATE;
2942 	cpu_reg.state_value_clear = 0xffffff;
2943 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2944 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2945 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2946 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2947 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2948 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
2949 	cpu_reg.mips_view_base = 0x8000000;
2951 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
2952 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2953 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
2954 	fw.start_addr = bce_TXP_b06FwStartAddr;
2956 	fw.text_addr = bce_TXP_b06FwTextAddr;
2957 	fw.text_len = bce_TXP_b06FwTextLen;
2959 	fw.text = bce_TXP_b06FwText;
2961 	fw.data_addr = bce_TXP_b06FwDataAddr;
2962 	fw.data_len = bce_TXP_b06FwDataLen;
2964 	fw.data = bce_TXP_b06FwData;
2966 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2967 	fw.sbss_len = bce_TXP_b06FwSbssLen;
2969 	fw.sbss = bce_TXP_b06FwSbss;
2971 	fw.bss_addr = bce_TXP_b06FwBssAddr;
2972 	fw.bss_len = bce_TXP_b06FwBssLen;
2974 	fw.bss = bce_TXP_b06FwBss;
2976 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2977 	fw.rodata_len = bce_TXP_b06FwRodataLen;
2978 	fw.rodata_index = 0;
2979 	fw.rodata = bce_TXP_b06FwRodata;
2981 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2982 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
2984 	/* Initialize the TX Patch-up Processor. */
2985 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
2986 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2987 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2988 	cpu_reg.state = BCE_TPAT_CPU_STATE;
2989 	cpu_reg.state_value_clear = 0xffffff;
2990 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2991 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2992 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2993 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2994 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2995 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2996 	cpu_reg.mips_view_base = 0x8000000;
2998 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2999 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3000 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3001 	fw.start_addr = bce_TPAT_b06FwStartAddr;
3003 	fw.text_addr = bce_TPAT_b06FwTextAddr;
3004 	fw.text_len = bce_TPAT_b06FwTextLen;
3006 	fw.text = bce_TPAT_b06FwText;
3008 	fw.data_addr = bce_TPAT_b06FwDataAddr;
3009 	fw.data_len = bce_TPAT_b06FwDataLen;
3011 	fw.data = bce_TPAT_b06FwData;
3013 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3014 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
3016 	fw.sbss = bce_TPAT_b06FwSbss;
3018 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
3019 	fw.bss_len = bce_TPAT_b06FwBssLen;
3021 	fw.bss = bce_TPAT_b06FwBss;
3023 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3024 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
3025 	fw.rodata_index = 0;
3026 	fw.rodata = bce_TPAT_b06FwRodata;
3028 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3029 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3031 	/* Initialize the Completion Processor. */
3032 	cpu_reg.mode = BCE_COM_CPU_MODE;
3033 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3034 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3035 	cpu_reg.state = BCE_COM_CPU_STATE;
3036 	cpu_reg.state_value_clear = 0xffffff;
3037 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3038 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3039 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3040 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3041 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3042 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3043 	cpu_reg.mips_view_base = 0x8000000;
3045 	fw.ver_major = bce_COM_b06FwReleaseMajor;
3046 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
3047 	fw.ver_fix = bce_COM_b06FwReleaseFix;
3048 	fw.start_addr = bce_COM_b06FwStartAddr;
3050 	fw.text_addr = bce_COM_b06FwTextAddr;
3051 	fw.text_len = bce_COM_b06FwTextLen;
3053 	fw.text = bce_COM_b06FwText;
3055 	fw.data_addr = bce_COM_b06FwDataAddr;
3056 	fw.data_len = bce_COM_b06FwDataLen;
3058 	fw.data = bce_COM_b06FwData;
3060 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
3061 	fw.sbss_len = bce_COM_b06FwSbssLen;
3063 	fw.sbss = bce_COM_b06FwSbss;
3065 	fw.bss_addr = bce_COM_b06FwBssAddr;
3066 	fw.bss_len = bce_COM_b06FwBssLen;
3068 	fw.bss = bce_COM_b06FwBss;
3070 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
3071 	fw.rodata_len = bce_COM_b06FwRodataLen;
3072 	fw.rodata_index = 0;
3073 	fw.rodata = bce_COM_b06FwRodata;
3075 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3076 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3080 /****************************************************************************/
3081 /* Initialize context memory. */
3083 /* Clears the memory associated with each Context ID (CID). */
3087 /****************************************************************************/
3089 bce_init_context(struct bce_softc *sc)
3095 	u32 vcid_addr, pcid_addr, offset;
	/* Body runs once per context ID; the enclosing loop over vcid is
	 * assumed from the variable usage — confirm in full source. */
3099 		vcid_addr = GET_CID_ADDR(vcid);
	/* Identity-map: physical CID address equals virtual CID address. */
3100 		pcid_addr = vcid_addr;
3102 		REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3103 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3105 		/* Zero out the context. */
3106 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3107 			CTX_WR(sc, 0x00, offset, 0);
	/* Install the final virtual->physical mapping for this CID. */
3110 		REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3111 		REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3116 /****************************************************************************/
3117 /* Fetch the permanent MAC address of the controller. */
3121 /****************************************************************************/
3123 bce_get_mac_addr(struct bce_softc *sc)
3125 	u32 mac_lo = 0, mac_hi = 0;
3128 	 * The NetXtreme II bootcode populates various NIC
3129 	 * power-on and runtime configuration items in a
3130 	 * shared memory area.  The factory configured MAC
3131 	 * address is available from both NVRAM and the
3132 	 * shared memory area so we'll read the value from
3133 	 * shared memory for speed.
3136 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3137 		BCE_PORT_HW_CFG_MAC_UPPER);
3138 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3139 		BCE_PORT_HW_CFG_MAC_LOWER);
3141 	if ((mac_lo == 0) && (mac_hi == 0)) {
3142 		BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3143 			__FILE__, __LINE__);
	/* Unpack big-endian layout: mac_hi holds bytes 0-1,
	 * mac_lo holds bytes 2-5 of the station address. */
3145 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3146 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3147 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3148 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3149 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3150 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3153 	DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3157 /****************************************************************************/
3158 /* Program the MAC address. */
3162 /****************************************************************************/
3164 bce_set_mac_addr(struct bce_softc *sc)
3167 	u8 *mac_addr = sc->eaddr;
3169 	DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
	/* MATCH0 takes the upper two address bytes... */
3171 	val = (mac_addr[0] << 8) | mac_addr[1];
3173 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
	/* ...and MATCH1 the remaining four. */
3175 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3176 		(mac_addr[4] << 8) | mac_addr[5];
3178 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3182 /****************************************************************************/
3183 /* Stop the controller. */
3187 /****************************************************************************/
3189 bce_stop(struct bce_softc *sc)
3192 	struct ifmedia_entry *ifm;
3193 	struct mii_data *mii = NULL;
3196 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
	/* Caller must hold the softc lock. */
3198 	BCE_LOCK_ASSERT(sc);
3202 		mii = device_get_softc(sc->bce_miibus);
	/* Stop the periodic statistics/tick callout before tearing down. */
3204 	callout_stop(&sc->bce_stat_ch);
3206 	/* Disable the transmit/receive blocks. */
3207 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	/* Read back to flush the posted write. */
3208 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3211 	bce_disable_intr(sc);
3213 	/* Tell firmware that the driver is going away. */
3214 	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
3216 	/* Free the RX lists. */
3217 	bce_free_rx_chain(sc);
3219 	/* Free TX buffers. */
3220 	bce_free_tx_chain(sc);
3223 	 * Isolate/power down the PHY, but leave the media selection
3224 	 * unchanged so that things will be put back to normal when
3225 	 * we bring the interface back up.
3228 	itmp = ifp->if_flags;
3229 	ifp->if_flags |= IFF_UP;
3231 	 * If we are called from bce_detach(), mii is already NULL.
	/* Temporarily force media to NONE, then restore the saved
	 * selection so a later bce_init() resumes the user's choice. */
3234 		ifm = mii->mii_media.ifm_cur;
3235 		mtmp = ifm->ifm_media;
3236 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
3238 		ifm->ifm_media = mtmp;
3241 	ifp->if_flags = itmp;
3246 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3248 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3250 	bce_mgmt_init_locked(sc);
3255 bce_reset(struct bce_softc *sc, u32 reset_code)
3260 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3262 	/* Wait for pending PCI transactions to complete. */
3263 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3264 		BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3265 		BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3266 		BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3267 		BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3268 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3271 	/* Assume bootcode is running. */
3272 	sc->bce_fw_timed_out = 0;
3274 	/* Give the firmware a chance to prepare for the reset. */
3275 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3277 		goto bce_reset_exit;
3279 	/* Set a firmware reminder that this is a soft reset. */
3280 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3281 		BCE_DRV_RESET_SIGNATURE_MAGIC);
3283 	/* Dummy read to force the chip to complete all current transactions. */
3284 	val = REG_RD(sc, BCE_MISC_ID);
	/* Trigger the core reset through PCI config space. */
3287 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3288 		BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3289 		BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3290 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3292 	/* Allow up to 30us for reset to complete. */
3293 	for (i = 0; i < 10; i++) {
3294 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3295 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3296 			BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3302 	/* Check that reset completed successfully. */
3303 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3304 		BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3305 		BCE_PRINTF(sc, "%s(%d): Reset failed!\n",
3306 			__FILE__, __LINE__);
3308 		goto bce_reset_exit;
3311 	/* Make sure byte swapping is properly configured. */
	/* SWAP_DIAG0 must read back 0x01020304 once swapping is correct. */
3312 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3313 	if (val != 0x01020304) {
3314 		BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
3315 			__FILE__, __LINE__);
3317 		goto bce_reset_exit;
3320 	/* Just completed a reset, assume that firmware is running again. */
3321 	sc->bce_fw_timed_out = 0;
3323 	/* Wait for the firmware to finish its initialization. */
3324 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3326 		BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
3327 			__FILE__, __LINE__);
3330 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3337 bce_chipinit(struct bce_softc *sc)
3342 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3344 	/* Make sure the interrupt is not active. */
3345 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3347 	/* Initialize DMA byte/word swapping, configure the number of DMA */
3348 	/* channels and PCI clock compensation delay. */
3349 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3350 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3351 #if BYTE_ORDER == BIG_ENDIAN
3352 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3354 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3355 	      DMA_READ_CHANS << 12 |
3356 	      DMA_WRITE_CHANS << 16;
	/* 0x2 in bits 20+ is the PCI clock compensation delay value. */
3358 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3360 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3361 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3364 	 * This setting resolves a problem observed on certain Intel PCI
3365 	 * chipsets that cannot handle multiple outstanding DMA operations.
3366 	 * See errata E9_5706A1_65.
3368 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3369 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3370 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3371 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3373 	REG_WR(sc, BCE_DMA_CONFIG, val);
3375 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3376 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3379 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3380 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3383 	/* Enable the RX_V2P and Context state machines before access. */
3384 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3385 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3386 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3387 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3389 	/* Initialize context mapping and zero out the quick contexts. */
3390 	bce_init_context(sc);
3392 	/* Initialize the on-boards CPUs */
3395 	/* Prepare NVRAM for access. */
3396 	if (bce_init_nvram(sc)) {
3398 		goto bce_chipinit_exit;
3401 	/* Set the kernel bypass block size */
3402 	val = REG_RD(sc, BCE_MQ_CONFIG);
3403 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3404 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3405 	REG_WR(sc, BCE_MQ_CONFIG, val);
	/* Place the bypass window just past the kernel context area. */
3407 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3408 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3409 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
	/* Encode the host page size for the RV2P processor. */
3411 	val = (BCM_PAGE_BITS - 8) << 24;
3412 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3414 	/* Configure page size. */
3415 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3416 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3417 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3418 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3421 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3427 /****************************************************************************/
3428 /* Initialize the controller in preparation to send/receive traffic. */
3431 /* 0 for success, positive value for failure. */
3432 /****************************************************************************/
3434 bce_blockinit(struct bce_softc *sc)
3439 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3441 	/* Load the hardware default MAC address. */
3442 	bce_set_mac_addr(sc);
3444 	/* Set the Ethernet backoff seed value */
3445 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
3446 	      (sc->eaddr[2] << 16) + (sc->eaddr[3] ) +
3447 	      (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3448 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3450 	sc->last_status_idx = 0;
3451 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3453 	/* Set up link change interrupt generation. */
3454 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3456 	/* Program the physical address of the status block. */
3457 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3458 		BCE_ADDR_LO(sc->status_block_paddr));
3459 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3460 		BCE_ADDR_HI(sc->status_block_paddr));
3462 	/* Program the physical address of the statistics block. */
3463 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3464 		BCE_ADDR_LO(sc->stats_block_paddr));
3465 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3466 		BCE_ADDR_HI(sc->stats_block_paddr));
3468 	/* Program various host coalescing parameters. */
3469 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3470 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3471 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3472 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3473 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3474 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3475 	REG_WR(sc, BCE_HC_TX_TICKS,
3476 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3477 	REG_WR(sc, BCE_HC_RX_TICKS,
3478 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3479 	REG_WR(sc, BCE_HC_COM_TICKS,
3480 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3481 	REG_WR(sc, BCE_HC_CMD_TICKS,
3482 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3483 	REG_WR(sc, BCE_HC_STATS_TICKS,
3484 		(sc->bce_stats_ticks & 0xffff00));
3485 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3487 	REG_WR(sc, BCE_HC_CONFIG,
3488 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3489 		BCE_HC_CONFIG_COLLECT_STATS));
3491 	/* Clear the internal statistics counters. */
3492 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3494 	/* Verify that bootcode is running. */
3495 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3497 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3498 		BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3499 			__FILE__, __LINE__);
3502 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3503 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
		/*
		 * Fix: the "Expected" value previously printed with a
		 * malformed "08%08X" prefix; use the conventional "0x"
		 * hex prefix so the diagnostic matches the "Found" field.
		 */
3504 		BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3505 			"Expected: 0x%08X\n", __FILE__, __LINE__,
3506 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3507 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3509 		goto bce_blockinit_exit;
3512 	/* Check if any management firmware is running. */
3513 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3514 	if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3515 		DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3516 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3519 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3520 	DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3522 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3523 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3525 	/* Enable link state change interrupt generation. */
3526 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3528 	/* Enable all remaining blocks in the MAC. */
3529 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
	/* Read back to flush the posted write. */
3530 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3534 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3540 /****************************************************************************/
3541 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3543 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3544 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3548 /* 0 for success, positive value for failure. */
3549 /****************************************************************************/
/*
 * NOTE(review): when 'm' is NULL this routine allocates a fresh mbuf header
 * plus cluster; when 'm' is non-NULL the caller's mbuf is recycled back into
 * the ring.  The duplicated m_len/m_pkthdr.len assignments below belong to
 * those two branches -- the `else` line between them appears to be missing
 * from this extraction; confirm against the repository copy of if_bce.c.
 * On success the rx_bd(s) are filled in, *prod/*chain_prod advance past the
 * consumed descriptors and *prod_bseq accumulates the mapped byte count.
 */
3551 bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
3555 bus_dma_segment_t segs[4];
3556 struct mbuf *m_new = NULL;
3558 int i, nsegs, error, rc = 0;
3560 u16 debug_chain_prod = *chain_prod;
3563 DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
3566 /* Make sure the inputs are valid. */
3567 DBRUNIF((*chain_prod > MAX_RX_BD),
3568 BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
3569 __FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
3571 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3572 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
/* Debug hook: randomly fake an allocation failure to exercise error paths. */
3576 DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3577 BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n",
3578 __FILE__, __LINE__);
3579 sc->mbuf_alloc_failed++;
3581 goto bce_get_buf_exit);
3583 /* This is a new mbuf allocation. */
3584 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3585 if (m_new == NULL) {
3587 DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
3588 __FILE__, __LINE__);
3590 DBRUNIF(1, sc->mbuf_alloc_failed++);
3593 goto bce_get_buf_exit;
3596 DBRUNIF(1, sc->rx_mbuf_alloc++);
/* Attach a cluster sized for the current MTU (MCLBYTES or MJUM9BYTES). */
3597 m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
3598 if (!(m_new->m_flags & M_EXT)) {
3600 DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
3601 __FILE__, __LINE__);
3605 DBRUNIF(1, sc->rx_mbuf_alloc--);
3606 DBRUNIF(1, sc->mbuf_alloc_failed++);
3609 goto bce_get_buf_exit;
3612 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
/* Recycle path: reset the mbuf's data pointer to the start of the cluster. */
3615 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3616 m_new->m_data = m_new->m_ext.ext_buf;
3619 /* Map the mbuf cluster into device memory. */
3620 map = sc->rx_mbuf_map[*chain_prod];
3621 error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
3622 segs, &nsegs, BUS_DMA_NOWAIT);
3625 BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3626 __FILE__, __LINE__);
3630 DBRUNIF(1, sc->rx_mbuf_alloc--);
3633 goto bce_get_buf_exit;
3636 /* Watch for overflow. */
3637 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3638 BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n",
3639 __FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));
3641 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3642 sc->rx_low_watermark = sc->free_rx_bd);
3644 /* Setup the rx_bd for the first segment. */
3645 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3647 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
3648 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
3649 rxbd->rx_bd_len = htole32(segs[0].ds_len);
3650 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3651 *prod_bseq += segs[0].ds_len;
/* One additional rx_bd per DMA segment (jumbo frames span several). */
3653 for (i = 1; i < nsegs; i++) {
3655 *prod = NEXT_RX_BD(*prod);
3656 *chain_prod = RX_CHAIN_IDX(*prod);
3658 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3660 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
3661 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
3662 rxbd->rx_bd_len = htole32(segs[i].ds_len);
3663 rxbd->rx_bd_flags = 0;
3664 *prod_bseq += segs[i].ds_len;
/* Mark the last descriptor of the frame; hardware stores the mbuf here. */
3667 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3669 /* Save the mbuf and update our counter. */
3670 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3671 sc->free_rx_bd -= nsegs;
3673 DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
3676 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3677 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3680 DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
3687 /****************************************************************************/
3688 /* Allocate memory and initialize the TX data structures. */
3691 /* 0 for success, positive value for failure. */
3692 /****************************************************************************/
/*
 * NOTE(review): resets the driver's TX producer state, links the TX BD
 * chain pages into a circular list, and programs the L2 TX context in the
 * chip so the hardware knows where the first chain page lives.  Several
 * lines (return type, index resets around line 3703) are absent from this
 * extraction -- confirm against the repository copy.
 */
3694 bce_init_tx_chain(struct bce_softc *sc)
3700 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3702 /* Set the initial TX producer/consumer indices. */
3705 sc->tx_prod_bseq = 0;
3707 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3710 * The NetXtreme II supports a linked-list structre called
3711 * a Buffer Descriptor Chain (or BD chain). A BD chain
3712 * consists of a series of 1 or more chain pages, each of which
3713 * consists of a fixed number of BD entries.
3714 * The last BD entry on each page is a pointer to the next page
3715 * in the chain, and the last pointer in the BD chain
3716 * points back to the beginning of the chain.
3719 /* Set the TX next pointer chain entries. */
3720 for (i = 0; i < TX_PAGES; i++) {
3723 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3725 /* Check if we've reached the last page. */
3726 if (i == (TX_PAGES - 1))
/* 'j' is the next page index (wraps to 0 on the last page). */
3731 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3732 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3736 * Initialize the context ID for an L2 TX chain.
3738 val = BCE_L2CTX_TYPE_TYPE_L2;
3739 val |= BCE_L2CTX_TYPE_SIZE_L2;
3740 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3742 val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3743 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3745 /* Point the hardware to the first page in the chain. */
3746 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3747 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3748 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3749 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3751 DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3753 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3759 /****************************************************************************/
3760 /* Free memory and clear the TX data structures. */
3764 /****************************************************************************/
/*
 * NOTE(review): releases every mbuf still held in the TX ring (syncing its
 * DMA map first), zeroes all TX BD pages, and (debug builds) reports any
 * leaked mbuf accounting.  Does not destroy the DMA tags/maps themselves.
 */
3766 bce_free_tx_chain(struct bce_softc *sc)
3770 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3772 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3773 for (i = 0; i < TOTAL_TX_BD; i++) {
3774 if (sc->tx_mbuf_ptr[i] != NULL) {
3775 if (sc->tx_mbuf_map != NULL)
3776 bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3777 BUS_DMASYNC_POSTWRITE);
3778 m_freem(sc->tx_mbuf_ptr[i]);
3779 sc->tx_mbuf_ptr[i] = NULL;
3780 DBRUNIF(1, sc->tx_mbuf_alloc--);
3784 /* Clear each TX chain page. */
3785 for (i = 0; i < TX_PAGES; i++)
3786 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3788 /* Check if we lost any mbufs in the process. */
3789 DBRUNIF((sc->tx_mbuf_alloc),
3790 BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3792 __FILE__, __LINE__, sc->tx_mbuf_alloc));
3794 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3798 /****************************************************************************/
3799 /* Allocate memory and initialize the RX data structures. */
3802 /* 0 for success, positive value for failure. */
3803 /****************************************************************************/
/*
 * NOTE(review): mirrors bce_init_tx_chain() for the RX side -- links the RX
 * BD pages circularly, programs the L2 RX context, pre-fills the ring with
 * mbuf clusters via bce_get_buf(), then publishes the producer index and
 * byte sequence to the chip mailbox so it can start receiving.
 */
3805 bce_init_rx_chain(struct bce_softc *sc)
3809 u16 prod, chain_prod;
3812 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3814 /* Initialize the RX producer and consumer indices. */
3817 sc->rx_prod_bseq = 0;
3818 sc->free_rx_bd = BCE_RX_SLACK_SPACE;
3819 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3821 /* Initialize the RX next pointer chain entries. */
3822 for (i = 0; i < RX_PAGES; i++) {
3825 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3827 /* Check if we've reached the last page. */
3828 if (i == (RX_PAGES - 1))
3833 /* Setup the chain page pointers. */
3834 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3835 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3838 /* Initialize the context ID for an L2 RX chain. */
3839 val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3840 val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3842 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3844 /* Point the hardware to the first page in the chain. */
3845 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3846 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3847 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3848 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3850 /* Allocate mbuf clusters for the rx_bd chain. */
3851 prod = prod_bseq = 0;
3852 while (prod < BCE_RX_SLACK_SPACE) {
3853 chain_prod = RX_CHAIN_IDX(prod);
3854 if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3855 BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
3856 __FILE__, __LINE__, chain_prod);
3860 prod = NEXT_RX_BD(prod);
3863 /* Save the RX chain producer index. */
3865 sc->rx_prod_bseq = prod_bseq;
/* Flush the host-written descriptors to memory before the chip reads them. */
3867 for (i = 0; i < RX_PAGES; i++) {
3869 sc->rx_bd_chain_tag,
3870 sc->rx_bd_chain_map[i],
3871 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3874 /* Tell the chip about the waiting rx_bd's. */
3875 REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3876 REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3878 DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3880 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3886 /****************************************************************************/
3887 /* Free memory and clear the RX data structures. */
3891 /****************************************************************************/
/*
 * NOTE(review): RX-side counterpart of bce_free_tx_chain(); frees every
 * ring-held mbuf after a POSTREAD sync, zeroes the BD pages, and audits
 * the debug mbuf counter for leaks.
 */
3893 bce_free_rx_chain(struct bce_softc *sc)
3897 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3899 /* Free any mbufs still in the RX mbuf chain. */
3900 for (i = 0; i < TOTAL_RX_BD; i++) {
3901 if (sc->rx_mbuf_ptr[i] != NULL) {
3902 if (sc->rx_mbuf_map[i] != NULL)
3903 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3904 BUS_DMASYNC_POSTREAD);
3905 m_freem(sc->rx_mbuf_ptr[i]);
3906 sc->rx_mbuf_ptr[i] = NULL;
3907 DBRUNIF(1, sc->rx_mbuf_alloc--);
3911 /* Clear each RX chain page. */
3912 for (i = 0; i < RX_PAGES; i++)
3913 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3915 /* Check if we lost any mbufs in the process. */
3916 DBRUNIF((sc->rx_mbuf_alloc),
3917 BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3918 __FILE__, __LINE__, sc->rx_mbuf_alloc));
3920 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3924 /****************************************************************************/
3925 /* Set media options. */
3928 /* 0 for success, positive value for failure. */
3929 /****************************************************************************/
/*
 * ifmedia "change" callback: pushes the user-selected media settings down
 * to the PHY via the miibus layer, resetting each attached PHY first when
 * more than one MII instance exists.  SerDes is unsupported (see ToDo).
 */
3931 bce_ifmedia_upd(struct ifnet *ifp)
3933 struct bce_softc *sc;
3934 struct mii_data *mii;
3935 struct ifmedia *ifm;
3939 ifm = &sc->bce_ifmedia;
3941 /* DRC - ToDo: Add SerDes support. */
3943 mii = device_get_softc(sc->bce_miibus);
3945 if (mii->mii_instance) {
3946 struct mii_softc *miisc;
3947 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3948 miisc = LIST_NEXT(miisc, mii_list))
3949 mii_phy_reset(miisc);
3957 /****************************************************************************/
3958 /* Reports current media status. */
3962 /****************************************************************************/
/*
 * ifmedia "status" callback: copies the live PHY media/link state from the
 * miibus softc into the caller's ifmediareq.  SerDes is unsupported.
 */
3964 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3966 struct bce_softc *sc;
3967 struct mii_data *mii;
3973 mii = device_get_softc(sc->bce_miibus);
3975 /* DRC - ToDo: Add SerDes support. */
3978 ifmr->ifm_active = mii->mii_media_active;
3979 ifmr->ifm_status = mii->mii_media_status;
3985 /****************************************************************************/
3986 /* Handles PHY generated interrupt events. */
3990 /****************************************************************************/
/*
 * Compares the link-state attention bit against its acknowledged copy in
 * the status block; on a transition it re-runs the tick handler to refresh
 * link state, acks the new state back to the chip via the PCICFG set/clear
 * commands, and finally clears the EMAC link-change interrupt source.
 */
3992 bce_phy_intr(struct bce_softc *sc)
3994 u32 new_link_state, old_link_state;
3996 new_link_state = sc->status_block->status_attn_bits &
3997 STATUS_ATTN_BITS_LINK_STATE;
3998 old_link_state = sc->status_block->status_attn_bits_ack &
3999 STATUS_ATTN_BITS_LINK_STATE;
4001 /* Handle any changes if the link state has changed. */
4002 if (new_link_state != old_link_state) {
4004 DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
/* Restart the periodic tick immediately so mii picks up the change. */
4007 callout_stop(&sc->bce_stat_ch);
4008 bce_tick_locked(sc);
4010 /* Update the status_attn_bits_ack field in the status block. */
4011 if (new_link_state) {
4012 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4013 STATUS_ATTN_BITS_LINK_STATE);
4014 DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
4017 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4018 STATUS_ATTN_BITS_LINK_STATE);
4019 DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
4024 /* Acknowledge the link change interrupt. */
4025 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4029 /****************************************************************************/
4030 /* Handles received frame interrupt events. */
4034 /****************************************************************************/
/*
 * RX completion processing.  Walks the rx_bd ring from the driver's
 * software consumer index up to the hardware consumer index published in
 * the status block.  For each completed frame: unmaps the mbuf, parses the
 * hardware-prepended l2_fhdr (length, error status, checksums, VLAN tag),
 * refills the ring slot with a new cluster (reusing the old mbuf when
 * allocation fails or the frame had errors), and hands good frames to
 * if_input.  Finishes by publishing the new producer index/byte sequence
 * to the chip mailbox.
 *
 * NOTE(review): statement order here is load-bearing (barriers around the
 * status-block reads, sync-before/after ring access) -- several connecting
 * lines are missing from this extraction, so treat the visible flow as
 * indicative rather than complete.
 */
4036 bce_rx_intr(struct bce_softc *sc)
4038 struct status_block *sblk = sc->status_block;
4039 struct ifnet *ifp = sc->bce_ifp;
4040 u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4042 struct l2_fhdr *l2fhdr;
4044 DBRUNIF(1, sc->rx_interrupts++);
4046 /* Prepare the RX chain pages to be accessed by the host CPU. */
4047 for (int i = 0; i < RX_PAGES; i++)
4048 bus_dmamap_sync(sc->rx_bd_chain_tag,
4049 sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4051 /* Get the hardware's view of the RX consumer index. */
4052 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
/* Skip indices that land on a chain-page pointer entry. */
4053 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4056 /* Get working copies of the driver's view of the RX indices. */
4057 sw_cons = sc->rx_cons;
4058 sw_prod = sc->rx_prod;
4059 sw_prod_bseq = sc->rx_prod_bseq;
4061 DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4062 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4063 __FUNCTION__, sw_prod, sw_cons,
4066 /* Prevent speculative reads from getting ahead of the status block. */
4067 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4068 BUS_SPACE_BARRIER_READ);
4070 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4071 sc->rx_low_watermark = sc->free_rx_bd);
4074 * Scan through the receive chain as long
4075 * as there is work to do.
4077 while (sw_cons != hw_cons) {
4083 /* Convert the producer/consumer indices to an actual rx_bd index. */
4084 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4085 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4087 /* Get the used rx_bd. */
4088 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4091 DBRUN(BCE_VERBOSE_RECV,
4092 BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
4093 bce_dump_rxbd(sc, sw_chain_cons, rxbd));
4095 #ifdef DEVICE_POLLING
/* Honor the polling budget: stop once bce_rxcycles is exhausted. */
4096 if (ifp->if_capenable & IFCAP_POLLING) {
4097 if (sc->bce_rxcycles <= 0)
4103 /* The mbuf is stored with the last rx_bd entry of a packet. */
4104 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4106 /* Validate that this is the last rx_bd. */
4107 DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4108 BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
4109 __FILE__, __LINE__, sw_chain_cons);
4110 bce_breakpoint(sc));
4112 /* DRC - ToDo: If the received packet is small, say less */
4113 /* than 128 bytes, allocate a new mbuf here, */
4114 /* copy the data to that mbuf, and recycle */
4115 /* the mapped jumbo frame. */
4117 /* Unmap the mbuf from DMA space. */
4118 bus_dmamap_sync(sc->rx_mbuf_tag,
4119 sc->rx_mbuf_map[sw_chain_cons],
4120 BUS_DMASYNC_POSTREAD);
4121 bus_dmamap_unload(sc->rx_mbuf_tag,
4122 sc->rx_mbuf_map[sw_chain_cons]);
4124 /* Remove the mbuf from the driver's chain. */
4125 m = sc->rx_mbuf_ptr[sw_chain_cons];
4126 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4129 * Frames received on the NetXteme II are prepended
4130 * with the l2_fhdr structure which provides status
4131 * information about the received frame (including
4132 * VLAN tags and checksum info) and are also
4133 * automatically adjusted to align the IP header
4134 * (i.e. two null bytes are inserted before the
4137 l2fhdr = mtod(m, struct l2_fhdr *);
4139 len = l2fhdr->l2_fhdr_pkt_len;
4140 status = l2fhdr->l2_fhdr_status;
4142 DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4143 BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
4144 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4146 /* Watch for unusual sized frames. */
4147 DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
4148 BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
4149 "Min(%d), Actual(%d), Max(%d)\n",
4150 __FILE__, __LINE__, (int) BCE_MIN_MTU,
4151 len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
4152 bce_dump_mbuf(sc, m);
4153 bce_breakpoint(sc));
/* Hardware length includes the CRC; strip it before handoff. */
4155 len -= ETHER_CRC_LEN;
4157 /* Check the received frame for errors. */
4158 if (status & (L2_FHDR_ERRORS_BAD_CRC |
4159 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
4160 L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
4163 DBRUNIF(1, sc->l2fhdr_status_errors++);
4165 /* Reuse the mbuf for a new frame. */
4166 if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4168 DBRUNIF(1, bce_breakpoint(sc));
4169 panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);
4172 goto bce_rx_int_next_rx;
4176 * Get a new mbuf for the rx_bd. If no new
4177 * mbufs are available then reuse the current mbuf,
4178 * log an ierror on the interface, and generate
4179 * an error in the system log.
4181 if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4184 BCE_PRINTF(sc, "%s(%d): Failed to allocate "
4185 "new mbuf, incoming frame dropped!\n",
4186 __FILE__, __LINE__));
4190 /* Try and reuse the exisitng mbuf. */
4191 if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4193 DBRUNIF(1, bce_breakpoint(sc));
4194 panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);
4197 goto bce_rx_int_next_rx;
4200 /* Skip over the l2_fhdr when passing the data up the stack. */
4201 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4203 /* Adjust the packet length to match the received data. */
4204 m->m_pkthdr.len = m->m_len = len;
4206 /* Send the packet to the appropriate interface. */
4207 m->m_pkthdr.rcvif = ifp;
4209 DBRUN(BCE_VERBOSE_RECV,
4210 struct ether_header *eh;
4211 eh = mtod(m, struct ether_header *);
4212 BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
4213 __FUNCTION__, eh->ether_dhost, ":",
4214 eh->ether_shost, ":", htons(eh->ether_type)));
4216 /* Validate the checksum if offload enabled. */
4217 if (ifp->if_capenable & IFCAP_RXCSUM) {
4219 /* Check for an IP datagram. */
4220 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4221 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4223 /* Check if the IP checksum is valid. */
4224 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4225 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4227 DBPRINT(sc, BCE_WARN_SEND,
4228 "%s(): Invalid IP checksum = 0x%04X!\n",
4229 __FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
4232 /* Check for a valid TCP/UDP frame. */
4233 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4234 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4236 /* Check for a good TCP/UDP checksum. */
4237 if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4238 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4239 m->m_pkthdr.csum_data =
4240 l2fhdr->l2_fhdr_tcp_udp_xsum;
4241 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
4244 DBPRINT(sc, BCE_WARN_SEND,
4245 "%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
4246 __FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
4252 * If we received a packet with a vlan tag,
4253 * attach that information to the packet.
4255 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4256 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
4257 __FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
4258 #if __FreeBSD_version < 700000
4259 VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
4261 m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
4262 m->m_flags |= M_VLANTAG;
4266 /* Pass the mbuf off to the upper layers. */
4268 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
4271 (*ifp->if_input)(ifp, m);
4272 DBRUNIF(1, sc->rx_mbuf_alloc--);
4276 sw_prod = NEXT_RX_BD(sw_prod);
4279 sw_cons = NEXT_RX_BD(sw_cons);
4281 /* Refresh hw_cons to see if there's new work */
4282 if (sw_cons == hw_cons) {
4283 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4284 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4288 /* Prevent speculative reads from getting ahead of the status block. */
4289 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4290 BUS_SPACE_BARRIER_READ);
/* Hand the refilled descriptors back to the device. */
4293 for (int i = 0; i < RX_PAGES; i++)
4294 bus_dmamap_sync(sc->rx_bd_chain_tag,
4295 sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4297 sc->rx_cons = sw_cons;
4298 sc->rx_prod = sw_prod;
4299 sc->rx_prod_bseq = sw_prod_bseq;
4301 REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4302 REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4304 DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4305 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4306 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4310 /****************************************************************************/
4311 /* Handles transmit completion interrupt events. */
4315 /****************************************************************************/
/*
 * TX completion processing.  Advances the software TX consumer index up to
 * the hardware consumer index from the status block, unloading the DMA map
 * and freeing the mbuf stored at the LAST descriptor of each completed
 * frame.  When enough descriptors are free again, clears IFF_DRV_OACTIVE
 * so the stack resumes queuing.  Caller must hold the softc lock.
 */
4317 bce_tx_intr(struct bce_softc *sc)
4319 struct status_block *sblk = sc->status_block;
4320 struct ifnet *ifp = sc->bce_ifp;
4321 u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4323 BCE_LOCK_ASSERT(sc);
4325 DBRUNIF(1, sc->tx_interrupts++);
4327 /* Get the hardware's view of the TX consumer index. */
4328 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4330 /* Skip to the next entry if this is a chain page pointer. */
4331 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4334 sw_tx_cons = sc->tx_cons;
4336 /* Prevent speculative reads from getting ahead of the status block. */
4337 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4338 BUS_SPACE_BARRIER_READ);
4340 /* Cycle through any completed TX chain page entries. */
4341 while (sw_tx_cons != hw_tx_cons) {
4343 struct tx_bd *txbd = NULL;
4345 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4347 DBPRINT(sc, BCE_INFO_SEND,
4348 "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4349 "sw_tx_chain_cons = 0x%04X\n",
4350 __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4352 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4353 BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
4354 " 0x%04X > 0x%04X\n",
4355 __FILE__, __LINE__, sw_tx_chain_cons,
4357 bce_breakpoint(sc));
4360 txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4361 [TX_IDX(sw_tx_chain_cons)]);
4363 DBRUNIF((txbd == NULL),
4364 BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
4365 __FILE__, __LINE__, sw_tx_chain_cons);
4366 bce_breakpoint(sc));
4368 DBRUN(BCE_INFO_SEND,
4369 BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
4370 bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4373 * Free the associated mbuf. Remember
4374 * that only the last tx_bd of a packet
4375 * has an mbuf pointer and DMA map.
4377 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4379 /* Validate that this is the last tx_bd. */
4380 DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)),
4381 BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
4382 "txmbuf == NULL!\n", __FILE__, __LINE__);
4383 bce_breakpoint(sc));
4385 DBRUN(BCE_INFO_SEND,
4386 BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
4387 "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
4389 /* Unmap the mbuf. */
4390 bus_dmamap_unload(sc->tx_mbuf_tag,
4391 sc->tx_mbuf_map[sw_tx_chain_cons]);
4393 /* Free the mbuf. */
4394 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4395 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4396 DBRUNIF(1, sc->tx_mbuf_alloc--);
4402 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4404 /* Refresh hw_cons to see if there's new work. */
4405 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4406 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4409 /* Prevent speculative reads from getting ahead of the status block. */
4410 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4411 BUS_SPACE_BARRIER_READ);
4414 /* Clear the TX timeout timer. */
4417 /* Clear the tx hardware queue full flag. */
4418 if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
4419 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
4420 BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n",
4421 __FUNCTION__, sc->used_tx_bd));
4422 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4425 sc->tx_cons = sw_tx_cons;
4429 /****************************************************************************/
4430 /* Disables interrupt generation. */
4434 /****************************************************************************/
/*
 * Masks device interrupts via the PCICFG interrupt-ack command register;
 * the trailing read flushes the posted write to the device.
 */
4436 bce_disable_intr(struct bce_softc *sc)
4438 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4439 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4440 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4444 /****************************************************************************/
4445 /* Enables interrupt generation. */
4449 /****************************************************************************/
/*
 * Re-arms device interrupts: acks the last seen status index (first with
 * the mask bit, then without) and forces an immediate coalescing event via
 * HC_COMMAND so any pending work generates an interrupt right away.
 */
4451 bce_enable_intr(struct bce_softc *sc)
4455 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4456 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4457 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4459 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4460 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4462 val = REG_RD(sc, BCE_HC_COMMAND);
4463 REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4467 /****************************************************************************/
4468 /* Handles controller initialization. */
4470 /* Must be called from a locked routine. */
4474 /****************************************************************************/
/*
 * Full (re)initialization with the softc lock held: reset the controller,
 * run chip/block init, program the MAC address and MTU (enabling jumbo
 * frames and 9k clusters when needed), set the RX filter, build the RX/TX
 * rings, arm interrupts (or polling trip points), sync media, mark the
 * interface running, and start the periodic tick callout.  Bails out early
 * if the interface is already running or any init stage fails.
 */
4476 bce_init_locked(struct bce_softc *sc)
4481 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4483 BCE_LOCK_ASSERT(sc);
4487 /* Check if the driver is still running and bail out if it is. */
4488 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4489 goto bce_init_locked_exit;
4493 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
4494 BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n",
4495 __FILE__, __LINE__);
4496 goto bce_init_locked_exit;
4499 if (bce_chipinit(sc)) {
4500 BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
4501 __FILE__, __LINE__);
4502 goto bce_init_locked_exit;
4505 if (bce_blockinit(sc)) {
4506 BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n",
4507 __FILE__, __LINE__);
4508 goto bce_init_locked_exit;
4511 /* Load our MAC address. */
4512 bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
4513 bce_set_mac_addr(sc);
4515 /* Calculate and program the Ethernet MTU size. */
4516 ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
4519 DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);
4522 * Program the mtu, enabling jumbo frame
4523 * support if necessary. Also set the mbuf
4524 * allocation count for RX frames.
4526 if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
4527 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu |
4528 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4529 sc->mbuf_alloc_size = MJUM9BYTES;
4531 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4532 sc->mbuf_alloc_size = MCLBYTES;
4535 /* Calculate the RX Ethernet frame size for rx_bd's. */
4536 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4538 DBPRINT(sc, BCE_INFO,
4539 "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4540 "max_frame_size = %d\n",
4541 __FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);
4543 /* Program appropriate promiscuous/multicast filtering. */
4544 bce_set_rx_mode(sc);
4546 /* Init RX buffer descriptor chain. */
4547 bce_init_rx_chain(sc);
4549 /* Init TX buffer descriptor chain. */
4550 bce_init_tx_chain(sc);
4552 #ifdef DEVICE_POLLING
4553 /* Disable interrupts if we are polling. */
4554 if (ifp->if_capenable & IFCAP_POLLING) {
4555 bce_disable_intr(sc);
/* Force quick-consumer trip points of 1 so polling sees all work. */
4557 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4558 (1 << 16) | sc->bce_rx_quick_cons_trip);
4559 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4560 (1 << 16) | sc->bce_tx_quick_cons_trip);
4563 /* Enable host interrupts. */
4564 bce_enable_intr(sc);
4566 bce_ifmedia_upd(ifp);
4568 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4569 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4571 callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4573 bce_init_locked_exit:
4574 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
/*
 * Minimal bring-up used when management firmware (ASF/IPMI) must keep the
 * NIC alive while the interface is administratively down: initializes the
 * on-chip CPUs and RV2P page size, enables only the critical RX MAC blocks,
 * and refreshes media.  Caller must hold the softc lock.
 */
4580 bce_mgmt_init_locked(struct bce_softc *sc)
4585 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4587 BCE_LOCK_ASSERT(sc);
4591 /* Check if the driver is still running and bail out if it is. */
4592 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4593 goto bce_mgmt_init_locked_exit;
4595 /* Initialize the on-boards CPUs */
4598 val = (BCM_PAGE_BITS - 8) << 24;
4599 REG_WR(sc, BCE_RV2P_CONFIG, val);
4601 /* Enable all critical blocks in the MAC. */
4602 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4603 BCE_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
4604 BCE_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
4605 BCE_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
4606 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4609 bce_ifmedia_upd(ifp);
4610 bce_mgmt_init_locked_exit:
4611 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4617 /****************************************************************************/
4618 /* Handles controller initialization when called from an unlocked routine. */
4622 /****************************************************************************/
/*
 * NOTE(review): the function signature line (bce_init(void *xsc)) appears
 * to be missing from this extraction; this is the if_init callback that
 * takes the softc lock around bce_init_locked().  Confirm against the
 * repository copy.
 */
4626 struct bce_softc *sc = xsc;
4629 bce_init_locked(sc);
4634 /****************************************************************************/
4635 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4636 /* memory visible to the controller. */
4639 /* 0 for success, positive value for failure. */
4640 /****************************************************************************/
/*
 * Maps 'm_head' into one or more tx_bd's starting at *prod.  Checksum
 * offload and VLAN flags are gathered into the BD flags word; the actual
 * descriptor fill happens in the bce_dma_map_tx_desc() callback driven by
 * bus_dmamap_load_mbuf().  On EFBIG the mbuf is defragmented once and the
 * load retried.  On success *prod points at the last used tx_bd, the DMA
 * map is migrated to the last descriptor's slot (one map covers all the
 * frame's segments), and used_tx_bd/tx_prod_bseq are advanced.
 */
4642 bce_tx_encap(struct bce_softc *sc, struct mbuf *m_head, u16 *prod)
4644 u32 vlan_tag_flags = 0;
4646 struct bce_dmamap_arg map_arg;
4650 /* Transfer any checksum offload flags to the bd. */
4651 if (m_head->m_pkthdr.csum_flags) {
4652 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
4653 vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM;
4654 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4655 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4658 /* Transfer any VLAN tags to the bd. */
4659 if (m_head->m_flags & M_VLANTAG)
4660 vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG |
4661 (m_head->m_pkthdr.ether_vtag << 16));
4663 /* Map the mbuf into DMAable memory. */
4664 chain_prod = TX_CHAIN_IDX(*prod);
4665 map = sc->tx_mbuf_map[chain_prod];
4667 map_arg.prod = *prod;
4668 map_arg.chain_prod = chain_prod;
4669 map_arg.prod_bseq = sc->tx_prod_bseq;
4670 map_arg.tx_flags = vlan_tag_flags;
/* Cap the segment count to the descriptors still free in the ring. */
4671 map_arg.maxsegs = USABLE_TX_BD - sc->used_tx_bd -
4674 KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!"));
4676 /* Map the mbuf into our DMA address space. */
4677 error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m_head,
4678 bce_dma_map_tx_desc, &map_arg, BUS_DMA_NOWAIT);
4680 if (error || map_arg.maxsegs == 0) {
4682 /* Try to defrag the mbuf if there are too many segments. */
4683 if (error == EFBIG && map_arg.maxsegs != 0) {
4686 DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
4687 __FUNCTION__, map_arg.maxsegs);
4689 m0 = m_defrag(m_head, M_DONTWAIT);
4692 error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag,
4693 map, m_head, bce_dma_map_tx_desc, &map_arg,
4698 /* Still getting an error after a defrag. */
4701 "%s(%d): Error mapping mbuf into TX chain!\n",
4702 __FILE__, __LINE__);
4704 goto bce_tx_encap_exit;
4710 * Ensure that the map for this transmission
4711 * is placed at the array index of the last
4712 * descriptor in this chain. This is done
4713 * because a single map is used for all
4714 * segments of the mbuf and we don't want to
4715 * delete the map before all of the segments
4718 sc->tx_mbuf_map[chain_prod] =
4719 sc->tx_mbuf_map[map_arg.chain_prod];
4720 sc->tx_mbuf_map[map_arg.chain_prod] = map;
4721 sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head;
4722 sc->used_tx_bd += map_arg.maxsegs;
4724 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4725 sc->tx_hi_watermark = sc->used_tx_bd);
4727 DBRUNIF(1, sc->tx_mbuf_alloc++);
4729 DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod,
4732 /* prod still points the last used tx_bd at this point. */
4733 *prod = map_arg.prod;
4734 sc->tx_prod_bseq = map_arg.prod_bseq;
4742 /****************************************************************************/
4743 /* Main transmit routine when called from another routine with a lock. */
4747 /****************************************************************************/
/* NOTE(review): embedded line numbers jump (e.g. 4776 -> 4781, 4800 -> 4804)*/
/* -- the NULL-dequeue break, count bookkeeping and closing braces are       */
/* elided from this listing; verify against the complete source.             */
4749 bce_start_locked(struct ifnet *ifp)
4751 struct bce_softc *sc = ifp->if_softc;
4752 struct mbuf *m_head = NULL;
4754 u16 tx_prod, tx_chain_prod;
4756 /* If there's no link or the transmit queue is empty then just exit. */
4757 if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
4758 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
4760 goto bce_start_locked_exit;
4763 /* prod points to the next free tx_bd. */
4764 tx_prod = sc->tx_prod;
4765 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4767 DBPRINT(sc, BCE_INFO_SEND,
4768 "%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4769 "tx_prod_bseq = 0x%08X\n",
4770 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4772 /* Keep adding entries while there is space in the ring. */
4773 while(sc->tx_mbuf_ptr[tx_chain_prod] == NULL) {
4775 /* Check for any frames to send. */
4776 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4781 * Pack the data into the transmit ring. If we
4782 * don't have room, place the mbuf back at the
4783 * head of the queue and set the OACTIVE flag
4784 * to wait for the NIC to drain the chain.
4786 if (bce_tx_encap(sc, m_head, &tx_prod)) {
4787 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4788 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4789 DBPRINT(sc, BCE_INFO_SEND,
4790 "TX chain is closed for business! Total tx_bd used = %d\n",
4797 /* Send a copy of the frame to any BPF listeners. */
4798 BPF_MTAP(ifp, m_head);
4800 tx_prod = NEXT_TX_BD(tx_prod);
4804 /* no packets were dequeued */
4805 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
4807 goto bce_start_locked_exit;
4810 /* Update the driver's counters. */
4811 sc->tx_prod = tx_prod;
4812 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4814 DBPRINT(sc, BCE_INFO_SEND,
4815 "%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4816 "tx_prod_bseq = 0x%08X\n",
4817 __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
/* Ring the doorbell: write the new producer index and byte sequence to */
/* the controller's mailbox registers to kick off DMA of the frames.    */
4819 /* Start the transmit. */
4820 REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4821 REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4823 /* Set the tx timeout. */
4824 ifp->if_timer = BCE_TX_TIMEOUT;
4826 bce_start_locked_exit:
4831 /****************************************************************************/
4832 /* Main transmit routine when called from another routine without a lock. */
4836 /****************************************************************************/
/* Unlocked wrapper around bce_start_locked().                               */
/* NOTE(review): the lock acquire/release lines (4841-4844) are elided from  */
/* this listing -- verify against the complete source.                       */
4838 bce_start(struct ifnet *ifp)
4840 struct bce_softc *sc = ifp->if_softc;
4843 bce_start_locked(ifp);
4848 /****************************************************************************/
4849 /* Handles any IOCTL calls from the operating system. */
4852 /* 0 for success, positive value for failure. */
4853 /****************************************************************************/
/* NOTE(review): the switch(command) statement, its case labels              */
/* (SIOCSIFMTU/SIOCSIFFLAGS/SIOC[ADD|DEL]MULTI/SIOC[SG]IFMEDIA/SIOCSIFCAP),  */
/* break statements and lock calls are elided from this listing (embedded    */
/* numbering jumps, e.g. 4862 -> 4868) -- verify against the full source.    */
4855 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4857 struct bce_softc *sc = ifp->if_softc;
4858 struct ifreq *ifr = (struct ifreq *) data;
4859 struct mii_data *mii;
4860 int mask, error = 0;
4862 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4868 /* Check that the MTU setting is supported. */
4869 if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
4870 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4875 DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
/* Force a re-init so RX buffers are re-sized for the new MTU. */
4878 ifp->if_mtu = ifr->ifr_mtu;
4879 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4880 bce_init_locked(sc);
4884 /* Set interface. */
4886 DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");
4890 /* Check if the interface is up. */
4891 if (ifp->if_flags & IFF_UP) {
4892 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4893 /* Change the promiscuous/multicast flags as necessary. */
4894 bce_set_rx_mode(sc);
4897 bce_init_locked(sc);
4900 /* The interface is down. Check if the driver is running. */
4901 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4911 /* Add/Delete multicast address */
4914 DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");
4917 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4918 bce_set_rx_mode(sc);
4925 /* Set/Get Interface media */
4928 DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
4930 DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
/* SerDes PHYs use the driver's own ifmedia; copper goes through MII. */
4933 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
4934 DBPRINT(sc, BCE_VERBOSE, "SerDes media set/get\n");
4936 error = ifmedia_ioctl(ifp, ifr,
4937 &sc->bce_ifmedia, command);
4939 DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4940 mii = device_get_softc(sc->bce_miibus);
4941 error = ifmedia_ioctl(ifp, ifr,
4942 &mii->mii_media, command);
4946 /* Set interface capability */
4948 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4949 DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
4951 #ifdef DEVICE_POLLING
4952 if (mask & IFCAP_POLLING) {
4953 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4955 /* Setup the poll routine to call. */
4956 error = ether_poll_register(bce_poll, ifp);
4958 BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
4959 __FILE__, __LINE__);
4960 goto bce_ioctl_exit;
4963 /* Clear the interrupt. */
4965 bce_disable_intr(sc);
/* Polling mode: disable the host coalescing interrupt trip points */
/* (the 1 << 16 "int" field) so the chip stops generating IRQs.    */
4967 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4968 (1 << 16) | sc->bce_rx_quick_cons_trip);
4969 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4970 (1 << 16) | sc->bce_tx_quick_cons_trip);
4972 ifp->if_capenable |= IFCAP_POLLING;
4975 /* Clear the poll routine. */
4976 error = ether_poll_deregister(ifp);
4978 /* Enable interrupt even in error case */
4980 bce_enable_intr(sc);
/* Restore the configured interrupt trip points for IRQ operation. */
4982 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4983 (sc->bce_tx_quick_cons_trip_int << 16) |
4984 sc->bce_tx_quick_cons_trip);
4985 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4986 (sc->bce_rx_quick_cons_trip_int << 16) |
4987 sc->bce_rx_quick_cons_trip);
4989 ifp->if_capenable &= ~IFCAP_POLLING;
4993 #endif /*DEVICE_POLLING */
4995 /* Toggle the TX checksum capabilities enable flag. */
4996 if (mask & IFCAP_TXCSUM) {
4997 ifp->if_capenable ^= IFCAP_TXCSUM;
4998 if (IFCAP_TXCSUM & ifp->if_capenable)
4999 ifp->if_hwassist = BCE_IF_HWASSIST;
5001 ifp->if_hwassist = 0;
5004 /* Toggle the RX checksum capabilities enable flag. */
5005 if (mask & IFCAP_RXCSUM) {
5006 ifp->if_capenable ^= IFCAP_RXCSUM;
5007 if (IFCAP_RXCSUM & ifp->if_capenable)
5008 ifp->if_hwassist = BCE_IF_HWASSIST;
5010 ifp->if_hwassist = 0;
5013 /* Toggle VLAN_MTU capabilities enable flag. */
5014 if (mask & IFCAP_VLAN_MTU) {
5015 BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
5016 __FILE__, __LINE__);
5019 /* Toggle VLANHWTAG capabilities enabled flag. */
5020 if (mask & IFCAP_VLAN_HWTAGGING) {
5021 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
5022 BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
5023 "management firmware (ASF/IPMI/UMP) is running!\n",
5024 __FILE__, __LINE__);
5026 BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
5027 __FILE__, __LINE__);
5032 DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
5035 /* We don't know how to handle the IOCTL, pass it on. */
5036 error = ether_ioctl(ifp, command, data);
5040 #ifdef DEVICE_POLLING
5044 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5050 /****************************************************************************/
5051 /* Transmit timeout handler. */
5055 /****************************************************************************/
/* Fired when a queued transmit fails to complete within BCE_TX_TIMEOUT:     */
/* logs the event and forces a full re-initialization of the controller.     */
/* NOTE(review): lock calls, the oerror counter bump and closing brace are   */
/* elided from this listing (numbering jumps 5066 -> 5068 -> 5071).          */
5057 bce_watchdog(struct ifnet *ifp)
5059 struct bce_softc *sc = ifp->if_softc;
5061 DBRUN(BCE_WARN_SEND,
5062 bce_dump_driver_state(sc);
5063 bce_dump_status_block(sc));
5065 BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n",
5066 __FILE__, __LINE__);
5068 /* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
/* Clear RUNNING so bce_init_locked() performs a full reset path. */
5071 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5073 bce_init_locked(sc);
5080 #ifdef DEVICE_POLLING
/* Locked DEVICE_POLLING service routine: syncs the status block, reaps any */
/* completed RX/TX work, and restarts transmission if frames are queued.    */
/* NOTE(review): the signature/return-type line, the bce_rx_intr/bce_tx_intr*/
/* call lines after each comparison, and the closing brace are elided from  */
/* this listing -- verify against the complete source.                      */
5082 bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
5084 struct bce_softc *sc = ifp->if_softc;
5086 BCE_LOCK_ASSERT(sc);
5088 sc->bce_rxcycles = count;
5090 bus_dmamap_sync(sc->status_tag, sc->status_map,
5091 BUS_DMASYNC_POSTWRITE);
5093 /* Check for any completed RX frames. */
5094 if (sc->status_block->status_rx_quick_consumer_index0 !=
5098 /* Check for any completed TX frames. */
5099 if (sc->status_block->status_tx_quick_consumer_index0 !=
5103 /* Check for new frames to transmit. */
5104 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5105 bce_start_locked(ifp);
/* Unlocked DEVICE_POLLING entry point: takes the softc lock (elided in this */
/* listing) and calls the locked poll routine only while the interface is    */
/* marked running.                                                           */
5111 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5113 struct bce_softc *sc = ifp->if_softc;
5116 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5117 bce_poll_locked(ifp, cmd, count);
5120 #endif /* DEVICE_POLLING */
/* Reports whether the status block shows pending work: new RX or TX        */
/* completions (consumer index moved past the driver's cached value) or a   */
/* link-state attention change.                                             */
/* NOTE(review): the return-type line, return statements and closing brace  */
/* are elided from this listing -- verify against the complete source.      */
5125 bce_has_work(struct bce_softc *sc)
5127 struct status_block *stat = sc->status_block;
5129 if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
5130 (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
5133 if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
5143 * Interrupt handler.
5145 /****************************************************************************/
5146 /* Main interrupt entry point. Verifies that the controller generated the */
5147 /* interrupt and then calls a separate routine to handle the various */
5148 /* interrupt causes (PHY, TX, RX). */
5151 /* 0 for success, positive value for failure. */
5152 /****************************************************************************/
/* NOTE(review): the function signature, lock calls, the while/do work loop  */
/* braces, the bce_phy_intr/bce_rx_intr/bce_tx_intr call lines and several   */
/* closing braces are elided from this listing (embedded numbering jumps)    */
/* -- verify against the complete source.                                    */
5156 struct bce_softc *sc;
5158 u32 status_attn_bits;
5165 DBRUNIF(1, sc->interrupts_generated++);
5167 #ifdef DEVICE_POLLING
5168 if (ifp->if_capenable & IFCAP_POLLING) {
5169 DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
5174 bus_dmamap_sync(sc->status_tag, sc->status_map,
5175 BUS_DMASYNC_POSTWRITE);
5178 * If the hardware status block index
5179 * matches the last value read by the
5180 * driver and we haven't asserted our
5181 * interrupt then there's nothing to do.
5183 if ((sc->status_block->status_idx == sc->last_status_idx) &&
5184 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5187 /* Ack the interrupt and stop others from occurring. */
5188 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5189 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5190 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5192 /* Keep processing data as long as there is work to do. */
5195 status_attn_bits = sc->status_block->status_attn_bits;
/* Debug hook: randomly inject a parity-error attention to exercise the */
/* fatal-attention recovery path below.                                 */
5197 DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
5198 BCE_PRINTF(sc, "Simulating unexpected status attention bit set.");
5199 status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
5201 /* Was it a link change interrupt? */
5202 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5203 (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5206 /* If any other attention is asserted then the chip is toast. */
5207 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5208 (sc->status_block->status_attn_bits_ack &
5209 ~STATUS_ATTN_BITS_LINK_STATE))) {
5211 DBRUN(1, sc->unexpected_attentions++);
5213 BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n",
5214 __FILE__, __LINE__, sc->status_block->status_attn_bits);
5217 if (bce_debug_unexpected_attention == 0)
5218 bce_breakpoint(sc));
/* Fatal attention: re-initialize the controller and bail out. */
5220 bce_init_locked(sc);
5224 /* Check for any completed RX frames. */
5225 if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
5228 /* Check for any completed TX frames. */
5229 if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
5232 /* Save the status block index value for use during the next interrupt. */
5233 sc->last_status_idx = sc->status_block->status_idx;
5235 /* Prevent speculative reads from getting ahead of the status block. */
5236 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
5237 BUS_SPACE_BARRIER_READ);
5239 /* If there's no work left then exit the interrupt service routine. */
5240 if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
5241 (sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
5246 bus_dmamap_sync(sc->status_tag, sc->status_map,
5247 BUS_DMASYNC_PREWRITE);
/* Two-step re-enable: first write keeps interrupts masked while updating */
/* the index, the second unmasks them.                                    */
5249 /* Re-enable interrupts. */
5250 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5251 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5252 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5253 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5254 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5256 /* Handle any frames that arrived while handling the interrupt. */
5257 if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5258 bce_start_locked(ifp);
5265 /****************************************************************************/
5266 /* Programs the various packet receive modes (broadcast and multicast). */
5270 /****************************************************************************/
/* NOTE(review): local declarations (ifp, h, i), braces, the IF_ADDR_LOCK    */
/* call and the selective-multicast hash-clear loop are elided from this     */
/* listing (numbering jumps, e.g. 5277 -> 5280) -- verify against the full   */
/* source.                                                                   */
5272 bce_set_rx_mode(struct bce_softc *sc)
5275 struct ifmultiaddr *ifma;
5276 u32 hashes[4] = { 0, 0, 0, 0 };
5277 u32 rx_mode, sort_mode;
5280 BCE_LOCK_ASSERT(sc);
5284 /* Initialize receive mode default settings. */
5285 rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5286 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5287 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5290 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5293 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5294 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5295 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5298 * Check for promiscuous, all multicast, or selected
5299 * multicast address filtering.
5301 if (ifp->if_flags & IFF_PROMISC) {
5302 DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
5304 /* Enable promiscuous mode. */
5305 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5306 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5307 } else if (ifp->if_flags & IFF_ALLMULTI) {
5308 DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
5310 /* Enable all multicast addresses. */
5311 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5312 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5314 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5316 /* Accept one or more multicast(s). */
5317 DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
/* Build a 128-bit hash filter: 7-bit CRC of each MAC selects one bit in */
/* one of the four 32-bit hash registers.                                */
5320 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5321 if (ifma->ifma_addr->sa_family != AF_LINK)
5323 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5324 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
5325 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
5327 IF_ADDR_UNLOCK(ifp);
5329 for (i = 0; i < 4; i++)
5330 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5332 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5335 /* Only make changes if the receive mode has actually changed. */
5336 if (rx_mode != sc->rx_mode) {
5337 DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5340 sc->rx_mode = rx_mode;
5341 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5344 /* Disable and clear the existing sort before enabling a new sort. */
5345 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5346 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5347 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5351 /****************************************************************************/
5352 /* Called periodically to update statistics from the controller's */
5353 /* statistics block. */
5357 /****************************************************************************/
/* Copies the DMA'd hardware statistics block into the ifnet counters and    */
/* the per-softc sysctl mirror fields. 64-bit hardware counters are split    */
/* into _hi/_lo register halves and recombined here.                         */
/* NOTE(review): the ifp local declaration and closing brace are elided from */
/* this listing (embedded numbering jumps) -- verify against the full source.*/
5359 bce_stats_update(struct bce_softc *sc)
5362 struct statistics_block *stats;
5364 DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5368 stats = (struct statistics_block *) sc->stats_block;
5371 * Update the interface statistics from the
5372 * hardware statistics.
5374 ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
5376 ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
5377 (u_long) stats->stat_EtherStatsOverrsizePkts +
5378 (u_long) stats->stat_IfInMBUFDiscards +
5379 (u_long) stats->stat_Dot3StatsAlignmentErrors +
5380 (u_long) stats->stat_Dot3StatsFCSErrors;
5382 ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5383 (u_long) stats->stat_Dot3StatsExcessiveCollisions +
5384 (u_long) stats->stat_Dot3StatsLateCollisions;
5387 * Certain controllers don't report
5388 * carrier sense errors correctly.
5389 * See errata E11_5708CA0_1165.
5391 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5392 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
5393 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5396 * Update the sysctl statistics from the
5397 * hardware statistics.
5399 sc->stat_IfHCInOctets =
5400 ((u64) stats->stat_IfHCInOctets_hi << 32) +
5401 (u64) stats->stat_IfHCInOctets_lo;
5403 sc->stat_IfHCInBadOctets =
5404 ((u64) stats->stat_IfHCInBadOctets_hi << 32) +
5405 (u64) stats->stat_IfHCInBadOctets_lo;
5407 sc->stat_IfHCOutOctets =
5408 ((u64) stats->stat_IfHCOutOctets_hi << 32) +
5409 (u64) stats->stat_IfHCOutOctets_lo;
5411 sc->stat_IfHCOutBadOctets =
5412 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
5413 (u64) stats->stat_IfHCOutBadOctets_lo;
5415 sc->stat_IfHCInUcastPkts =
5416 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
5417 (u64) stats->stat_IfHCInUcastPkts_lo;
5419 sc->stat_IfHCInMulticastPkts =
5420 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
5421 (u64) stats->stat_IfHCInMulticastPkts_lo;
5423 sc->stat_IfHCInBroadcastPkts =
5424 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5425 (u64) stats->stat_IfHCInBroadcastPkts_lo;
5427 sc->stat_IfHCOutUcastPkts =
5428 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
5429 (u64) stats->stat_IfHCOutUcastPkts_lo;
5431 sc->stat_IfHCOutMulticastPkts =
5432 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5433 (u64) stats->stat_IfHCOutMulticastPkts_lo;
5435 sc->stat_IfHCOutBroadcastPkts =
5436 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5437 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
5439 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5440 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5442 sc->stat_Dot3StatsCarrierSenseErrors =
5443 stats->stat_Dot3StatsCarrierSenseErrors;
5445 sc->stat_Dot3StatsFCSErrors =
5446 stats->stat_Dot3StatsFCSErrors;
5448 sc->stat_Dot3StatsAlignmentErrors =
5449 stats->stat_Dot3StatsAlignmentErrors;
5451 sc->stat_Dot3StatsSingleCollisionFrames =
5452 stats->stat_Dot3StatsSingleCollisionFrames;
5454 sc->stat_Dot3StatsMultipleCollisionFrames =
5455 stats->stat_Dot3StatsMultipleCollisionFrames;
5457 sc->stat_Dot3StatsDeferredTransmissions =
5458 stats->stat_Dot3StatsDeferredTransmissions;
5460 sc->stat_Dot3StatsExcessiveCollisions =
5461 stats->stat_Dot3StatsExcessiveCollisions;
5463 sc->stat_Dot3StatsLateCollisions =
5464 stats->stat_Dot3StatsLateCollisions;
5466 sc->stat_EtherStatsCollisions =
5467 stats->stat_EtherStatsCollisions;
5469 sc->stat_EtherStatsFragments =
5470 stats->stat_EtherStatsFragments;
5472 sc->stat_EtherStatsJabbers =
5473 stats->stat_EtherStatsJabbers;
5475 sc->stat_EtherStatsUndersizePkts =
5476 stats->stat_EtherStatsUndersizePkts;
5478 sc->stat_EtherStatsOverrsizePkts =
5479 stats->stat_EtherStatsOverrsizePkts;
5481 sc->stat_EtherStatsPktsRx64Octets =
5482 stats->stat_EtherStatsPktsRx64Octets;
5484 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5485 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5487 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5488 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5490 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5491 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5493 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5494 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5496 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5497 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5499 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5500 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5502 sc->stat_EtherStatsPktsTx64Octets =
5503 stats->stat_EtherStatsPktsTx64Octets;
5505 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5506 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5508 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5509 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5511 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5512 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5514 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5515 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5517 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5518 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5520 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5521 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5523 sc->stat_XonPauseFramesReceived =
5524 stats->stat_XonPauseFramesReceived;
5526 sc->stat_XoffPauseFramesReceived =
5527 stats->stat_XoffPauseFramesReceived;
5529 sc->stat_OutXonSent =
5530 stats->stat_OutXonSent;
5532 sc->stat_OutXoffSent =
5533 stats->stat_OutXoffSent;
5535 sc->stat_FlowControlDone =
5536 stats->stat_FlowControlDone;
5538 sc->stat_MacControlFramesReceived =
5539 stats->stat_MacControlFramesReceived;
5541 sc->stat_XoffStateEntered =
5542 stats->stat_XoffStateEntered;
5544 sc->stat_IfInFramesL2FilterDiscards =
5545 stats->stat_IfInFramesL2FilterDiscards;
5547 sc->stat_IfInRuleCheckerDiscards =
5548 stats->stat_IfInRuleCheckerDiscards;
5550 sc->stat_IfInFTQDiscards =
5551 stats->stat_IfInFTQDiscards;
5553 sc->stat_IfInMBUFDiscards =
5554 stats->stat_IfInMBUFDiscards;
5556 sc->stat_IfInRuleCheckerP4Hit =
5557 stats->stat_IfInRuleCheckerP4Hit;
5559 sc->stat_CatchupInRuleCheckerDiscards =
5560 stats->stat_CatchupInRuleCheckerDiscards;
5562 sc->stat_CatchupInFTQDiscards =
5563 stats->stat_CatchupInFTQDiscards;
5565 sc->stat_CatchupInMBUFDiscards =
5566 stats->stat_CatchupInMBUFDiscards;
5568 sc->stat_CatchupInRuleCheckerP4Hit =
5569 stats->stat_CatchupInRuleCheckerP4Hit;
5571 DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
/* Periodic (per-second) housekeeping with the softc lock held: sends the    */
/* firmware keep-alive pulse, refreshes statistics, reschedules the callout  */
/* and, if the link just came up, kicks any queued transmits.                */
/* NOTE(review): the msg/ifp declarations, the if/else around the pulse      */
/* message, the callout_reset() first arguments, the link-up test and the    */
/* mii_tick() call are elided from this listing (numbering jumps, e.g.       */
/* 5586 -> 5588 -> 5590) -- verify against the complete source.              */
5576 bce_tick_locked(struct bce_softc *sc)
5578 struct mii_data *mii = NULL;
5584 BCE_LOCK_ASSERT(sc);
5586 /* Tell the firmware that the driver is still running. */
5588 msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5590 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5592 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5594 /* Update the statistics from the hardware statistics block. */
5595 bce_stats_update(sc);
5597 /* Schedule the next tick. */
5599 &sc->bce_stat_ch, /* callout */
5601 bce_tick, /* function */
5602 sc); /* function argument */
5604 /* If link is already up then we're done. */
5606 goto bce_tick_locked_exit;
5608 /* DRC - ToDo: Add SerDes support and check SerDes link here. */
5610 mii = device_get_softc(sc->bce_miibus);
5613 /* Check if the link has come up. */
5614 if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5615 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5617 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5618 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5620 BCE_PRINTF(sc, "Gigabit link up\n");
5621 /* Now that link is up, handle any outstanding TX traffic. */
5622 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5623 bce_start_locked(ifp);
5626 bce_tick_locked_exit:
/* Body fragment of the unlocked tick callout entry point: resolves the      */
/* softc and defers to bce_tick_locked().                                    */
/* NOTE(review): the signature, the sc assignment and the lock calls are     */
/* elided from this listing -- verify against the complete source.           */
5634 struct bce_softc *sc;
5639 bce_tick_locked(sc);
5645 /****************************************************************************/
5646 /* Allows the driver state to be dumped through the sysctl interface. */
5649 /* 0 for success, positive value for failure. */
5650 /****************************************************************************/
/* Debug sysctl handler: dumps driver state when a value is written.         */
/* NOTE(review): the result declaration, the result==1 guard and the return  */
/* are elided from this listing -- verify against the complete source.       */
5652 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5656 struct bce_softc *sc;
5659 error = sysctl_handle_int(oidp, &result, 0, req);
5661 if (error || !req->newptr)
5665 sc = (struct bce_softc *)arg1;
5666 bce_dump_driver_state(sc);
5673 /****************************************************************************/
5674 /* Allows the hardware state to be dumped through the sysctl interface. */
5677 /* 0 for success, positive value for failure. */
5678 /****************************************************************************/
/* Debug sysctl handler: dumps hardware state when a value is written.       */
/* NOTE(review): the result declaration, the result==1 guard and the return  */
/* are elided from this listing -- verify against the complete source.       */
5680 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5684 struct bce_softc *sc;
5687 error = sysctl_handle_int(oidp, &result, 0, req);
5689 if (error || !req->newptr)
5693 sc = (struct bce_softc *)arg1;
5694 bce_dump_hw_state(sc);
5701 /****************************************************************************/
/* Debug sysctl handler: dumps the entire RX buffer-descriptor chain when a  */
/* value is written.                                                         */
5705 /* 0 for success, positive value for failure. */
5706 /****************************************************************************/
/* NOTE(review): the result declaration, the result==1 guard and the return  */
/* are elided from this listing -- verify against the complete source.       */
5708 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5712 struct bce_softc *sc;
5715 error = sysctl_handle_int(oidp, &result, 0, req);
5717 if (error || !req->newptr)
5721 sc = (struct bce_softc *)arg1;
5722 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5729 /****************************************************************************/
/* Debug sysctl handler: triggers a driver breakpoint when a value is        */
/* written.                                                                  */
5733 /* 0 for success, positive value for failure. */
5734 /****************************************************************************/
/* NOTE(review): the result declaration, the bce_breakpoint(sc) call and the */
/* return are elided from this listing -- verify against the complete source.*/
5736 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5740 struct bce_softc *sc;
5743 error = sysctl_handle_int(oidp, &result, 0, req);
5745 if (error || !req->newptr)
5749 sc = (struct bce_softc *)arg1;
5758 /****************************************************************************/
5759 /* Adds any sysctl parameters for tuning or debugging purposes. */
5762 /* 0 for success, positive value for failure. */
5763 /****************************************************************************/
5765 bce_add_sysctls(struct bce_softc *sc)
5767 struct sysctl_ctx_list *ctx;
5768 struct sysctl_oid_list *children;
5770 ctx = device_get_sysctl_ctx(sc->bce_dev);
5771 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
5773 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
5775 CTLFLAG_RD, &bce_driver_version,
5776 0, "bce driver version");
5779 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5781 CTLFLAG_RD, &sc->rx_low_watermark,
5782 0, "Lowest level of free rx_bd's");
5784 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5786 CTLFLAG_RD, &sc->tx_hi_watermark,
5787 0, "Highest level of used tx_bd's");
5789 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5790 "l2fhdr_status_errors",
5791 CTLFLAG_RD, &sc->l2fhdr_status_errors,
5792 0, "l2_fhdr status errors");
5794 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5795 "unexpected_attentions",
5796 CTLFLAG_RD, &sc->unexpected_attentions,
5797 0, "unexpected attentions");
5799 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5800 "lost_status_block_updates",
5801 CTLFLAG_RD, &sc->lost_status_block_updates,
5802 0, "lost status block updates");
5804 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5805 "mbuf_alloc_failed",
5806 CTLFLAG_RD, &sc->mbuf_alloc_failed,
5807 0, "mbuf cluster allocation failures");
5810 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5811 "stat_IfHcInOctets",
5812 CTLFLAG_RD, &sc->stat_IfHCInOctets,
5815 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5816 "stat_IfHCInBadOctets",
5817 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5818 "Bad bytes received");
5820 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5821 "stat_IfHCOutOctets",
5822 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5825 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5826 "stat_IfHCOutBadOctets",
5827 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5830 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5831 "stat_IfHCInUcastPkts",
5832 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5833 "Unicast packets received");
5835 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5836 "stat_IfHCInMulticastPkts",
5837 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5838 "Multicast packets received");
5840 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5841 "stat_IfHCInBroadcastPkts",
5842 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5843 "Broadcast packets received");
5845 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5846 "stat_IfHCOutUcastPkts",
5847 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5848 "Unicast packets sent");
5850 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5851 "stat_IfHCOutMulticastPkts",
5852 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5853 "Multicast packets sent");
5855 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5856 "stat_IfHCOutBroadcastPkts",
5857 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5858 "Broadcast packets sent");
5860 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5861 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5862 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5863 0, "Internal MAC transmit errors");
5865 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5866 "stat_Dot3StatsCarrierSenseErrors",
5867 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5868 0, "Carrier sense errors");
5870 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5871 "stat_Dot3StatsFCSErrors",
5872 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5873 0, "Frame check sequence errors");
5875 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5876 "stat_Dot3StatsAlignmentErrors",
5877 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5878 0, "Alignment errors");
5880 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5881 "stat_Dot3StatsSingleCollisionFrames",
5882 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5883 0, "Single Collision Frames");
5885 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5886 "stat_Dot3StatsMultipleCollisionFrames",
5887 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5888 0, "Multiple Collision Frames");
5890 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5891 "stat_Dot3StatsDeferredTransmissions",
5892 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5893 0, "Deferred Transmissions");
5895 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5896 "stat_Dot3StatsExcessiveCollisions",
5897 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5898 0, "Excessive Collisions");
5900 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5901 "stat_Dot3StatsLateCollisions",
5902 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5903 0, "Late Collisions");
5905 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5906 "stat_EtherStatsCollisions",
5907 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5910 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5911 "stat_EtherStatsFragments",
5912 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5915 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5916 "stat_EtherStatsJabbers",
5917 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5920 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5921 "stat_EtherStatsUndersizePkts",
5922 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5923 0, "Undersize packets");
5925 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5926 "stat_EtherStatsOverrsizePkts",
5927 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5928 0, "stat_EtherStatsOverrsizePkts");
5930 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5931 "stat_EtherStatsPktsRx64Octets",
5932 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5933 0, "Bytes received in 64 byte packets");
5935 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5936 "stat_EtherStatsPktsRx65Octetsto127Octets",
5937 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5938 0, "Bytes received in 65 to 127 byte packets");
5940 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5941 "stat_EtherStatsPktsRx128Octetsto255Octets",
5942 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5943 0, "Bytes received in 128 to 255 byte packets");
5945 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5946 "stat_EtherStatsPktsRx256Octetsto511Octets",
5947 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5948 0, "Bytes received in 256 to 511 byte packets");
5950 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5951 "stat_EtherStatsPktsRx512Octetsto1023Octets",
5952 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5953 0, "Bytes received in 512 to 1023 byte packets");
5955 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5956 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
5957 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5958 0, "Bytes received in 1024 t0 1522 byte packets");
5960 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5961 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
5962 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5963 0, "Bytes received in 1523 to 9022 byte packets");
5965 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5966 "stat_EtherStatsPktsTx64Octets",
5967 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5968 0, "Bytes sent in 64 byte packets");
5970 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5971 "stat_EtherStatsPktsTx65Octetsto127Octets",
5972 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5973 0, "Bytes sent in 65 to 127 byte packets");
5975 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5976 "stat_EtherStatsPktsTx128Octetsto255Octets",
5977 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5978 0, "Bytes sent in 128 to 255 byte packets");
5980 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5981 "stat_EtherStatsPktsTx256Octetsto511Octets",
5982 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5983 0, "Bytes sent in 256 to 511 byte packets");
5985 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5986 "stat_EtherStatsPktsTx512Octetsto1023Octets",
5987 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5988 0, "Bytes sent in 512 to 1023 byte packets");
5990 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5991 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
5992 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5993 0, "Bytes sent in 1024 to 1522 byte packets");
5995 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5996 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
5997 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5998 0, "Bytes sent in 1523 to 9022 byte packets");
6000 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6001 "stat_XonPauseFramesReceived",
6002 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6003 0, "XON pause frames receved");
6005 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6006 "stat_XoffPauseFramesReceived",
6007 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6008 0, "XOFF pause frames received");
6010 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6012 CTLFLAG_RD, &sc->stat_OutXonSent,
6013 0, "XON pause frames sent");
6015 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6017 CTLFLAG_RD, &sc->stat_OutXoffSent,
6018 0, "XOFF pause frames sent");
6020 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6021 "stat_FlowControlDone",
6022 CTLFLAG_RD, &sc->stat_FlowControlDone,
6023 0, "Flow control done");
6025 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6026 "stat_MacControlFramesReceived",
6027 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6028 0, "MAC control frames received");
6030 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6031 "stat_XoffStateEntered",
6032 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6033 0, "XOFF state entered");
6035 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6036 "stat_IfInFramesL2FilterDiscards",
6037 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6038 0, "Received L2 packets discarded");
6040 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6041 "stat_IfInRuleCheckerDiscards",
6042 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6043 0, "Received packets discarded by rule");
6045 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6046 "stat_IfInFTQDiscards",
6047 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6048 0, "Received packet FTQ discards");
6050 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6051 "stat_IfInMBUFDiscards",
6052 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6053 0, "Received packets discarded due to lack of controller buffer memory");
6055 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6056 "stat_IfInRuleCheckerP4Hit",
6057 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6058 0, "Received packets rule checker hits");
6060 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6061 "stat_CatchupInRuleCheckerDiscards",
6062 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6063 0, "Received packets discarded in Catchup path");
6065 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6066 "stat_CatchupInFTQDiscards",
6067 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6068 0, "Received packets discarded in FTQ in Catchup path");
6070 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6071 "stat_CatchupInMBUFDiscards",
6072 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6073 0, "Received packets discarded in controller buffer memory in Catchup path");
6075 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6076 "stat_CatchupInRuleCheckerP4Hit",
6077 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6078 0, "Received packets rule checker hits in Catchup path");
6081 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6082 "driver_state", CTLTYPE_INT | CTLFLAG_RW,
6084 bce_sysctl_driver_state, "I", "Drive state information");
6086 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6087 "hw_state", CTLTYPE_INT | CTLFLAG_RW,
6089 bce_sysctl_hw_state, "I", "Hardware state information");
6091 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6092 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6094 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6096 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6097 "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6099 bce_sysctl_breakpoint, "I", "Driver breakpoint");
6105 /****************************************************************************/
6106 /* BCE Debug Routines */
6107 /****************************************************************************/
6110 /****************************************************************************/
6111 /* Prints out information about an mbuf. */
6115 /****************************************************************************/
6117 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6120 struct mbuf *mp = m;
6123 /* Index out of range. */
6124 printf("mbuf ptr is null!\n");
6129 val_hi = BCE_ADDR_HI(mp);
6130 val_lo = BCE_ADDR_LO(mp);
6131 BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ",
6132 val_hi, val_lo, mp->m_len);
6134 if (mp->m_flags & M_EXT)
6136 if (mp->m_flags & M_PKTHDR)
6137 printf("M_PKTHDR ");
6140 if (mp->m_flags & M_EXT) {
6141 val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6142 val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6143 BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n",
6144 val_hi, val_lo, mp->m_ext.ext_size);
6154 /****************************************************************************/
6155 /* Prints out the mbufs in the TX mbuf chain. */
6159 /****************************************************************************/
6161 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6166 "----------------------------"
6168 "----------------------------\n");
6170 for (int i = 0; i < count; i++) {
6171 m = sc->tx_mbuf_ptr[chain_prod];
6172 BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6173 bce_dump_mbuf(sc, m);
6174 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6178 "----------------------------"
6180 "----------------------------\n");
6185 * This routine prints the RX mbuf chain.
6188 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6193 "----------------------------"
6195 "----------------------------\n");
6197 for (int i = 0; i < count; i++) {
6198 m = sc->rx_mbuf_ptr[chain_prod];
6199 BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6200 bce_dump_mbuf(sc, m);
6201 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6206 "----------------------------"
6208 "----------------------------\n");
6213 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6215 if (idx > MAX_TX_BD)
6216 /* Index out of range. */
6217 BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6218 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6219 /* TX Chain page pointer. */
6220 BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6221 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6223 /* Normal tx_bd entry. */
6224 BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6225 "flags = 0x%08X\n", idx,
6226 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6227 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
6232 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6234 if (idx > MAX_RX_BD)
6235 /* Index out of range. */
6236 BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6237 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
6238 /* TX Chain page pointer. */
6239 BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6240 idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6242 /* Normal tx_bd entry. */
6243 BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6244 "flags = 0x%08X\n", idx,
6245 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6246 rxbd->rx_bd_len, rxbd->rx_bd_flags);
6251 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6253 BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
6254 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6255 "tcp_udp_xsum = 0x%04X\n", idx,
6256 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6257 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6258 l2fhdr->l2_fhdr_tcp_udp_xsum);
6263 * This routine prints the TX chain.
6266 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6270 /* First some info about the tx_bd chain structure. */
6272 "----------------------------"
6274 "----------------------------\n");
6276 BCE_PRINTF(sc, "page size = 0x%08X, tx chain pages = 0x%08X\n",
6277 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6279 BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6280 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6282 BCE_PRINTF(sc, "total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD);
6285 "-----------------------------"
6287 "-----------------------------\n");
6289 /* Now print out the tx_bd's themselves. */
6290 for (int i = 0; i < count; i++) {
6291 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6292 bce_dump_txbd(sc, tx_prod, txbd);
6293 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6297 "-----------------------------"
6299 "-----------------------------\n");
6304 * This routine prints the RX chain.
6307 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6311 /* First some info about the tx_bd chain structure. */
6313 "----------------------------"
6315 "----------------------------\n");
6317 BCE_PRINTF(sc, "----- RX_BD Chain -----\n");
6319 BCE_PRINTF(sc, "page size = 0x%08X, rx chain pages = 0x%08X\n",
6320 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6322 BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6323 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6325 BCE_PRINTF(sc, "total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD);
6328 "----------------------------"
6330 "----------------------------\n");
6332 /* Now print out the rx_bd's themselves. */
6333 for (int i = 0; i < count; i++) {
6334 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6335 bce_dump_rxbd(sc, rx_prod, rxbd);
6336 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6340 "----------------------------"
6342 "----------------------------\n");
6347 * This routine prints the status block.
6350 bce_dump_status_block(struct bce_softc *sc)
6352 struct status_block *sblk;
6354 sblk = sc->status_block;
6356 BCE_PRINTF(sc, "----------------------------- Status Block "
6357 "-----------------------------\n");
6359 BCE_PRINTF(sc, "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
6360 sblk->status_attn_bits, sblk->status_attn_bits_ack,
6363 BCE_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
6364 sblk->status_rx_quick_consumer_index0,
6365 sblk->status_tx_quick_consumer_index0);
6367 BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
6369 /* Theses indices are not used for normal L2 drivers. */
6370 if (sblk->status_rx_quick_consumer_index1 ||
6371 sblk->status_tx_quick_consumer_index1)
6372 BCE_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
6373 sblk->status_rx_quick_consumer_index1,
6374 sblk->status_tx_quick_consumer_index1);
6376 if (sblk->status_rx_quick_consumer_index2 ||
6377 sblk->status_tx_quick_consumer_index2)
6378 BCE_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
6379 sblk->status_rx_quick_consumer_index2,
6380 sblk->status_tx_quick_consumer_index2);
6382 if (sblk->status_rx_quick_consumer_index3 ||
6383 sblk->status_tx_quick_consumer_index3)
6384 BCE_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
6385 sblk->status_rx_quick_consumer_index3,
6386 sblk->status_tx_quick_consumer_index3);
6388 if (sblk->status_rx_quick_consumer_index4 ||
6389 sblk->status_rx_quick_consumer_index5)
6390 BCE_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
6391 sblk->status_rx_quick_consumer_index4,
6392 sblk->status_rx_quick_consumer_index5);
6394 if (sblk->status_rx_quick_consumer_index6 ||
6395 sblk->status_rx_quick_consumer_index7)
6396 BCE_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
6397 sblk->status_rx_quick_consumer_index6,
6398 sblk->status_rx_quick_consumer_index7);
6400 if (sblk->status_rx_quick_consumer_index8 ||
6401 sblk->status_rx_quick_consumer_index9)
6402 BCE_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
6403 sblk->status_rx_quick_consumer_index8,
6404 sblk->status_rx_quick_consumer_index9);
6406 if (sblk->status_rx_quick_consumer_index10 ||
6407 sblk->status_rx_quick_consumer_index11)
6408 BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
6409 sblk->status_rx_quick_consumer_index10,
6410 sblk->status_rx_quick_consumer_index11);
6412 if (sblk->status_rx_quick_consumer_index12 ||
6413 sblk->status_rx_quick_consumer_index13)
6414 BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
6415 sblk->status_rx_quick_consumer_index12,
6416 sblk->status_rx_quick_consumer_index13);
6418 if (sblk->status_rx_quick_consumer_index14 ||
6419 sblk->status_rx_quick_consumer_index15)
6420 BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
6421 sblk->status_rx_quick_consumer_index14,
6422 sblk->status_rx_quick_consumer_index15);
6424 if (sblk->status_completion_producer_index ||
6425 sblk->status_cmd_consumer_index)
6426 BCE_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
6427 sblk->status_completion_producer_index,
6428 sblk->status_cmd_consumer_index);
6430 BCE_PRINTF(sc, "-------------------------------------------"
6431 "-----------------------------\n");
6436 * This routine prints the statistics block.
6439 bce_dump_stats_block(struct bce_softc *sc)
6441 struct statistics_block *sblk;
6443 sblk = sc->stats_block;
6446 "-----------------------------"
6448 "-----------------------------\n");
6450 BCE_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
6451 "IfHcInBadOctets = 0x%08X:%08X\n",
6452 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
6453 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
6455 BCE_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
6456 "IfHcOutBadOctets = 0x%08X:%08X\n",
6457 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
6458 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
6460 BCE_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
6461 "IfHcInMulticastPkts = 0x%08X:%08X\n",
6462 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
6463 sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);
6465 BCE_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
6466 "IfHcOutUcastPkts = 0x%08X:%08X\n",
6467 sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
6468 sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);
6470 BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
6471 sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
6472 sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);
6474 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
6475 BCE_PRINTF(sc, "0x%08X : "
6476 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6477 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6479 if (sblk->stat_Dot3StatsCarrierSenseErrors)
6480 BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
6481 sblk->stat_Dot3StatsCarrierSenseErrors);
6483 if (sblk->stat_Dot3StatsFCSErrors)
6484 BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
6485 sblk->stat_Dot3StatsFCSErrors);
6487 if (sblk->stat_Dot3StatsAlignmentErrors)
6488 BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6489 sblk->stat_Dot3StatsAlignmentErrors);
6491 if (sblk->stat_Dot3StatsSingleCollisionFrames)
6492 BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6493 sblk->stat_Dot3StatsSingleCollisionFrames);
6495 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6496 BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6497 sblk->stat_Dot3StatsMultipleCollisionFrames);
6499 if (sblk->stat_Dot3StatsDeferredTransmissions)
6500 BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6501 sblk->stat_Dot3StatsDeferredTransmissions);
6503 if (sblk->stat_Dot3StatsExcessiveCollisions)
6504 BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6505 sblk->stat_Dot3StatsExcessiveCollisions);
6507 if (sblk->stat_Dot3StatsLateCollisions)
6508 BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6509 sblk->stat_Dot3StatsLateCollisions);
6511 if (sblk->stat_EtherStatsCollisions)
6512 BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6513 sblk->stat_EtherStatsCollisions);
6515 if (sblk->stat_EtherStatsFragments)
6516 BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6517 sblk->stat_EtherStatsFragments);
6519 if (sblk->stat_EtherStatsJabbers)
6520 BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6521 sblk->stat_EtherStatsJabbers);
6523 if (sblk->stat_EtherStatsUndersizePkts)
6524 BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6525 sblk->stat_EtherStatsUndersizePkts);
6527 if (sblk->stat_EtherStatsOverrsizePkts)
6528 BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6529 sblk->stat_EtherStatsOverrsizePkts);
6531 if (sblk->stat_EtherStatsPktsRx64Octets)
6532 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6533 sblk->stat_EtherStatsPktsRx64Octets);
6535 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6536 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6537 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6539 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6540 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
6541 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6543 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6544 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
6545 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6547 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6548 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
6549 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6551 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6552 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
6553 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6555 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6556 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
6557 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6559 if (sblk->stat_EtherStatsPktsTx64Octets)
6560 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6561 sblk->stat_EtherStatsPktsTx64Octets);
6563 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6564 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6565 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6567 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6568 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
6569 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6571 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6572 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
6573 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6575 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6576 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
6577 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6579 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6580 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
6581 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6583 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6584 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
6585 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6587 if (sblk->stat_XonPauseFramesReceived)
6588 BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6589 sblk->stat_XonPauseFramesReceived);
6591 if (sblk->stat_XoffPauseFramesReceived)
6592 BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6593 sblk->stat_XoffPauseFramesReceived);
6595 if (sblk->stat_OutXonSent)
6596 BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
6597 sblk->stat_OutXonSent);
6599 if (sblk->stat_OutXoffSent)
6600 BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
6601 sblk->stat_OutXoffSent);
6603 if (sblk->stat_FlowControlDone)
6604 BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
6605 sblk->stat_FlowControlDone);
6607 if (sblk->stat_MacControlFramesReceived)
6608 BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6609 sblk->stat_MacControlFramesReceived);
6611 if (sblk->stat_XoffStateEntered)
6612 BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6613 sblk->stat_XoffStateEntered);
6615 if (sblk->stat_IfInFramesL2FilterDiscards)
6616 BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6617 sblk->stat_IfInFramesL2FilterDiscards);
6619 if (sblk->stat_IfInRuleCheckerDiscards)
6620 BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6621 sblk->stat_IfInRuleCheckerDiscards);
6623 if (sblk->stat_IfInFTQDiscards)
6624 BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6625 sblk->stat_IfInFTQDiscards);
6627 if (sblk->stat_IfInMBUFDiscards)
6628 BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6629 sblk->stat_IfInMBUFDiscards);
6631 if (sblk->stat_IfInRuleCheckerP4Hit)
6632 BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6633 sblk->stat_IfInRuleCheckerP4Hit);
6635 if (sblk->stat_CatchupInRuleCheckerDiscards)
6636 BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6637 sblk->stat_CatchupInRuleCheckerDiscards);
6639 if (sblk->stat_CatchupInFTQDiscards)
6640 BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6641 sblk->stat_CatchupInFTQDiscards);
6643 if (sblk->stat_CatchupInMBUFDiscards)
6644 BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6645 sblk->stat_CatchupInMBUFDiscards);
6647 if (sblk->stat_CatchupInRuleCheckerP4Hit)
6648 BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6649 sblk->stat_CatchupInRuleCheckerP4Hit);
6652 "-----------------------------"
6654 "-----------------------------\n");
6659 bce_dump_driver_state(struct bce_softc *sc)
6664 "-----------------------------"
6666 "-----------------------------\n");
6668 val_hi = BCE_ADDR_HI(sc);
6669 val_lo = BCE_ADDR_LO(sc);
6670 BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6673 val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6674 val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6675 BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6678 val_hi = BCE_ADDR_HI(sc->status_block);
6679 val_lo = BCE_ADDR_LO(sc->status_block);
6680 BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6683 val_hi = BCE_ADDR_HI(sc->stats_block);
6684 val_lo = BCE_ADDR_LO(sc->stats_block);
6685 BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6688 val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6689 val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6691 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
6694 val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6695 val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6697 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6700 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6701 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6703 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6706 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6707 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6709 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6712 BCE_PRINTF(sc, " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
6713 sc->interrupts_generated);
6715 BCE_PRINTF(sc, " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6718 BCE_PRINTF(sc, " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6721 BCE_PRINTF(sc, " 0x%08X - (sc->last_status_idx) status block index\n",
6722 sc->last_status_idx);
6724 BCE_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
6727 BCE_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
6730 BCE_PRINTF(sc, " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6733 BCE_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
6736 BCE_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
6739 BCE_PRINTF(sc, " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6742 BCE_PRINTF(sc, " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6745 BCE_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6748 BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6749 sc->rx_low_watermark, (u32) USABLE_RX_BD);
6751 BCE_PRINTF(sc, " 0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
6754 BCE_PRINTF(sc, " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6757 BCE_PRINTF(sc, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6760 BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6761 sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6763 BCE_PRINTF(sc, " 0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6764 sc->mbuf_alloc_failed);
6767 "-----------------------------"
6769 "-----------------------------\n");
6774 bce_dump_hw_state(struct bce_softc *sc)
6779 "----------------------------"
6781 "----------------------------\n");
6783 BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6785 val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6786 BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6787 val1, BCE_MISC_ENABLE_STATUS_BITS);
6789 val1 = REG_RD(sc, BCE_DMA_STATUS);
6790 BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6792 val1 = REG_RD(sc, BCE_CTX_STATUS);
6793 BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6795 val1 = REG_RD(sc, BCE_EMAC_STATUS);
6796 BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6798 val1 = REG_RD(sc, BCE_RPM_STATUS);
6799 BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6801 val1 = REG_RD(sc, BCE_TBDR_STATUS);
6802 BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6804 val1 = REG_RD(sc, BCE_TDMA_STATUS);
6805 BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6807 val1 = REG_RD(sc, BCE_HC_STATUS);
6808 BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6811 "----------------------------"
6813 "----------------------------\n");
6816 "----------------------------"
6818 "----------------------------\n");
6820 for (int i = 0x400; i < 0x8000; i += 0x10)
6821 BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6822 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6823 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6826 "----------------------------"
6828 "----------------------------\n");
6833 bce_breakpoint(struct bce_softc *sc)
6836 /* Unreachable code to shut the compiler up about unused functions. */
6838 bce_dump_txbd(sc, 0, NULL);
6839 bce_dump_rxbd(sc, 0, NULL);
6840 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6841 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
6842 bce_dump_l2fhdr(sc, 0, NULL);
6843 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6844 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
6845 bce_dump_status_block(sc);
6846 bce_dump_stats_block(sc);
6847 bce_dump_driver_state(sc);
6848 bce_dump_hw_state(sc);
6851 bce_dump_driver_state(sc);
6852 /* Print the important status block fields. */
6853 bce_dump_status_block(sc);
6855 /* Call the debugger. */