1 /*-
2  * Copyright (c) 2006 Broadcom Corporation
3  *      David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1
38  *
39  * The following controllers are not supported by this driver:
40  * (These are not "Production" versions of the controller.)
41  * 
42  *   BCM5706C A0, A1
43  *   BCM5706S A0, A1, A2, A3
44  *   BCM5708C A0, B0
45  *   BCM5708S A0, B0, B1
46  */
47
48 #include "opt_bce.h"
49
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52
53 /****************************************************************************/
54 /* BCE Driver Version                                                       */
55 /****************************************************************************/
56 char bce_driver_version[] = "v0.9.5";
57
58
59 /****************************************************************************/
60 /* BCE Debug Options                                                        */
61 /****************************************************************************/
62 #ifdef BCE_DEBUG
63         u32 bce_debug = BCE_WARN;
64
65         /*          0 = Never              */
66         /*          1 = 1 in 2,147,483,648 */
67         /*        256 = 1 in     8,388,608 */
68         /*       2048 = 1 in     1,048,576 */
69         /*      65536 = 1 in        32,768 */
70         /*    1048576 = 1 in         2,048 */
71         /*  268435456 = 1 in             8 */
72         /*  536870912 = 1 in             4 */
73         /* 1073741824 = 1 in             2 */
74
75         /* Controls how often the l2_fhdr frame error check will fail. */
76         int bce_debug_l2fhdr_status_check = 0;
77
78         /* Controls how often the unexpected attention check will fail. */
79         int bce_debug_unexpected_attention = 0;
80
81         /* Controls how often to simulate an mbuf allocation failure. */
82         int bce_debug_mbuf_allocation_failure = 0;
83
84         /* Controls how often to simulate a DMA mapping failure. */
85         int bce_debug_dma_map_addr_failure = 0;
86
87         /* Controls how often to simulate a bootcode failure. */
88         int bce_debug_bootcode_running_failure = 0;
89 #endif
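
/*
 * Illustrative sketch (not from the original driver): one plausible way the
 * failure-simulation knobs above could be consumed.  The helper name is
 * hypothetical and the use of the kernel random() routine is an assumption;
 * a knob value of N yields roughly N chances in 2^31 of simulating a
 * failure, matching the probability table above.
 */
#ifdef BCE_DEBUG
static __inline int
bce_debug_should_fail(int knob)
{
	/* random() returns a value in [0, 2^31), so P(fail) = knob / 2^31. */
	return (knob != 0 && random() < (u_long) knob);
}
#endif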
90
91
92 /****************************************************************************/
93 /* PCI Device ID Table                                                      */
94 /*                                                                          */
95 /* Used by bce_probe() to identify the devices supported by this driver.    */
96 /****************************************************************************/
97 #define BCE_DEVDESC_MAX         64
98
99 static struct bce_type bce_devs[] = {
100         /* BCM5706C Controllers and OEM boards. */
101         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
102                 "HP NC370T Multifunction Gigabit Server Adapter" },
103         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
104                 "HP NC370i Multifunction Gigabit Server Adapter" },
105         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
106                 "Broadcom NetXtreme II BCM5706 1000Base-T" },
107
108         /* BCM5706S controllers and OEM boards. */
109         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
110                 "HP NC370F Multifunction Gigabit Server Adapter" },
111         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
112                 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
113
114         /* BCM5708C controllers and OEM boards. */
115         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
116                 "Broadcom NetXtreme II BCM5708 1000Base-T" },
117
118         /* BCM5708S controllers and OEM boards. */
119         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID,  PCI_ANY_ID,
120                 "Broadcom NetXtreme II BCM5708 1000Base-SX" },
121         { 0, 0, 0, 0, NULL }
122 };
123
124
125 /****************************************************************************/
126 /* Supported Flash NVRAM device data.                                       */
127 /****************************************************************************/
128 static struct flash_spec flash_table[] =
129 {
130         /* Slow EEPROM */
131         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
132          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
133          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
134          "EEPROM - slow"},
135         /* Expansion entry 0001 */
136         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
137          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139          "Entry 0001"},
140         /* Saifun SA25F010 (non-buffered flash) */
141         /* strap, cfg1, & write1 need updates */
142         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
143          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
145          "Non-buffered flash (128kB)"},
146         /* Saifun SA25F020 (non-buffered flash) */
147         /* strap, cfg1, & write1 need updates */
148         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
149          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
151          "Non-buffered flash (256kB)"},
152         /* Expansion entry 0100 */
153         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
154          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
155          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156          "Entry 0100"},
157         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
158         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
159          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
160          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
161          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
162         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
163         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
164          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
165          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
166          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
167         /* Saifun SA25F005 (non-buffered flash) */
168         /* strap, cfg1, & write1 need updates */
169         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
170          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
171          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
172          "Non-buffered flash (64kB)"},
173         /* Fast EEPROM */
174         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
175          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
176          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177          "EEPROM - fast"},
178         /* Expansion entry 1001 */
179         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
180          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182          "Entry 1001"},
183         /* Expansion entry 1010 */
184         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
185          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
186          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
187          "Entry 1010"},
188         /* ATMEL AT45DB011B (buffered flash) */
189         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
190          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
191          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
192          "Buffered flash (128kB)"},
193         /* Expansion entry 1100 */
194         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
195          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197          "Entry 1100"},
198         /* Expansion entry 1101 */
199         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
200          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
201          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202          "Entry 1101"},
203         /* Atmel Expansion entry 1110 */
204         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
205          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
207          "Entry 1110 (Atmel)"},
208         /* ATMEL AT45DB021B (buffered flash) */
209         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
210          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
211          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
212          "Buffered flash (256kB)"},
213 };
214
215
216 /****************************************************************************/
217 /* FreeBSD device entry points.                                             */
218 /****************************************************************************/
219 static int  bce_probe                           (device_t);
220 static int  bce_attach                          (device_t);
221 static int  bce_detach                          (device_t);
222 static void bce_shutdown                        (device_t);
223
224
225 /****************************************************************************/
226 /* BCE Debug Data Structure Dump Routines                                   */
227 /****************************************************************************/
228 #ifdef BCE_DEBUG
229 static void bce_dump_mbuf                       (struct bce_softc *, struct mbuf *);
230 static void bce_dump_tx_mbuf_chain      (struct bce_softc *, int, int);
231 static void bce_dump_rx_mbuf_chain      (struct bce_softc *, int, int);
232 static void bce_dump_txbd                       (struct bce_softc *, int, struct tx_bd *);
233 static void bce_dump_rxbd                       (struct bce_softc *, int, struct rx_bd *);
234 static void bce_dump_l2fhdr                     (struct bce_softc *, int, struct l2_fhdr *);
235 static void bce_dump_tx_chain           (struct bce_softc *, int, int);
236 static void bce_dump_rx_chain           (struct bce_softc *, int, int);
237 static void bce_dump_status_block       (struct bce_softc *);
238 static void bce_dump_stats_block        (struct bce_softc *);
239 static void bce_dump_driver_state       (struct bce_softc *);
240 static void bce_dump_hw_state           (struct bce_softc *);
241 static void bce_breakpoint                      (struct bce_softc *);
242 #endif
243
244
245 /****************************************************************************/
246 /* BCE Register/Memory Access Routines                                      */
247 /****************************************************************************/
248 static u32  bce_reg_rd_ind                      (struct bce_softc *, u32);
249 static void bce_reg_wr_ind                      (struct bce_softc *, u32, u32);
250 static void bce_ctx_wr                          (struct bce_softc *, u32, u32, u32);
251 static int  bce_miibus_read_reg         (device_t, int, int);
252 static int  bce_miibus_write_reg        (device_t, int, int, int);
253 static void bce_miibus_statchg          (device_t);
254
255
256 /****************************************************************************/
257 /* BCE NVRAM Access Routines                                                */
258 /****************************************************************************/
259 static int  bce_acquire_nvram_lock      (struct bce_softc *);
260 static int  bce_release_nvram_lock      (struct bce_softc *);
261 static void bce_enable_nvram_access     (struct bce_softc *);
262 static void     bce_disable_nvram_access(struct bce_softc *);
263 static int  bce_nvram_read_dword        (struct bce_softc *, u32, u8 *, u32);
264 static int  bce_init_nvram                      (struct bce_softc *);
265 static int  bce_nvram_read                      (struct bce_softc *, u32, u8 *, int);
266 static int  bce_nvram_test                      (struct bce_softc *);
267 #ifdef BCE_NVRAM_WRITE_SUPPORT
268 static int  bce_enable_nvram_write      (struct bce_softc *);
269 static void bce_disable_nvram_write     (struct bce_softc *);
270 static int  bce_nvram_erase_page        (struct bce_softc *, u32);
271 static int  bce_nvram_write_dword       (struct bce_softc *, u32, u8 *, u32);
272 static int  bce_nvram_write                     (struct bce_softc *, u32, u8 *, int);
273 #endif
274
275 /****************************************************************************/
276 /* BCE DMA Allocation and Release Routines                                  */
277 /****************************************************************************/
278 static void bce_dma_map_addr            (void *, bus_dma_segment_t *, int, int);
279 static void bce_dma_map_tx_desc         (void *, bus_dma_segment_t *, int, bus_size_t, int);
280 static int  bce_dma_alloc                       (device_t);
281 static void bce_dma_free                        (struct bce_softc *);
282 static void bce_release_resources       (struct bce_softc *);
283
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load                                    */
286 /****************************************************************************/
287 static int  bce_fw_sync                         (struct bce_softc *, u32);
288 static void bce_load_rv2p_fw            (struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw                     (struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus                       (struct bce_softc *);
291
292 static void bce_stop                            (struct bce_softc *);
293 static int  bce_reset                           (struct bce_softc *, u32);
294 static int  bce_chipinit                        (struct bce_softc *);
295 static int  bce_blockinit                       (struct bce_softc *);
296 static int  bce_get_buf                         (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297
298 static int  bce_init_tx_chain           (struct bce_softc *);
299 static int  bce_init_rx_chain           (struct bce_softc *);
300 static void bce_free_rx_chain           (struct bce_softc *);
301 static void bce_free_tx_chain           (struct bce_softc *);
302
303 static int  bce_tx_encap                        (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
304 static void bce_start_locked            (struct ifnet *);
305 static void bce_start                           (struct ifnet *);
306 static int  bce_ioctl                           (struct ifnet *, u_long, caddr_t);
307 static void bce_watchdog                        (struct ifnet *);
308 static int  bce_ifmedia_upd                     (struct ifnet *);
309 static void bce_ifmedia_sts                     (struct ifnet *, struct ifmediareq *);
310 static void bce_init_locked                     (struct bce_softc *);
311 static void bce_init                            (void *);
312
313 static void bce_init_context            (struct bce_softc *);
314 static void bce_get_mac_addr            (struct bce_softc *);
315 static void bce_set_mac_addr            (struct bce_softc *);
316 static void bce_phy_intr                        (struct bce_softc *);
317 static void bce_rx_intr                         (struct bce_softc *);
318 static void bce_tx_intr                         (struct bce_softc *);
319 static void bce_disable_intr            (struct bce_softc *);
320 static void bce_enable_intr                     (struct bce_softc *);
321
322 #ifdef DEVICE_POLLING
323 static void bce_poll_locked                     (struct ifnet *, enum poll_cmd, int);
324 static void bce_poll                            (struct ifnet *, enum poll_cmd, int);
325 #endif
326 static void bce_intr                            (void *);
327 static void bce_set_rx_mode                     (struct bce_softc *);
328 static void bce_stats_update            (struct bce_softc *);
329 static void bce_tick_locked                     (struct bce_softc *);
330 static void bce_tick                            (void *);
331 static void bce_add_sysctls                     (struct bce_softc *);
332
333
334 /****************************************************************************/
335 /* FreeBSD device dispatch table.                                           */
336 /****************************************************************************/
337 static device_method_t bce_methods[] = {
338         /* Device interface */
339         DEVMETHOD(device_probe,         bce_probe),
340         DEVMETHOD(device_attach,        bce_attach),
341         DEVMETHOD(device_detach,        bce_detach),
342         DEVMETHOD(device_shutdown,      bce_shutdown),
343
344         /* bus interface */
345         DEVMETHOD(bus_print_child,      bus_generic_print_child),
346         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
347
348         /* MII interface */
349         DEVMETHOD(miibus_readreg,       bce_miibus_read_reg),
350         DEVMETHOD(miibus_writereg,      bce_miibus_write_reg),
351         DEVMETHOD(miibus_statchg,       bce_miibus_statchg),
352
353         { 0, 0 }
354 };
355
356 static driver_t bce_driver = {
357         "bce",
358         bce_methods,
359         sizeof(struct bce_softc)
360 };
361
362 static devclass_t bce_devclass;
363
364 MODULE_DEPEND(bce, pci, 1, 1, 1);
365 MODULE_DEPEND(bce, ether, 1, 1, 1);
366 MODULE_DEPEND(bce, miibus, 1, 1, 1);
367
368 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
369 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
370
371
372 /****************************************************************************/
373 /* Device probe function.                                                   */
374 /*                                                                          */
375 /* Compares the device to the driver's list of supported devices and        */
376 /* reports back to the OS whether this is the right driver for the device.  */
377 /*                                                                          */
378 /* Returns:                                                                 */
379 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
380 /****************************************************************************/
381 static int
382 bce_probe(device_t dev)
383 {
384         struct bce_type *t;
385         struct bce_softc *sc;
386         char *descbuf;
387         u16 vid = 0, did = 0, svid = 0, sdid = 0;
388
389         t = bce_devs;
390
391         sc = device_get_softc(dev);
392         bzero(sc, sizeof(struct bce_softc));
393         sc->bce_unit = device_get_unit(dev);
394         sc->bce_dev = dev;
395
396         /* Get the data for the device to be probed. */
397         vid  = pci_get_vendor(dev);
398         did  = pci_get_device(dev);
399         svid = pci_get_subvendor(dev);
400         sdid = pci_get_subdevice(dev);
401
402         DBPRINT(sc, BCE_VERBOSE_LOAD, 
403                 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
404                 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
405
406         /* Look through the list of known devices for a match. */
407         while(t->bce_name != NULL) {
408
409                 if ((vid == t->bce_vid) && (did == t->bce_did) && 
410                         ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
411                         ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
412
413                         descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
414
415                         if (descbuf == NULL)
416                                 return(ENOMEM);
417
418                         /* Print out the device identity. */
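                        /*
                         * For example, a PCI revision ID of 0x12 decodes to
                         * "B2" using the shift/mask arithmetic below.
                         */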
419                         snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s", 
420                                 t->bce_name,
421                             (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
422                             (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
423                             bce_driver_version);
424
425                         device_set_desc_copy(dev, descbuf);
426                         free(descbuf, M_TEMP);
427                         return(BUS_PROBE_DEFAULT);
428                 }
429                 t++;
430         }
431
432         DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No PCI device match found!\n", 
433                 __FILE__, __LINE__);
434
435         return(ENXIO);
436 }
437
438
439 /****************************************************************************/
440 /* Device attach function.                                                  */
441 /*                                                                          */
442 /* Allocates device resources, performs secondary chip identification,      */
443 /* resets and initializes the hardware, and initializes driver instance     */
444 /* variables.                                                               */
445 /*                                                                          */
446 /* Returns:                                                                 */
447 /*   0 on success, positive value on failure.                               */
448 /****************************************************************************/
449 static int
450 bce_attach(device_t dev)
451 {
452         struct bce_softc *sc;
453         struct ifnet *ifp;
454         u32 val;
455         int mbuf, rid, rc = 0;
456
457         sc = device_get_softc(dev);
458         sc->bce_dev = dev;
459
460         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
461
462         mbuf = device_get_unit(dev);
463         sc->bce_unit = mbuf;
464
465         pci_enable_busmaster(dev);
466
467         /* Allocate PCI memory resources. */
468         rid = PCIR_BAR(0);
469         sc->bce_res = bus_alloc_resource_any(
470                 dev,                                                    /* dev */
471                 SYS_RES_MEMORY,                                 /* type */
472                 &rid,                                                   /* rid */
473             RF_ACTIVE | PCI_RF_DENSE);          /* flags */
474
475         if (sc->bce_res == NULL) {
476                 BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n", 
477                         __FILE__, __LINE__);
478                 rc = ENXIO;
479                 goto bce_attach_fail;
480         }
481
482         /* Get various resource handles. */
483         sc->bce_btag    = rman_get_bustag(sc->bce_res);
484         sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
485         sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
486
487         /* Allocate PCI IRQ resources. */
488         rid = 0;
489         sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
490             RF_SHAREABLE | RF_ACTIVE);
491
492         if (sc->bce_irq == NULL) {
493                 BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n", 
494                         __FILE__, __LINE__);
495                 rc = ENXIO;
496                 goto bce_attach_fail;
497         }
498
499         /* Initialize mutex for the current device instance. */
500         BCE_LOCK_INIT(sc, device_get_nameunit(dev));
501
502         /*
503          * Configure byte swap and enable indirect register access.
504          * Rely on CPU to do target byte swapping on big endian systems.
505          * Access to registers outside of PCI configuration space is not
506          * valid until this is done.
507          */
508         pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
509                                BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
510                                BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
511
512         /* Save ASIC revision info. */
513         sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
514
515         /* Weed out any non-production controller revisions. */
516         switch(BCE_CHIP_ID(sc)) {
517                 case BCE_CHIP_ID_5706_A0:
518                 case BCE_CHIP_ID_5706_A1:
519                 case BCE_CHIP_ID_5708_A0:
520                 case BCE_CHIP_ID_5708_B0:
521                         BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
522                                 __FILE__, __LINE__, 
523                                 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
524                             (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
525                         rc = ENODEV;
526                         goto bce_attach_fail;
527         }
528
529         if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
530                 BCE_PRINTF(sc, "%s(%d): SerDes controllers are not supported!\n",
531                         __FILE__, __LINE__);
532                 rc = ENODEV;
533                 goto bce_attach_fail;
534         }
535
536         /* 
537          * The embedded PCIe to PCI-X bridge (EPB) 
538          * in the 5708 cannot address memory above 
539          * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 
540          */
541         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
542                 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
543         else
544                 sc->max_bus_addr = BUS_SPACE_MAXADDR;
545
546         /*
547          * Find the base address for shared memory access.
548          * Newer versions of bootcode use a signature and offset
549          * while older versions use a fixed address.
550          */
551         val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
552         if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
553                 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
554         else
555                 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
556
557         DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
558
559         /* Set initial device and PHY flags */
560         sc->bce_flags = 0;
561         sc->bce_phy_flags = 0;
562
563         /* Get PCI bus information (speed and type). */
564         val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
565         if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
566                 u32 clkreg;
567
568                 sc->bce_flags |= BCE_PCIX_FLAG;
569
570                 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
571
572                 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
573                 switch (clkreg) {
574                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
575                         sc->bus_speed_mhz = 133;
576                         break;
577
578                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
579                         sc->bus_speed_mhz = 100;
580                         break;
581
582                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
583                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
584                         sc->bus_speed_mhz = 66;
585                         break;
586
587                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
588                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
589                         sc->bus_speed_mhz = 50;
590                         break;
591
592                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
593                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
594                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
595                         sc->bus_speed_mhz = 33;
596                         break;
597                 }
598         } else {
599                 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
600                         sc->bus_speed_mhz = 66;
601                 else
602                         sc->bus_speed_mhz = 33;
603         }
604
605         if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
606                 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
607
608         BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
609                 sc->bce_chipid,
610                 ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
611                 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
612                 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
613                 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
614                 sc->bus_speed_mhz);
615
616         /* Reset the controller. */
617         if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
618                 rc = ENXIO;
619                 goto bce_attach_fail;
620         }
621
622         /* Initialize the controller. */
623         if (bce_chipinit(sc)) {
624                 BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
625                         __FILE__, __LINE__);
626                 rc = ENXIO;
627                 goto bce_attach_fail;
628         }
629
630         /* Perform NVRAM test. */
631         if (bce_nvram_test(sc)) {
632                 BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
633                         __FILE__, __LINE__);
634                 rc = ENXIO;
635                 goto bce_attach_fail;
636         }
637
638         /* Fetch the permanent Ethernet MAC address. */
639         bce_get_mac_addr(sc);
640
641         /*
642          * Trip points control how many BDs
643          * should be ready before generating an
644          * interrupt while ticks control how long
645          * a BD can sit in the chain before
646          * generating an interrupt.  Set the default 
647          * values for the RX and TX rings.
648          */
649
650 #ifdef BCE_DEBUG
651         /* Force more frequent interrupts. */
652         sc->bce_tx_quick_cons_trip_int = 1;
653         sc->bce_tx_quick_cons_trip     = 1;
654         sc->bce_tx_ticks_int           = 0;
655         sc->bce_tx_ticks               = 0;
656
657         sc->bce_rx_quick_cons_trip_int = 1;
658         sc->bce_rx_quick_cons_trip     = 1;
659         sc->bce_rx_ticks_int           = 0;
660         sc->bce_rx_ticks               = 0;
661 #else
662         sc->bce_tx_quick_cons_trip_int = 20;
663         sc->bce_tx_quick_cons_trip     = 20;
664         sc->bce_tx_ticks_int           = 80;
665         sc->bce_tx_ticks               = 80;
666
667         sc->bce_rx_quick_cons_trip_int = 6;
668         sc->bce_rx_quick_cons_trip     = 6;
669         sc->bce_rx_ticks_int           = 18;
670         sc->bce_rx_ticks               = 18;
671 #endif
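        /*
         * With the default (non-debug) RX settings above, for example, an
         * interrupt is generated once 6 receive BDs are ready or once the
         * RX tick interval expires, whichever comes first.
         */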
672
673         /* Update statistics once every second. */
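        /*
         * 1000000 & 0xffff00 = 0xF4200 (999,936); the low 8 bits of the
         * tick value are masked off.
         */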
674         sc->bce_stats_ticks = 1000000 & 0xffff00;
675
676         /*
677          * The copper based NetXtreme II controllers
678          * use an integrated PHY at address 1 while
679          * the SerDes controllers use a PHY at
680          * address 2.
681          */
682         sc->bce_phy_addr = 1;
683
684         if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
685                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
686                 sc->bce_flags |= BCE_NO_WOL_FLAG;
687                 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
688                         sc->bce_phy_addr = 2;
689                         val = REG_RD_IND(sc, sc->bce_shmem_base +
690                                          BCE_SHARED_HW_CFG_CONFIG);
691                         if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
692                                 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
693                 }
694         }
695
696         /* Allocate DMA memory resources. */
697         if (bce_dma_alloc(dev)) {
698                 BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
699                     __FILE__, __LINE__);
700                 rc = ENXIO;
701                 goto bce_attach_fail;
702         }
703
704         /* Allocate an ifnet structure. */
705         ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
706         if (ifp == NULL) {
707                 BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n", 
708                         __FILE__, __LINE__);
709                 rc = ENXIO;
710                 goto bce_attach_fail;
711         }
712
713         /* Initialize the ifnet interface. */
714         ifp->if_softc        = sc;
715         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
716         ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
717         ifp->if_ioctl        = bce_ioctl;
718         ifp->if_start        = bce_start;
719         ifp->if_timer        = 0;
720         ifp->if_watchdog     = bce_watchdog;
721         ifp->if_init         = bce_init;
722         ifp->if_mtu          = ETHERMTU;
723         ifp->if_hwassist     = BCE_IF_HWASSIST;
724         ifp->if_capabilities = BCE_IF_CAPABILITIES;
725         ifp->if_capenable    = ifp->if_capabilities;
726
727         /* Assume a standard 1500 byte MTU size for mbuf allocations. */
728         sc->mbuf_alloc_size  = MCLBYTES;
729 #ifdef DEVICE_POLLING
730         ifp->if_capabilities |= IFCAP_POLLING;
731 #endif
732
733         ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
734         if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
735                 ifp->if_baudrate = IF_Gbps(2.5);
736         else
737                 ifp->if_baudrate = IF_Gbps(1);
738
739         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
740         IFQ_SET_READY(&ifp->if_snd);
741
742         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
743                 BCE_PRINTF(sc, "%s(%d): SerDes is not supported by this driver!\n", 
744                         __FILE__, __LINE__);
745                 rc = ENODEV;
746                 goto bce_attach_fail;
747         } else {
748                 /* Look for our PHY. */
749                 if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
750                         bce_ifmedia_sts)) {
751                         BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n", 
752                                 __FILE__, __LINE__);
753                         rc = ENXIO;
754                         goto bce_attach_fail;
755                 }
756         }
757
758         /* Attach to the Ethernet interface list. */
759         ether_ifattach(ifp, sc->eaddr);
760
761 #if __FreeBSD_version < 500000
762         callout_init(&sc->bce_stat_ch);
763 #else
764         callout_init(&sc->bce_stat_ch, CALLOUT_MPSAFE);
765 #endif
766
767         /* Hookup IRQ last. */
768         rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
769            bce_intr, sc, &sc->bce_intrhand);
770
771         if (rc) {
772                 BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n", 
773                         __FILE__, __LINE__);
774                 bce_detach(dev);
775                 goto bce_attach_exit;
776         }
777
778         /* Print some important debugging info. */
779         DBRUN(BCE_INFO, bce_dump_driver_state(sc));
780
781         /* Add the supported sysctls to the kernel. */
782         bce_add_sysctls(sc);
783
784         goto bce_attach_exit;
785
786 bce_attach_fail:
787         bce_release_resources(sc);
788
789 bce_attach_exit:
790
791         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
792
793         return(rc);
794 }
795
796
797 /****************************************************************************/
798 /* Device detach function.                                                  */
799 /*                                                                          */
800 /* Stops the controller, resets the controller, and releases resources.     */
801 /*                                                                          */
802 /* Returns:                                                                 */
803 /*   0 on success, positive value on failure.                               */
804 /****************************************************************************/
805 static int
806 bce_detach(device_t dev)
807 {
808         struct bce_softc *sc;
809         struct ifnet *ifp;
810
811         sc = device_get_softc(dev);
812
813         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
814
815         ifp = sc->bce_ifp;
816
817 #ifdef DEVICE_POLLING
818         if (ifp->if_capenable & IFCAP_POLLING)
819                 ether_poll_deregister(ifp);
820 #endif
821
822         /* Stop and reset the controller. */
823         BCE_LOCK(sc);
824         bce_stop(sc);
825         bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
826         BCE_UNLOCK(sc);
827
828         ether_ifdetach(ifp);
829
830         /* If we have a child device on the MII bus remove it too. */
831         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
832                 ifmedia_removeall(&sc->bce_ifmedia);
833         } else {
834                 bus_generic_detach(dev);
835                 device_delete_child(dev, sc->bce_miibus);
836         }
837
838         /* Release all remaining resources. */
839         bce_release_resources(sc);
840
841         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
842
843         return(0);
844 }
845
846
847 /****************************************************************************/
848 /* Device shutdown function.                                                */
849 /*                                                                          */
850 /* Stops and resets the controller.                                         */
851 /*                                                                          */
852 /* Returns:                                                                 */
853 /*   Nothing                                                                */
854 /****************************************************************************/
855 static void
856 bce_shutdown(device_t dev)
857 {
858         struct bce_softc *sc = device_get_softc(dev);
859
860         BCE_LOCK(sc);
861         bce_stop(sc);
862         bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
863         BCE_UNLOCK(sc);
864 }
865
866
867 /****************************************************************************/
868 /* Indirect register read.                                                  */
869 /*                                                                          */
870 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
871 /* configuration space.  Using this mechanism avoids issues with posted     */
872 /* reads but is much slower than memory-mapped I/O.                         */
873 /*                                                                          */
874 /* Returns:                                                                 */
875 /*   The value of the register.                                             */
876 /****************************************************************************/
877 static u32
878 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
879 {
880         device_t dev;
881         dev = sc->bce_dev;
882
883         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
884 #ifdef BCE_DEBUG
885         {
886                 u32 val;
887                 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
888                 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
889                         __FUNCTION__, offset, val);
890                 return val;
891         }
892 #else
893         return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
894 #endif
895 }
896
897
898 /****************************************************************************/
899 /* Indirect register write.                                                 */
900 /*                                                                          */
901 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
902 /* configuration space.  Using this mechanism avoids issues with posted     */
903 /* writes but is much slower than memory-mapped I/O.                        */
904 /*                                                                          */
905 /* Returns:                                                                 */
906 /*   Nothing.                                                               */
907 /****************************************************************************/
908 static void
909 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
910 {
911         device_t dev;
912         dev = sc->bce_dev;
913
914         DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
915                 __FUNCTION__, offset, val);
916
917         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
918         pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
919 }
920
921
922 /****************************************************************************/
923 /* Context memory write.                                                    */
924 /*                                                                          */
925 /* The NetXtreme II controller uses context memory to track connection      */
926 /* information for L2 and higher network protocols.                         */
927 /*                                                                          */
928 /* Returns:                                                                 */
929 /*   Nothing.                                                               */
930 /****************************************************************************/
931 static void
932 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
933 {
934
935         DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
936                 "val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
937
938         offset += cid_addr;
939         REG_WR(sc, BCE_CTX_DATA_ADR, offset);
940         REG_WR(sc, BCE_CTX_DATA, val);
941 }
942
943
944 /****************************************************************************/
945 /* PHY register read.                                                       */
946 /*                                                                          */
947 /* Implements register reads on the MII bus.                                */
948 /*                                                                          */
949 /* Returns:                                                                 */
950 /*   The value of the register.                                             */
951 /****************************************************************************/
952 static int
953 bce_miibus_read_reg(device_t dev, int phy, int reg)
954 {
955         struct bce_softc *sc;
956         u32 val;
957         int i;
958
959         sc = device_get_softc(dev);
960
961         /* Make sure we are accessing the correct PHY address. */
962         if (phy != sc->bce_phy_addr) {
963                 DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
964                 return(0);
965         }
966
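        /*
         * If the PHY is being auto-polled by the MAC, temporarily disable
         * auto-polling so the MDIO interface is free for this manual read;
         * it is re-enabled once the read completes.
         */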
967         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
968                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
969                 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
970
971                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
972                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
973
974                 DELAY(40);
975         }
976
977         val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
978                 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
979                 BCE_EMAC_MDIO_COMM_START_BUSY;
980         REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
981
982         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
983                 DELAY(10);
984
985                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
986                 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
987                         DELAY(5);
988
989                         val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
990                         val &= BCE_EMAC_MDIO_COMM_DATA;
991
992                         break;
993                 }
994         }
995
996         if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
997                 BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
998                         __FILE__, __LINE__, phy, reg);
999                 val = 0x0;
1000         } else {
1001                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1002         }
1003
1004         DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1005                 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1006
1007         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1008                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1009                 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1010
1011                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1012                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1013
1014                 DELAY(40);
1015         }
1016
1017         return (val & 0xffff);
1018
1019 }
1020
1021
1022 /****************************************************************************/
1023 /* PHY register write.                                                      */
1024 /*                                                                          */
1025 /* Implements register writes on the MII bus.                               */
1026 /*                                                                          */
1027 /* Returns:                                                                 */
1028 /*   0 on success.                                                          */
1029 /****************************************************************************/
1030 static int
1031 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1032 {
1033         struct bce_softc *sc;
1034         u32 val1;
1035         int i;
1036
1037         sc = device_get_softc(dev);
1038
1039         /* Make sure we are accessing the correct PHY address. */
1040         if (phy != sc->bce_phy_addr) {
1041                 DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
1042                 return(0);
1043         }
1044
1045         DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1046                 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1047
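        /*
         * As in the read path, pause PHY auto-polling (if enabled) before
         * issuing the manual MDIO write; it is re-enabled afterwards.
         */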
1048         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1049                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1050                 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1051
1052                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1053                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1054
1055                 DELAY(40);
1056         }
1057
1058         val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1059                 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1060                 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1061         REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1062
1063         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1064                 DELAY(10);
1065
1066                 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1067                 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1068                         DELAY(5);
1069                         break;
1070                 }
1071         }
1072
1073         if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1074                 BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n", 
1075                         __FILE__, __LINE__);
1076
1077         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1078                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1079                 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1080
1081                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1082                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1083
1084                 DELAY(40);
1085         }
1086
1087         return 0;
1088 }
1089
1090
1091 /****************************************************************************/
1092 /* MII bus status change.                                                   */
1093 /*                                                                          */
1094 /* Called by the MII bus driver when the PHY establishes link to set the    */
1095 /* MAC interface registers.                                                 */
1096 /*                                                                          */
1097 /* Returns:                                                                 */
1098 /*   Nothing.                                                               */
1099 /****************************************************************************/
1100 static void
1101 bce_miibus_statchg(device_t dev)
1102 {
1103         struct bce_softc *sc;
1104         struct mii_data *mii;
1105
1106         sc = device_get_softc(dev);
1107
1108         mii = device_get_softc(sc->bce_miibus);
1109
1110         BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1111
1112         /* Set MII or GMII interface based on the speed negotiated by the PHY. */
1113         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
1114                 DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1115                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1116         } else {
1117                 DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1118                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1119         }
1120
1121         /* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1122         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1123                 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1124                 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1125         } else {
1126                 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1127                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1128         }
1129 }
1130
1131
1132 /****************************************************************************/
1133 /* Acquire NVRAM lock.                                                      */
1134 /*                                                                          */
1135 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1136 /* Lock 1 is used by the firmware and lock 2 is reserved for use by the     */
1137 /* driver.                                                                   */
1138 /*                                                                          */
1139 /* Returns:                                                                 */
1140 /*   0 on success, positive value on failure.                               */
1141 /****************************************************************************/
1142 static int
1143 bce_acquire_nvram_lock(struct bce_softc *sc)
1144 {
1145         u32 val;
1146         int j;
1147
1148         DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1149
1150         /* Request access to the flash interface. */
1151         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1152         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1153                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1154                 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1155                         break;
1156
1157                 DELAY(5);
1158         }
1159
1160         if (j >= NVRAM_TIMEOUT_COUNT) {
1161                 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1162                 return EBUSY;
1163         }
1164
1165         return 0;
1166 }
1167
1168
1169 /****************************************************************************/
1170 /* Release NVRAM lock.                                                      */
1171 /*                                                                          */
1172 /* When the caller is finished accessing NVRAM, the lock must be released.  */
1173 /* Lock 1 is used by the firmware and lock 2 is reserved for use by the     */
1174 /* driver.                                                                   */
1175 /*                                                                          */
1176 /* Returns:                                                                 */
1177 /*   0 on success, positive value on failure.                               */
1178 /****************************************************************************/
1179 static int
1180 bce_release_nvram_lock(struct bce_softc *sc)
1181 {
1182         int j;
1183         u32 val;
1184
1185         DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1186
1187         /*
1188          * Relinquish nvram interface.
1189          */
1190         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1191
1192         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1193                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1194                 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1195                         break;
1196
1197                 DELAY(5);
1198         }
1199
1200         if (j >= NVRAM_TIMEOUT_COUNT) {
1201                 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1202                 return EBUSY;
1203         }
1204
1205         return 0;
1206 }
1207
1208
1209 #ifdef BCE_NVRAM_WRITE_SUPPORT
1210 /****************************************************************************/
1211 /* Enable NVRAM write access.                                               */
1212 /*                                                                          */
1213 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1214 /*                                                                          */
1215 /* Returns:                                                                 */
1216 /*   0 on success, positive value on failure.                               */
1217 /****************************************************************************/
1218 static int
1219 bce_enable_nvram_write(struct bce_softc *sc)
1220 {
1221         u32 val;
1222
1223         DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1224
1225         val = REG_RD(sc, BCE_MISC_CFG);
1226         REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1227
1228         if (!sc->bce_flash_info->buffered) {
1229                 int j;
1230
1231                 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1232                 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1233
1234                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1235                         DELAY(5);
1236
1237                         val = REG_RD(sc, BCE_NVM_COMMAND);
1238                         if (val & BCE_NVM_COMMAND_DONE)
1239                                 break;
1240                 }
1241
1242                 if (j >= NVRAM_TIMEOUT_COUNT) {
1243                         DBPRINT(sc, BCE_WARN, "Timeout enabling NVRAM write!\n");
1244                         return EBUSY;
1245                 }
1246         }
1247         return 0;
1248 }
1249
1250
1251 /****************************************************************************/
1252 /* Disable NVRAM write access.                                              */
1253 /*                                                                          */
1254 /* When the caller is finished writing to NVRAM write access must be        */
1255 /* disabled.                                                                */
1256 /*                                                                          */
1257 /* Returns:                                                                 */
1258 /*   Nothing.                                                               */
1259 /****************************************************************************/
1260 static void
1261 bce_disable_nvram_write(struct bce_softc *sc)
1262 {
1263         u32 val;
1264
1265         DBPRINT(sc, BCE_VERBOSE,  "Disabling NVRAM write.\n");
1266
1267         val = REG_RD(sc, BCE_MISC_CFG);
1268         REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1269 }
1270 #endif
1271
1272
1273 /****************************************************************************/
1274 /* Enable NVRAM access.                                                     */
1275 /*                                                                          */
1276 /* Before accessing NVRAM for read or write operations the caller must      */
1277 /* enable NVRAM access.                                                     */
1278 /*                                                                          */
1279 /* Returns:                                                                 */
1280 /*   Nothing.                                                               */
1281 /****************************************************************************/
1282 static void
1283 bce_enable_nvram_access(struct bce_softc *sc)
1284 {
1285         u32 val;
1286
1287         DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1288
1289         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1290         /* Enable both bits, even on read. */
1291         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1292                val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1293 }
1294
1295
1296 /****************************************************************************/
1297 /* Disable NVRAM access.                                                    */
1298 /*                                                                          */
1299 /* When the caller is finished accessing NVRAM access must be disabled.     */
1300 /*                                                                          */
1301 /* Returns:                                                                 */
1302 /*   Nothing.                                                               */
1303 /****************************************************************************/
1304 static void
1305 bce_disable_nvram_access(struct bce_softc *sc)
1306 {
1307         u32 val;
1308
1309         DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1310
1311         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1312
1313         /* Disable both bits, even after read. */
1314         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1315                 val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1316                         BCE_NVM_ACCESS_ENABLE_WR_EN));
1317 }
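/*
 * Taken together, a single dword read using the helpers above follows
 * this general shape (a sketch only, with offset standing in for the
 * caller's byte offset; bce_nvram_read() below is the full version with
 * alignment handling):
 *
 *      u8 buf[4];
 *      int rc;
 *
 *      if ((rc = bce_acquire_nvram_lock(sc)) == 0) {
 *              bce_enable_nvram_access(sc);
 *              rc = bce_nvram_read_dword(sc, offset, buf,
 *                      BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST);
 *              bce_disable_nvram_access(sc);
 *              bce_release_nvram_lock(sc);
 *      }
 */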
1318
1319
1320 #ifdef BCE_NVRAM_WRITE_SUPPORT
1321 /****************************************************************************/
1322 /* Erase NVRAM page before writing.                                         */
1323 /*                                                                          */
1324 /* Non-buffered flash parts require that a page be erased before it is      */
1325 /* written.                                                                 */
1326 /*                                                                          */
1327 /* Returns:                                                                 */
1328 /*   0 on success, positive value on failure.                               */
1329 /****************************************************************************/
1330 static int
1331 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1332 {
1333         u32 cmd;
1334         int j;
1335
1336         /* Buffered flash doesn't require an erase. */
1337         if (sc->bce_flash_info->buffered)
1338                 return 0;
1339
1340         DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1341
1342         /* Build an erase command. */
1343         cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1344               BCE_NVM_COMMAND_DOIT;
1345
1346         /*
1347          * Clear the DONE bit separately, set the NVRAM address to erase,
1348          * and issue the erase command.
1349          */
1350         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1351         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1352         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1353
1354         /* Wait for completion.
1355          */
1356         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1357                 u32 val;
1358
1359                 DELAY(5);
1360
1361                 val = REG_RD(sc, BCE_NVM_COMMAND);
1362                 if (val & BCE_NVM_COMMAND_DONE)
1363                         break;
1364         }
1365
1366         if (j >= NVRAM_TIMEOUT_COUNT) {
1367                 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1368                 return EBUSY;
1369         }
1370
1371         return 0;
1372 }
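/*
 * Typically a non-buffered (SPI-style) part can only clear bits during a
 * write, so the page erase above is what returns the page to an all-ones
 * state before bce_nvram_write() rewrites it.  Buffered parts manage this
 * internally, which is why the routine returns immediately for them.
 */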
1373 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1374
1375
1376 /****************************************************************************/
1377 /* Read a dword (32 bits) from NVRAM.                                       */
1378 /*                                                                          */
1379 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1380 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1381 /*                                                                          */
1382 /* Returns:                                                                 */
1383 /*   0 on success and the 32 bit value read, positive value on failure.     */
1384 /****************************************************************************/
1385 static int
1386 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1387                                                         u32 cmd_flags)
1388 {
1389         u32 cmd;
1390         int i, rc = 0;
1391
1392         /* Build the command word. */
1393         cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1394
1395         /* Calculate the offset for buffered flash. */
1396         if (sc->bce_flash_info->buffered) {
1397                 offset = ((offset / sc->bce_flash_info->page_size) <<
1398                            sc->bce_flash_info->page_bits) +
1399                           (offset % sc->bce_flash_info->page_size);
1400         }
1401
1402         /*
1403          * Clear the DONE bit separately, set the address to read,
1404          * and issue the read.
1405          */
1406         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1407         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1408         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1409
1410         /* Wait for completion. */
1411         for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1412                 u32 val;
1413
1414                 DELAY(5);
1415
1416                 val = REG_RD(sc, BCE_NVM_COMMAND);
1417                 if (val & BCE_NVM_COMMAND_DONE) {
1418                         val = REG_RD(sc, BCE_NVM_READ);
1419
1420                         val = bce_be32toh(val);
1421                         memcpy(ret_val, &val, 4);
1422                         break;
1423                 }
1424         }
1425
1426         /* Check for errors. */
1427         if (i >= NVRAM_TIMEOUT_COUNT) {
1428                 BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1429                         __FILE__, __LINE__, offset);
1430                 rc = EBUSY;
1431         }
1432
1433         return(rc);
1434 }
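/*
 * For buffered parts the linear offset is translated into a (page, byte)
 * device address as shown above.  As a worked example, assuming a part
 * with 264-byte pages addressed using 9 page bits: offset 1000 falls in
 * page 1000 / 264 = 3 at byte 1000 % 264 = 208, giving a device address
 * of (3 << 9) + 208 = 1744.
 */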
1435
1436
1437 #ifdef BCE_NVRAM_WRITE_SUPPORT
1438 /****************************************************************************/
1439 /* Write a dword (32 bits) to NVRAM.                                        */
1440 /*                                                                          */
1441 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1442 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1443 /* enabled NVRAM write access.                                              */
1444 /*                                                                          */
1445 /* Returns:                                                                 */
1446 /*   0 on success, positive value on failure.                               */
1447 /****************************************************************************/
1448 static int
1449 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1450         u32 cmd_flags)
1451 {
1452         u32 cmd, val32;
1453         int j;
1454
1455         /* Build the command word. */
1456         cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1457
1458         /* Calculate the offset for buffered flash. */
1459         if (sc->bce_flash_info->buffered) {
1460                 offset = ((offset / sc->bce_flash_info->page_size) <<
1461                           sc->bce_flash_info->page_bits) +
1462                          (offset % sc->bce_flash_info->page_size);
1463         }
1464
1465         /*
1466          * Clear the DONE bit separately, convert NVRAM data to big-endian,
1467          * set the NVRAM address to write, and issue the write command
1468          */
1469         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1470         memcpy(&val32, val, 4);
1471         val32 = htobe32(val32);
1472         REG_WR(sc, BCE_NVM_WRITE, val32);
1473         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1474         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1475
1476         /* Wait for completion. */
1477         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1478                 DELAY(5);
1479
1480                 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1481                         break;
1482         }
1483         if (j >= NVRAM_TIMEOUT_COUNT) {
1484                 BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1485                         __FILE__, __LINE__, offset);
1486                 return EBUSY;
1487         }
1488
1489         return 0;
1490 }
1491 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1492
1493
1494 /****************************************************************************/
1495 /* Initialize NVRAM access.                                                 */
1496 /*                                                                          */
1497 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1498 /* access that device.                                                      */
1499 /*                                                                          */
1500 /* Returns:                                                                 */
1501 /*   0 on success, positive value on failure.                               */
1502 /****************************************************************************/
1503 static int
1504 bce_init_nvram(struct bce_softc *sc)
1505 {
1506         u32 val;
1507         int j, entry_count, rc;
1508         struct flash_spec *flash;
1509
1510         DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1511
1512         /* Determine the selected interface. */
1513         val = REG_RD(sc, BCE_NVM_CFG1);
1514
1515         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1516
1517         rc = 0;
1518
1519         /*
1520          * Flash reconfiguration is required to support additional
1521          * NVRAM devices not directly supported in hardware.
1522          * Check if the flash interface was reconfigured
1523          * by the bootcode.
1524          */
1525
1526         if (val & 0x40000000) {
1527                 /* Flash interface reconfigured by bootcode. */
1528
1529                 DBPRINT(sc,BCE_INFO_LOAD, 
1530                         "bce_init_nvram(): Flash WAS reconfigured.\n");
1531
1532                 for (j = 0, flash = &flash_table[0]; j < entry_count;
1533                      j++, flash++) {
1534                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
1535                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1536                                 sc->bce_flash_info = flash;
1537                                 break;
1538                         }
1539                 }
1540         } else {
1541                 /* Flash interface not yet reconfigured. */
1542                 u32 mask;
1543
1544                 DBPRINT(sc,BCE_INFO_LOAD, 
1545                         "bce_init_nvram(): Flash was NOT reconfigured.\n");
1546
1547                 if (val & (1 << 23))
1548                         mask = FLASH_BACKUP_STRAP_MASK;
1549                 else
1550                         mask = FLASH_STRAP_MASK;
1551
1552                 /* Look for the matching NVRAM device configuration data. */
1553                 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1554
1555                         /* Check if the device matches any of the known devices. */
1556                         if ((val & mask) == (flash->strapping & mask)) {
1557                                 /* Found a device match. */
1558                                 sc->bce_flash_info = flash;
1559
1560                                 /* Request access to the flash interface. */
1561                                 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1562                                         return rc;
1563
1564                                 /* Reconfigure the flash interface. */
1565                                 bce_enable_nvram_access(sc);
1566                                 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1567                                 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1568                                 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1569                                 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1570                                 bce_disable_nvram_access(sc);
1571                                 bce_release_nvram_lock(sc);
1572
1573                                 break;
1574                         }
1575                 }
1576         }
1577
1578         /* Check if a matching device was found. */
1579         if (j == entry_count) {
1580                 sc->bce_flash_info = NULL;
1581                 BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n", 
1582                         __FILE__, __LINE__);
1583                 return ENODEV;
1584         }
1585
1586         /* Determine the flash size reported through the shared memory interface. */
1587         val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1588         val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1589         if (val)
1590                 sc->bce_flash_size = val;
1591         else
1592                 sc->bce_flash_size = sc->bce_flash_info->total_size;
1593
1594         DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1595                 sc->bce_flash_info->total_size);
1596
1597         DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1598
1599         return rc;
1600 }
1601
1602
1603 /****************************************************************************/
1604 /* Read an arbitrary range of data from NVRAM.                              */
1605 /*                                                                          */
1606 /* Prepares the NVRAM interface for access and reads the requested data     */
1607 /* into the supplied buffer.                                                */
1608 /*                                                                          */
1609 /* Returns:                                                                 */
1610 /*   0 on success and the data read, positive value on failure.             */
1611 /****************************************************************************/
1612 static int
1613 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1614         int buf_size)
1615 {
1616         int rc = 0;
1617         u32 cmd_flags, offset32, len32, extra;
1618
1619         if (buf_size == 0)
1620                 return 0;
1621
1622         /* Request access to the flash interface. */
1623         if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1624                 return rc;
1625
1626         /* Enable access to flash interface */
1627         bce_enable_nvram_access(sc);
1628
1629         len32 = buf_size;
1630         offset32 = offset;
1631         extra = 0;
1632
1633         cmd_flags = 0;
1634
1635         if (offset32 & 3) {
1636                 u8 buf[4];
1637                 u32 pre_len;
1638
1639                 offset32 &= ~3;
1640                 pre_len = 4 - (offset & 3);
1641
1642                 if (pre_len >= len32) {
1643                         pre_len = len32;
1644                         cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1645                 }
1646                 else {
1647                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1648                 }
1649
1650                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1651
1652                 if (rc)
1653                         return rc;
1654
1655                 memcpy(ret_buf, buf + (offset & 3), pre_len);
1656
1657                 offset32 += 4;
1658                 ret_buf += pre_len;
1659                 len32 -= pre_len;
1660         }
1661
1662         if (len32 & 3) {
1663                 extra = 4 - (len32 & 3);
1664                 len32 = (len32 + 4) & ~3;
1665         }
1666
1667         if (len32 == 4) {
1668                 u8 buf[4];
1669
1670                 if (cmd_flags)
1671                         cmd_flags = BCE_NVM_COMMAND_LAST;
1672                 else
1673                         cmd_flags = BCE_NVM_COMMAND_FIRST |
1674                                     BCE_NVM_COMMAND_LAST;
1675
1676                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1677
1678                 memcpy(ret_buf, buf, 4 - extra);
1679         }
1680         else if (len32 > 0) {
1681                 u8 buf[4];
1682
1683                 /* Read the first word. */
1684                 if (cmd_flags)
1685                         cmd_flags = 0;
1686                 else
1687                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1688
1689                 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1690
1691                 /* Advance to the next dword. */
1692                 offset32 += 4;
1693                 ret_buf += 4;
1694                 len32 -= 4;
1695
1696                 while (len32 > 4 && rc == 0) {
1697                         rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1698
1699                         /* Advance to the next dword. */
1700                         offset32 += 4;
1701                         ret_buf += 4;
1702                         len32 -= 4;
1703                 }
1704
1705                 if (rc)
1706                         return rc;
1707
1708                 cmd_flags = BCE_NVM_COMMAND_LAST;
1709                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1710
1711                 memcpy(ret_buf, buf, 4 - extra);
1712         }
1713
1714         /* Disable access to flash interface and release the lock. */
1715         bce_disable_nvram_access(sc);
1716         bce_release_nvram_lock(sc);
1717
1718         return rc;
1719 }
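/*
 * The alignment handling above is easiest to follow with an example.
 * For offset = 6 and buf_size = 6 the leading fixup reads the dword at
 * offset 4 with BCE_NVM_COMMAND_FIRST and copies bytes 2-3 into the
 * caller's buffer (pre_len = 2), leaving offset32 = 8 and len32 = 4;
 * the remaining dword is then read with BCE_NVM_COMMAND_LAST and copied
 * in full since extra = 0.
 */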
1720
1721
1722 #ifdef BCE_NVRAM_WRITE_SUPPORT
1723 /****************************************************************************/
1724 /* Write an arbitrary range of data to NVRAM.                               */
1725 /*                                                                          */
1726 /* Prepares the NVRAM interface for write access and writes the requested   */
1727 /* data from the supplied buffer.  The caller is responsible for            */
1728 /* calculating any appropriate CRCs.                                        */
1729 /*                                                                          */
1730 /* Returns:                                                                 */
1731 /*   0 on success, positive value on failure.                               */
1732 /****************************************************************************/
1733 static int
1734 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1735         int buf_size)
1736 {
1737         u32 written, offset32, len32;
1738         u8 *buf, start[4], end[4];
1739         int rc = 0;
1740         int align_start, align_end;
1741
1742         buf = data_buf;
1743         offset32 = offset;
1744         len32 = buf_size;
1745         align_start = align_end = 0;
1746
1747         if ((align_start = (offset32 & 3))) {
1748                 offset32 &= ~3;
1749                 len32 += align_start;
1750                 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1751                         return rc;
1752         }
1753
1754         if (len32 & 3) {
1755                 if ((len32 > 4) || !align_start) {
1756                         align_end = 4 - (len32 & 3);
1757                         len32 += align_end;
1758                         if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1759                                 end, 4))) {
1760                                 return rc;
1761                         }
1762                 }
1763         }
1764
1765         if (align_start || align_end) {
1766                 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1767                 if (buf == NULL)
1768                         return ENOMEM;
1769                 if (align_start) {
1770                         memcpy(buf, start, 4);
1771                 }
1772                 if (align_end) {
1773                         memcpy(buf + len32 - 4, end, 4);
1774                 }
1775                 memcpy(buf + align_start, data_buf, buf_size);
1776         }
1777
1778         written = 0;
1779         while ((written < len32) && (rc == 0)) {
1780                 u32 page_start, page_end, data_start, data_end;
1781                 u32 addr, cmd_flags;
1782                 int i;
1783                 u8 flash_buffer[264];
1784
1785                 /* Find the page_start addr */
1786                 page_start = offset32 + written;
1787                 page_start -= (page_start % sc->bce_flash_info->page_size);
1788                 /* Find the page_end addr */
1789                 page_end = page_start + sc->bce_flash_info->page_size;
1790                 /* Find the data_start addr */
1791                 data_start = (written == 0) ? offset32 : page_start;
1792                 /* Find the data_end addr */
1793                 data_end = (page_end > offset32 + len32) ?
1794                         (offset32 + len32) : page_end;
1795
1796                 /* Request access to the flash interface. */
1797                 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1798                         goto nvram_write_end;
1799
1800                 /* Enable access to flash interface */
1801                 bce_enable_nvram_access(sc);
1802
1803                 cmd_flags = BCE_NVM_COMMAND_FIRST;
1804                 if (sc->bce_flash_info->buffered == 0) {
1805                         int j;
1806
1807                         /* Read the whole page into the buffer
1808                          * (non-buffered flash only). */
1809                         for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1810                                 if (j == (sc->bce_flash_info->page_size - 4)) {
1811                                         cmd_flags |= BCE_NVM_COMMAND_LAST;
1812                                 }
1813                                 rc = bce_nvram_read_dword(sc,
1814                                         page_start + j,
1815                                         &flash_buffer[j],
1816                                         cmd_flags);
1817
1818                                 if (rc)
1819                                         goto nvram_write_end;
1820
1821                                 cmd_flags = 0;
1822                         }
1823                 }
1824
1825                 /* Enable writes to flash interface (unlock write-protect) */
1826                 if ((rc = bce_enable_nvram_write(sc)) != 0)
1827                         goto nvram_write_end;
1828
1829                 /* Erase the page */
1830                 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1831                         goto nvram_write_end;
1832
1833                 /* Re-enable the write again for the actual write */
1834                 bce_enable_nvram_write(sc);
1835
1836                 /* Loop to write back the buffer data from page_start to
1837                  * data_start */
1838                 i = 0;
1839                 if (sc->bce_flash_info->buffered == 0) {
1840                         for (addr = page_start; addr < data_start;
1841                                 addr += 4, i += 4) {
1842
1843                                 rc = bce_nvram_write_dword(sc, addr,
1844                                         &flash_buffer[i], cmd_flags);
1845
1846                                 if (rc != 0)
1847                                         goto nvram_write_end;
1848
1849                                 cmd_flags = 0;
1850                         }
1851                 }
1852
1853                 /* Loop to write the new data from data_start to data_end */
1854                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1855                         if ((addr == page_end - 4) ||
1856                                 ((sc->bce_flash_info->buffered) &&
1857                                  (addr == data_end - 4))) {
1858
1859                                 cmd_flags |= BCE_NVM_COMMAND_LAST;
1860                         }
1861                         rc = bce_nvram_write_dword(sc, addr, buf,
1862                                 cmd_flags);
1863
1864                         if (rc != 0)
1865                                 goto nvram_write_end;
1866
1867                         cmd_flags = 0;
1868                         buf += 4;
1869                 }
1870
1871                 /* Loop to write back the buffer data from data_end
1872                  * to page_end */
1873                 if (sc->bce_flash_info->buffered == 0) {
1874                         for (addr = data_end; addr < page_end;
1875                                 addr += 4, i += 4) {
1876
1877                                 if (addr == page_end-4) {
1878                                         cmd_flags = BCE_NVM_COMMAND_LAST;
1879                                 }
1880                                 rc = bce_nvram_write_dword(sc, addr,
1881                                         &flash_buffer[i], cmd_flags);
1882
1883                                 if (rc != 0)
1884                                         goto nvram_write_end;
1885
1886                                 cmd_flags = 0;
1887                         }
1888                 }
1889
1890                 /* Disable writes to flash interface (lock write-protect) */
1891                 bce_disable_nvram_write(sc);
1892
1893                 /* Disable access to flash interface */
1894                 bce_disable_nvram_access(sc);
1895                 bce_release_nvram_lock(sc);
1896
1897                 /* Increment written */
1898                 written += data_end - data_start;
1899         }
1900
1901 nvram_write_end:
1902         if (align_start || align_end)
1903                 free(buf, M_DEVBUF);
1904
1905         return rc;
1906 }
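/*
 * In outline, each pass through the loop above performs a read-modify-
 * write of one flash page: read the untouched portions of the page into
 * flash_buffer (non-buffered parts only), erase the page, write back the
 * leading old data, the new data, and the trailing old data, then drop
 * write access and the lock before moving on to the next page.  As noted
 * above, the caller must supply data with any required CRCs already
 * computed.
 */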
1907 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1908
1909
1910 /****************************************************************************/
1911 /* Verifies that NVRAM is accessible and contains valid data.               */
1912 /*                                                                          */
1913 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1914 /* correct.                                                                 */
1915 /*                                                                          */
1916 /* Returns:                                                                 */
1917 /*   0 on success, positive value on failure.                               */
1918 /****************************************************************************/
1919 static int
1920 bce_nvram_test(struct bce_softc *sc)
1921 {
1922         u32 buf[BCE_NVRAM_SIZE / 4];
1923         u8 *data = (u8 *) buf;
1924         int rc = 0;
1925         u32 magic, csum;
1926
1927
1928         /*
1929          * Check that the device NVRAM is valid by reading
1930          * the magic value at offset 0.
1931          */
1932         if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1933                 goto bce_nvram_test_done;
1934
1935
1936         magic = bce_be32toh(buf[0]);
1937         if (magic != BCE_NVRAM_MAGIC) {
1938                 rc = ENODEV;
1939                 BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1940                         "Found: 0x%08X\n",
1941                         __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1942                 goto bce_nvram_test_done;
1943         }
1944
1945         /*
1946          * Verify that the device NVRAM includes valid
1947          * configuration data.
1948          */
1949         if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1950                 goto bce_nvram_test_done;
1951
1952         csum = ether_crc32_le(data, 0x100);
1953         if (csum != BCE_CRC32_RESIDUAL) {
1954                 rc = ENODEV;
1955                 BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1956                         "Expected: 0x%08X, Found: 0x%08X\n",
1957                         __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1958                 goto bce_nvram_test_done;
1959         }
1960
1961         csum = ether_crc32_le(data + 0x100, 0x100);
1962         if (csum != BCE_CRC32_RESIDUAL) {
1963                 BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1964                         "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1965                         __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1966                 rc = ENODEV;
1967         }
1968
1969 bce_nvram_test_done:
1970         return rc;
1971 }
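/*
 * The checks above rely on the usual CRC-32 residual property: when a
 * block carries its own CRC at the end, computing the CRC over the whole
 * block (data plus stored CRC) yields a constant value regardless of the
 * data, and that constant is what BCE_CRC32_RESIDUAL is compared against.
 * Each 0x100-byte configuration region is therefore expected to end with
 * its own CRC.
 */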
1972
1973
1974 /****************************************************************************/
1975 /* Free any DMA memory owned by the driver.                                 */
1976 /*                                                                          */
1977 /* Scans through each data structure that requires DMA memory and frees     */
1978 /* the memory if allocated.                                                 */
1979 /*                                                                          */
1980 /* Returns:                                                                 */
1981 /*   Nothing.                                                               */
1982 /****************************************************************************/
1983 static void
1984 bce_dma_free(struct bce_softc *sc)
1985 {
1986         int i;
1987
1988         DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1989
1990         /* Destroy the status block. */
1991         if (sc->status_block != NULL)
1992                 bus_dmamem_free(
1993                         sc->status_tag,
1994                     sc->status_block,
1995                     sc->status_map);
1996
1997         if (sc->status_map != NULL) {
1998                 bus_dmamap_unload(
1999                         sc->status_tag,
2000                     sc->status_map);
2001                 bus_dmamap_destroy(sc->status_tag,
2002                     sc->status_map);
2003         }
2004
2005         if (sc->status_tag != NULL)
2006                 bus_dma_tag_destroy(sc->status_tag);
2007
2008
2009         /* Destroy the statistics block. */
2010         if (sc->stats_block != NULL)
2011                 bus_dmamem_free(
2012                         sc->stats_tag,
2013                     sc->stats_block,
2014                     sc->stats_map);
2015
2016         if (sc->stats_map != NULL) {
2017                 bus_dmamap_unload(
2018                         sc->stats_tag,
2019                     sc->stats_map);
2020                 bus_dmamap_destroy(sc->stats_tag,
2021                     sc->stats_map);
2022         }
2023
2024         if (sc->stats_tag != NULL)
2025                 bus_dma_tag_destroy(sc->stats_tag);
2026
2027
2028         /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2029         for (i = 0; i < TX_PAGES; i++ ) {
2030                 if (sc->tx_bd_chain[i] != NULL)
2031                         bus_dmamem_free(
2032                                 sc->tx_bd_chain_tag,
2033                             sc->tx_bd_chain[i],
2034                             sc->tx_bd_chain_map[i]);
2035
2036                 if (sc->tx_bd_chain_map[i] != NULL) {
2037                         bus_dmamap_unload(
2038                                 sc->tx_bd_chain_tag,
2039                         sc->tx_bd_chain_map[i]);
2040                         bus_dmamap_destroy(
2041                                 sc->tx_bd_chain_tag,
2042                             sc->tx_bd_chain_map[i]);
2043                 }
2044
2045         }
2046
2047         /* Destroy the TX buffer descriptor tag. */
2048         if (sc->tx_bd_chain_tag != NULL)
2049                 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2050
2051
2052         /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2053         for (i = 0; i < RX_PAGES; i++ ) {
2054                 if (sc->rx_bd_chain[i] != NULL)
2055                         bus_dmamem_free(
2056                                 sc->rx_bd_chain_tag,
2057                             sc->rx_bd_chain[i],
2058                             sc->rx_bd_chain_map[i]);
2059
2060                 if (sc->rx_bd_chain_map[i] != NULL) {
2061                         bus_dmamap_unload(
2062                                 sc->rx_bd_chain_tag,
2063                         sc->rx_bd_chain_map[i]);
2064                         bus_dmamap_destroy(
2065                                 sc->rx_bd_chain_tag,
2066                             sc->rx_bd_chain_map[i]);
2067                 }
2068         }
2069
2070         /* Destroy the RX buffer descriptor tag. */
2071         if (sc->rx_bd_chain_tag != NULL)
2072                 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2073
2074
2075         /* Unload and destroy the TX mbuf maps. */
2076         for (i = 0; i < TOTAL_TX_BD; i++) {
2077                 if (sc->tx_mbuf_map[i] != NULL) {
2078                         bus_dmamap_unload(sc->tx_mbuf_tag, 
2079                                 sc->tx_mbuf_map[i]);
2080                         bus_dmamap_destroy(sc->tx_mbuf_tag, 
2081                                 sc->tx_mbuf_map[i]);
2082                 }
2083         }
2084
2085         /* Destroy the TX mbuf tag. */
2086         if (sc->tx_mbuf_tag != NULL)
2087                 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2088
2089
2090         /* Unload and destroy the RX mbuf maps. */
2091         for (i = 0; i < TOTAL_RX_BD; i++) {
2092                 if (sc->rx_mbuf_map[i] != NULL) {
2093                         bus_dmamap_unload(sc->rx_mbuf_tag, 
2094                                 sc->rx_mbuf_map[i]);
2095                         bus_dmamap_destroy(sc->rx_mbuf_tag, 
2096                                 sc->rx_mbuf_map[i]);
2097                 }
2098         }
2099
2100         /* Destroy the RX mbuf tag. */
2101         if (sc->rx_mbuf_tag != NULL)
2102                 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2103
2104
2105         /* Destroy the parent tag */
2106         if (sc->parent_tag != NULL)
2107                 bus_dma_tag_destroy(sc->parent_tag);
2108
2109         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2110
2111 }
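/*
 * For each resource above the teardown order mirrors allocation in
 * reverse: the DMA memory is freed, the map is unloaded and destroyed,
 * then the tag is destroyed, with the parent tag destroyed last once no
 * child tags remain.
 */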
2112
2113
2114 /****************************************************************************/
2115 /* Get DMA memory from the OS.                                              */
2116 /*                                                                          */
2117 /* Validates that the OS has provided DMA buffers in response to a          */
2118 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2119 /* When the callback is used the OS will return 0 for the mapping function  */
2120 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2121 /* failures back to the caller.                                             */
2122 /*                                                                          */
2123 /* Returns:                                                                 */
2124 /*   Nothing.                                                               */
2125 /****************************************************************************/
2126 static void
2127 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2128 {
2129         struct bce_dmamap_arg *map_arg = arg;
2130         struct bce_softc *sc = map_arg->sc;
2131
2132         /* Simulate a mapping failure. */
2133         DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2134                 BCE_PRINTF(sc, "%s(%d): Simulating DMA mapping error.\n",
2135                         __FILE__, __LINE__);
2136                 error = ENOMEM);
2137                 
2138         /* Check for an error and signal the caller that an error occurred. */
2139         if (error || (nseg > map_arg->maxsegs)) {
2140                 BCE_PRINTF(sc, "%s(%d): DMA mapping error! error = %d, "
2141                 "nseg = %d, maxsegs = %d\n",
2142                         __FILE__, __LINE__, error, nseg, map_arg->maxsegs);
2143                 map_arg->maxsegs = 0;
2144                 goto bce_dma_map_addr_exit;
2145         }
2146
2147         map_arg->busaddr = segs->ds_addr;
2148
2149 bce_dma_map_addr_exit:
2150         return;
2151 }
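/*
 * This is the usual busdma callback arrangement: bce_dma_alloc() below
 * passes a struct bce_dmamap_arg to bus_dmamap_load(), the callback
 * records the single segment's bus address in map_arg->busaddr, and a
 * zeroed map_arg->maxsegs is how a failed mapping is signalled back,
 * since the callback itself cannot return a value.
 */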
2152
2153
2154 /****************************************************************************/
2155 /* Map TX buffers into TX buffer descriptors.                               */
2156 /*                                                                          */
2157 /* Given a series of DMA memory segments containing an outgoing frame, map  */
2158 /* the segments into the tx_bd structure used by the hardware.              */
2159 /*                                                                          */
2160 /* Returns:                                                                 */
2161 /*   Nothing.                                                               */
2162 /****************************************************************************/
2163 static void
2164 bce_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs,
2165         int nseg, bus_size_t mapsize, int error)
2166 {
2167         struct bce_dmamap_arg *map_arg;
2168         struct bce_softc *sc;
2169         struct tx_bd *txbd = NULL;
2170         int i = 0;
2171         u16 prod, chain_prod;
2172         u32     prod_bseq;
2173 #ifdef BCE_DEBUG
2174         u16 debug_prod;
2175 #endif
2176
2177         map_arg = arg;
2178         sc = map_arg->sc;
2179
2180         if (error) {
2181                 DBPRINT(sc, BCE_WARN, "%s(): Called with error = %d\n",
2182                         __FUNCTION__, error);
2183                 return;
2184         }
2185
2186         /* Signal error to caller if there's too many segments */
2187         if (nseg > map_arg->maxsegs) {
2188                 DBPRINT(sc, BCE_WARN,
2189                         "%s(): Mapped TX descriptors: max segs = %d, "
2190                         "actual segs = %d\n",
2191                         __FUNCTION__, map_arg->maxsegs, nseg);
2192
2193                 map_arg->maxsegs = 0;
2194                 return;
2195         }
2196
2197         /* prod points to an empty tx_bd at this point. */
2198         prod       = map_arg->prod;
2199         chain_prod = map_arg->chain_prod;
2200         prod_bseq  = map_arg->prod_bseq;
2201
2202 #ifdef BCE_DEBUG
2203         debug_prod = chain_prod;
2204 #endif
2205
2206         DBPRINT(sc, BCE_INFO_SEND,
2207                 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
2208                 "prod_bseq = 0x%08X\n",
2209                 __FUNCTION__, prod, chain_prod, prod_bseq);
2210
2211         /*
2212          * Cycle through each mbuf segment that makes up
2213          * the outgoing frame, gathering the mapping info
2214          * for that segment and creating a tx_bd for
2215          * the mbuf.
2216          */
2217
2218         txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2219
2220         /* Setup the first tx_bd for the first segment. */
2221         txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2222         txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2223         txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
2224         txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags |
2225                         TX_BD_FLAGS_START);
2226         prod_bseq += segs[i].ds_len;
2227
2228         /* Set up any remaining segments. */
2229         for (i = 1; i < nseg; i++) {
2230                 prod       = NEXT_TX_BD(prod);
2231                 chain_prod = TX_CHAIN_IDX(prod);
2232
2233                 txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2234
2235                 txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2236                 txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2237                 txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
2238                 txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags);
2239
2240                 prod_bseq += segs[i].ds_len;
2241         }
2242
2243         /* Set the END flag on the last TX buffer descriptor. */
2244         txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END);
2245
2246         DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nseg));
2247
2248         DBPRINT(sc, BCE_INFO_SEND,
2249                 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
2250                 "prod_bseq = 0x%08X\n",
2251                 __FUNCTION__, prod, chain_prod, prod_bseq);
2252
2253         /* prod points to the last tx_bd at this point. */
2254         map_arg->maxsegs    = nseg;
2255         map_arg->prod       = prod;
2256         map_arg->chain_prod = chain_prod;
2257         map_arg->prod_bseq  = prod_bseq;
2258 }
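/*
 * Each DMA segment produced by the load maps one-to-one onto a tx_bd:
 * the first descriptor is flagged with TX_BD_FLAGS_START, the last with
 * TX_BD_FLAGS_END, and the updated prod, chain_prod, and prod_bseq values
 * are passed back through map_arg so the caller can continue from where
 * the mapping left off.
 */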
2259
2260
2261 /****************************************************************************/
2262 /* Allocate any DMA memory needed by the driver.                            */
2263 /*                                                                          */
2264 /* Allocates DMA memory needed for the various global structures needed by  */
2265 /* hardware.                                                                */
2266 /*                                                                          */
2267 /* Returns:                                                                 */
2268 /*   0 for success, positive value for failure.                             */
2269 /****************************************************************************/
2270 static int
2271 bce_dma_alloc(device_t dev)
2272 {
2273         struct bce_softc *sc;
2274         int i, error, rc = 0;
2275         struct bce_dmamap_arg map_arg;
2276
2277         sc = device_get_softc(dev);
2278
2279         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2280
2281         /*
2282          * Allocate the parent bus DMA tag appropriate for PCI.
2283          */
2284         if (bus_dma_tag_create(NULL,            /* parent     */
2285                         BCE_DMA_ALIGN,                          /* alignment  */
2286                         BCE_DMA_BOUNDARY,                       /* boundary   */
2287                         sc->max_bus_addr,                       /* lowaddr    */
2288                         BUS_SPACE_MAXADDR,                      /* highaddr   */
2289                         NULL,                                           /* filterfunc */
2290                         NULL,                                           /* filterarg  */
2291                         MAXBSIZE,                                       /* maxsize    */
2292                         BUS_SPACE_UNRESTRICTED,         /* nsegments  */
2293                         BUS_SPACE_MAXSIZE_32BIT,        /* maxsegsize */
2294                         0,                                                      /* flags      */
2295                         NULL,                                           /* lockfunc   */
2296                         NULL,                                           /* lockarg    */
2297                         &sc->parent_tag)) {
2298                 BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2299                         __FILE__, __LINE__);
2300                 rc = ENOMEM;
2301                 goto bce_dma_alloc_exit;
2302         }
2303
2304         /*
2305          * Create a DMA tag for the status block, allocate and clear the
2306          * memory, map the memory into DMA space, and fetch the physical 
2307          * address of the block.
2308          */
2309         if (bus_dma_tag_create(
2310                         sc->parent_tag,                 /* parent      */
2311                 BCE_DMA_ALIGN,                  /* alignment   */
2312                 BCE_DMA_BOUNDARY,               /* boundary    */
2313                 sc->max_bus_addr,               /* lowaddr     */
2314                 BUS_SPACE_MAXADDR,              /* highaddr    */
2315                 NULL,                                   /* filterfunc  */
2316                 NULL,                                   /* filterarg   */
2317                 BCE_STATUS_BLK_SZ,              /* maxsize     */
2318                 1,                                              /* nsegments   */
2319                 BCE_STATUS_BLK_SZ,              /* maxsegsize  */
2320                 0,                                              /* flags       */
2321                 NULL,                                   /* lockfunc    */
2322                 NULL,                                   /* lockarg     */
2323                 &sc->status_tag)) {
2324                 BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2325                         __FILE__, __LINE__);
2326                 rc = ENOMEM;
2327                 goto bce_dma_alloc_exit;
2328         }
2329
2330         if(bus_dmamem_alloc(
2331                         sc->status_tag,                         /* dmat        */
2332                 (void **)&sc->status_block,     /* vaddr       */
2333                 BUS_DMA_NOWAIT,                                 /* flags       */
2334                 &sc->status_map)) {
2335                 BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2336                         __FILE__, __LINE__);
2337                 rc = ENOMEM;
2338                 goto bce_dma_alloc_exit;
2339         }
2340
2341         bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2342
2343         map_arg.sc = sc;
2344         map_arg.maxsegs = 1;
2345
2346         error = bus_dmamap_load(
2347                         sc->status_tag,                 /* dmat        */
2348                 sc->status_map,                 /* map         */
2349                 sc->status_block,               /* buf         */
2350                 BCE_STATUS_BLK_SZ,              /* buflen      */
2351                 bce_dma_map_addr,               /* callback    */
2352                 &map_arg,                               /* callbackarg */
2353                 BUS_DMA_NOWAIT);                /* flags       */
2354                 
2355         if(error || (map_arg.maxsegs == 0)) {
2356                 BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2357                         __FILE__, __LINE__);
2358                 rc = ENOMEM;
2359                 goto bce_dma_alloc_exit;
2360         }
2361
2362         sc->status_block_paddr = map_arg.busaddr;
2363         /* DRC - Fix for 64 bit addresses. */
2364         DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2365                 (u32) sc->status_block_paddr);
2366
2367         /*
2368          * Create a DMA tag for the statistics block, allocate and clear the
2369          * memory, map the memory into DMA space, and fetch the physical 
2370          * address of the block.
2371          */
2372         if (bus_dma_tag_create(
2373                         sc->parent_tag,                 /* parent      */
2374                 BCE_DMA_ALIGN,                  /* alignment   */
2375                 BCE_DMA_BOUNDARY,               /* boundary    */
2376                 sc->max_bus_addr,               /* lowaddr     */
2377                 BUS_SPACE_MAXADDR,              /* highaddr    */
2378                 NULL,                                   /* filterfunc  */
2379                 NULL,                                   /* filterarg   */
2380                 BCE_STATS_BLK_SZ,               /* maxsize     */
2381                 1,                                              /* nsegments   */
2382                 BCE_STATS_BLK_SZ,               /* maxsegsize  */
2383                 0,                                              /* flags       */
2384                 NULL,                                   /* lockfunc    */
2385                 NULL,                                   /* lockarg     */
2386                 &sc->stats_tag)) {
2387                 BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2388                         __FILE__, __LINE__);
2389                 rc = ENOMEM;
2390                 goto bce_dma_alloc_exit;
2391         }
2392
2393         if (bus_dmamem_alloc(
2394                         sc->stats_tag,                          /* dmat        */
2395                 (void **)&sc->stats_block,      /* vaddr       */
2396                 BUS_DMA_NOWAIT,                         /* flags       */
2397                 &sc->stats_map)) {
2398                 BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2399                         __FILE__, __LINE__);
2400                 rc = ENOMEM;
2401                 goto bce_dma_alloc_exit;
2402         }
2403
2404         bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2405
2406         map_arg.sc = sc;
2407         map_arg.maxsegs = 1;
2408
2409         error = bus_dmamap_load(
2410                         sc->stats_tag,          /* dmat        */
2411                 sc->stats_map,          /* map         */
2412                 sc->stats_block,        /* buf         */
2413                 BCE_STATS_BLK_SZ,       /* buflen      */
2414                 bce_dma_map_addr,       /* callback    */
2415                 &map_arg,                       /* callbackarg */
2416                 BUS_DMA_NOWAIT);        /* flags       */
2417
2418         if(error || (map_arg.maxsegs == 0)) {
2419                 BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2420                         __FILE__, __LINE__);
2421                 rc = ENOMEM;
2422                 goto bce_dma_alloc_exit;
2423         }
2424
2425         sc->stats_block_paddr = map_arg.busaddr;
2426         /* DRC - Fix for 64 bit address. */
2427         DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n", 
2428                 (u32) sc->stats_block_paddr);
2429
2430         /*
2431          * Create a DMA tag for the TX buffer descriptor chain,
2432          * allocate and clear the memory, and fetch the
2433          * physical address of the block.
2434          */
2435         if(bus_dma_tag_create(
2436                         sc->parent_tag,           /* parent      */
2437                 BCM_PAGE_SIZE,            /* alignment   */
2438                 BCE_DMA_BOUNDARY,         /* boundary    */
2439                         sc->max_bus_addr,         /* lowaddr     */
2440                         BUS_SPACE_MAXADDR,        /* highaddr    */
2441                         NULL,                             /* filterfunc  */ 
2442                         NULL,                             /* filterarg   */
2443                         BCE_TX_CHAIN_PAGE_SZ, /* maxsize     */
2444                         1,                                        /* nsegments   */
2445                         BCE_TX_CHAIN_PAGE_SZ, /* maxsegsize  */
2446                         0,                                        /* flags       */
2447                         NULL,                             /* lockfunc    */
2448                         NULL,                             /* lockarg     */
2449                         &sc->tx_bd_chain_tag)) {
2450                 BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2451                         __FILE__, __LINE__);
2452                 rc = ENOMEM;
2453                 goto bce_dma_alloc_exit;
2454         }
2455
2456         for (i = 0; i < TX_PAGES; i++) {
2457
2458                 if(bus_dmamem_alloc(
2459                                 sc->tx_bd_chain_tag,                    /* tag   */
2460                         (void **)&sc->tx_bd_chain[i],   /* vaddr */
2461                         BUS_DMA_NOWAIT,                                 /* flags */
2462                         &sc->tx_bd_chain_map[i])) {
2463                         BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2464                                 "chain DMA memory!\n", __FILE__, __LINE__);
2465                         rc = ENOMEM;
2466                         goto bce_dma_alloc_exit;
2467                 }
2468
2469                 map_arg.maxsegs = 1;
2470                 map_arg.sc = sc;
2471
2472                 error = bus_dmamap_load(
2473                                 sc->tx_bd_chain_tag,     /* dmat        */
2474                         sc->tx_bd_chain_map[i],  /* map         */
2475                         sc->tx_bd_chain[i],              /* buf         */
2476                         BCE_TX_CHAIN_PAGE_SZ,    /* buflen      */
2477                         bce_dma_map_addr,                /* callback    */
2478                         &map_arg,                                /* callbackarg */
2479                         BUS_DMA_NOWAIT);                 /* flags       */
2480
2481                 if(error || (map_arg.maxsegs == 0)) {
2482                         BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2483                                 __FILE__, __LINE__);
2484                         rc = ENOMEM;
2485                         goto bce_dma_alloc_exit;
2486                 }
2487
2488                 sc->tx_bd_chain_paddr[i] = map_arg.busaddr;
2489                 /* DRC - Fix for 64 bit systems. */
2490                 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 
2491                         i, (u32) sc->tx_bd_chain_paddr[i]);
2492         }
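        /*
         * The per-page bus addresses saved above are later split into
         * 32-bit halves with BCE_ADDR_HI()/BCE_ADDR_LO() and written into
         * the chain-page next pointers by bce_init_tx_chain().
         */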
2493
2494         /* Create a DMA tag for TX mbufs. */
2495         if (bus_dma_tag_create(
2496                         sc->parent_tag,                 /* parent      */
2497                 BCE_DMA_ALIGN,                  /* alignment   */
2498                 BCE_DMA_BOUNDARY,               /* boundary    */
2499                         sc->max_bus_addr,               /* lowaddr     */
2500                         BUS_SPACE_MAXADDR,              /* highaddr    */
2501                         NULL,                                   /* filterfunc  */
2502                         NULL,                                   /* filterarg   */
2503                         MCLBYTES * BCE_MAX_SEGMENTS,    /* maxsize     */
2504                         BCE_MAX_SEGMENTS,               /* nsegments   */
2505                         MCLBYTES,                               /* maxsegsize  */
2506                         0,                                              /* flags       */
2507                         NULL,                                   /* lockfunc    */
2508                         NULL,                                   /* lockarg     */
2509                 &sc->tx_mbuf_tag)) {
2510                 BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2511                         __FILE__, __LINE__);
2512                 rc = ENOMEM;
2513                 goto bce_dma_alloc_exit;
2514         }
2515
2516         /* Create DMA maps for the TX mbuf clusters. */
2517         for (i = 0; i < TOTAL_TX_BD; i++) {
2518                 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, 
2519                         &sc->tx_mbuf_map[i])) {
2520                         BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2521                                 __FILE__, __LINE__);
2522                         rc = ENOMEM;
2523                         goto bce_dma_alloc_exit;
2524                 }
2525         }
2526
2527         /*
2528          * Create a DMA tag for the RX buffer descriptor chain,
2529          * allocate and clear the memory, and fetch the physical
2530          * address of the blocks.
2531          */
2532         if (bus_dma_tag_create(
2533                         sc->parent_tag,                 /* parent      */
2534                 BCM_PAGE_SIZE,                  /* alignment   */
2535                 BCE_DMA_BOUNDARY,               /* boundary    */
2536                         sc->max_bus_addr,               /* lowaddr     */
2537                         BUS_SPACE_MAXADDR,              /* highaddr    */
2538                         NULL,                                   /* filterfunc  */
2539                         NULL,                                   /* filterarg   */
2540                         BCE_RX_CHAIN_PAGE_SZ,   /* maxsize     */
2541                         1,                                              /* nsegments   */
2542                         BCE_RX_CHAIN_PAGE_SZ,   /* maxsegsize  */
2543                         0,                                              /* flags       */
2544                         NULL,                                   /* lockfunc    */
2545                         NULL,                                   /* lockarg     */
2546                         &sc->rx_bd_chain_tag)) {
2547                 BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2548                         __FILE__, __LINE__);
2549                 rc = ENOMEM;
2550                 goto bce_dma_alloc_exit;
2551         }
2552
2553         for (i = 0; i < RX_PAGES; i++) {
2554
2555                 if (bus_dmamem_alloc(
2556                                 sc->rx_bd_chain_tag,                    /* tag   */
2557                         (void **)&sc->rx_bd_chain[i],   /* vaddr */
2558                         BUS_DMA_NOWAIT,                                 /* flags */
2559                         &sc->rx_bd_chain_map[i])) {
2560                         BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2561                                 "DMA memory!\n", __FILE__, __LINE__);
2562                         rc = ENOMEM;
2563                         goto bce_dma_alloc_exit;
2564                 }
2565
2566                 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2567
2568                 map_arg.maxsegs = 1;
2569                 map_arg.sc = sc;
2570
2571                 error = bus_dmamap_load(
2572                                 sc->rx_bd_chain_tag,    /* dmat        */
2573                         sc->rx_bd_chain_map[i], /* map         */
2574                         sc->rx_bd_chain[i],             /* buf         */
2575                         BCE_RX_CHAIN_PAGE_SZ,   /* buflen      */
2576                         bce_dma_map_addr,               /* callback    */
2577                         &map_arg,                               /* callbackarg */
2578                         BUS_DMA_NOWAIT);                /* flags       */
2579
2580                 if (error || (map_arg.maxsegs == 0)) {
2581                         BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2582                                 __FILE__, __LINE__);
2583                         rc = ENOMEM;
2584                         goto bce_dma_alloc_exit;
2585                 }
2586
2587                 sc->rx_bd_chain_paddr[i] = map_arg.busaddr;
2588                 /* DRC - Fix for 64 bit systems. */
2589                 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2590                         i, (u32) sc->rx_bd_chain_paddr[i]);
2591         }
2592
2593         /*
2594          * Create a DMA tag for RX mbufs.
2595          */
2596         if (bus_dma_tag_create(
2597                         sc->parent_tag,                 /* parent      */
2598                 BCE_DMA_ALIGN,                  /* alignment   */
2599                 BCE_DMA_BOUNDARY,               /* boundary    */
2600                         sc->max_bus_addr,               /* lowaddr     */
2601                         BUS_SPACE_MAXADDR,              /* highaddr    */
2602                         NULL,                                   /* filterfunc  */
2603                         NULL,                                   /* filterarg   */
2604                         MJUM9BYTES,                             /* maxsize     */
2605                         BCE_MAX_SEGMENTS,               /* nsegments   */
2606                         MJUM9BYTES,                             /* maxsegsize  */
2607                         0,                                              /* flags       */
2608                         NULL,                                   /* lockfunc    */
2609                         NULL,                                   /* lockarg     */
2610                 &sc->rx_mbuf_tag)) {
2611                 BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2612                         __FILE__, __LINE__);
2613                 rc = ENOMEM;
2614                 goto bce_dma_alloc_exit;
2615         }
2616
2617         /* Create DMA maps for the RX mbuf clusters. */
2618         for (i = 0; i < TOTAL_RX_BD; i++) {
2619                 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2620                                 &sc->rx_mbuf_map[i])) {
2621                         BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2622                                 __FILE__, __LINE__);
2623                         rc = ENOMEM;
2624                         goto bce_dma_alloc_exit;
2625                 }
2626         }
2627
2628 bce_dma_alloc_exit:
2629         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2630
2631         return(rc);
2632 }
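
/*
 * Illustrative sketch (not part of the driver): the bus_dmamap_load()
 * calls above report their result through the bce_dma_map_addr() callback
 * defined earlier in this file.  A minimal single-segment callback of that
 * shape, assuming the argument structure matches the map_arg usage above
 * (the type name below is hypothetical), would look roughly like:
 *
 *      static void
 *      example_dma_map_addr(void *arg, bus_dma_segment_t *segs,
 *          int nseg, int error)
 *      {
 *              struct example_dmamap_arg *map_arg = arg;
 *
 *              // Signal failure to the caller by zeroing maxsegs.
 *              if (error || (nseg > map_arg->maxsegs)) {
 *                      map_arg->maxsegs = 0;
 *                      return;
 *              }
 *
 *              // Hand back the bus address of the single segment.
 *              map_arg->busaddr = segs[0].ds_addr;
 *      }
 */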
2633
2634
2635 /****************************************************************************/
2636 /* Release all resources used by the driver.                                */
2637 /*                                                                          */
2638 /* Releases all resources acquired by the driver including interrupts,      */
2639 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2640 /*                                                                          */
2641 /* Returns:                                                                 */
2642 /*   Nothing.                                                               */
2643 /****************************************************************************/
2644 static void
2645 bce_release_resources(struct bce_softc *sc)
2646 {
2647         device_t dev;
2648
2649         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2650
2651         dev = sc->bce_dev;
2652
2653         bce_dma_free(sc);
2654
2655         if (sc->bce_intrhand != NULL)
2656                 bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2657
2658         if (sc->bce_irq != NULL)
2659                 bus_release_resource(dev,
2660                         SYS_RES_IRQ,
2661                         0,
2662                         sc->bce_irq);
2663
2664         if (sc->bce_res != NULL)
2665                 bus_release_resource(dev,
2666                         SYS_RES_MEMORY,
2667                     PCIR_BAR(0),
2668                     sc->bce_res);
2669
2670         if (sc->bce_ifp != NULL)
2671                 if_free(sc->bce_ifp);
2672
2673
2674         if (mtx_initialized(&sc->bce_mtx))
2675                 BCE_LOCK_DESTROY(sc);
2676
2677         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2678
2679 }
2680
2681
2682 /****************************************************************************/
2683 /* Firmware synchronization.                                                */
2684 /*                                                                          */
2685 /* Before performing certain events such as a chip reset, synchronize with  */
2686 /* the firmware first.                                                      */
2687 /*                                                                          */
2688 /* Returns:                                                                 */
2689 /*   0 for success, positive value for failure.                             */
2690 /****************************************************************************/
2691 static int
2692 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2693 {
2694         int i, rc = 0;
2695         u32 val;
2696
2697         /* Don't waste any time if we've timed out before. */
2698         if (sc->bce_fw_timed_out) {
2699                 rc = EBUSY;
2700                 goto bce_fw_sync_exit;
2701         }
2702
2703         /* Increment the message sequence number. */
2704         sc->bce_fw_wr_seq++;
2705         msg_data |= sc->bce_fw_wr_seq;
2706
2707         DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2708
2709         /* Send the message to the bootcode driver mailbox. */
2710         REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2711
2712         /* Wait for the bootcode to acknowledge the message. */
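        /*
         * The bootcode echoes the driver's sequence number in the ACK
         * field of the firmware mailbox, so a match on the low sequence
         * bits below indicates that the message has been accepted.
         */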
2713         for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2714                 /* Check for a response in the bootcode firmware mailbox. */
2715                 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2716                 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2717                         break;
2718                 DELAY(1000);
2719         }
2720
2721         /* If we've timed out, tell the bootcode that we've stopped waiting. */
2722         if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2723                 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2724
2725                 BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2726                         "msg_data = 0x%08X\n",
2727                         __FILE__, __LINE__, msg_data);
2728
2729                 msg_data &= ~BCE_DRV_MSG_CODE;
2730                 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2731
2732                 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2733
2734                 sc->bce_fw_timed_out = 1;
2735                 rc = EBUSY;
2736         }
2737
2738 bce_fw_sync_exit:
2739         return (rc);
2740 }
2741
2742
2743 /****************************************************************************/
2744 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2745 /*                                                                          */
2746 /* Returns:                                                                 */
2747 /*   Nothing.                                                               */
2748 /****************************************************************************/
2749 static void
2750 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code, 
2751         u32 rv2p_code_len, u32 rv2p_proc)
2752 {
2753         int i;
2754         u32 val;
2755
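        /*
         * Each 64-bit RV2P instruction is written as a high/low 32-bit
         * word pair and then committed to instruction RAM location (i / 8)
         * through the appropriate processor address/command register.
         */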
2756         for (i = 0; i < rv2p_code_len; i += 8) {
2757                 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2758                 rv2p_code++;
2759                 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2760                 rv2p_code++;
2761
2762                 if (rv2p_proc == RV2P_PROC1) {
2763                         val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2764                         REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2765                 }
2766                 else {
2767                         val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2768                         REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2769                 }
2770         }
2771
2772         /* Reset the processor; the un-stall is done later. */
2773         if (rv2p_proc == RV2P_PROC1) {
2774                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2775         }
2776         else {
2777                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2778         }
2779 }
2780
2781
2782 /****************************************************************************/
2783 /* Load RISC processor firmware.                                            */
2784 /*                                                                          */
2785 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2786 /* associated with a particular processor.                                  */
2787 /*                                                                          */
2788 /* Returns:                                                                 */
2789 /*   Nothing.                                                               */
2790 /****************************************************************************/
2791 static void
2792 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2793         struct fw_info *fw)
2794 {
2795         u32 offset;
2796         u32 val;
2797
2798         /* Halt the CPU. */
2799         val = REG_RD_IND(sc, cpu_reg->mode);
2800         val |= cpu_reg->mode_value_halt;
2801         REG_WR_IND(sc, cpu_reg->mode, val);
2802         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2803
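        /*
         * The firmware sections in if_bcefw.h are linked at addresses in
         * the CPU's MIPS view; each section address is rebased onto the
         * processor's scratchpad window before its words are written with
         * REG_WR_IND().
         */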
2804         /* Load the Text area. */
2805         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2806         if (fw->text) {
2807                 int j;
2808
2809                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2810                         REG_WR_IND(sc, offset, fw->text[j]);
2811                 }
2812         }
2813
2814         /* Load the Data area. */
2815         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2816         if (fw->data) {
2817                 int j;
2818
2819                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2820                         REG_WR_IND(sc, offset, fw->data[j]);
2821                 }
2822         }
2823
2824         /* Load the SBSS area. */
2825         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2826         if (fw->sbss) {
2827                 int j;
2828
2829                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2830                         REG_WR_IND(sc, offset, fw->sbss[j]);
2831                 }
2832         }
2833
2834         /* Load the BSS area. */
2835         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2836         if (fw->bss) {
2837                 int j;
2838
2839                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2840                         REG_WR_IND(sc, offset, fw->bss[j]);
2841                 }
2842         }
2843
2844         /* Load the Read-Only area. */
2845         offset = cpu_reg->spad_base +
2846                 (fw->rodata_addr - cpu_reg->mips_view_base);
2847         if (fw->rodata) {
2848                 int j;
2849
2850                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2851                         REG_WR_IND(sc, offset, fw->rodata[j]);
2852                 }
2853         }
2854
2855         /* Clear the pre-fetch instruction. */
2856         REG_WR_IND(sc, cpu_reg->inst, 0);
2857         REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2858
2859         /* Start the CPU. */
2860         val = REG_RD_IND(sc, cpu_reg->mode);
2861         val &= ~cpu_reg->mode_value_halt;
2862         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2863         REG_WR_IND(sc, cpu_reg->mode, val);
2864 }
2865
2866
2867 /****************************************************************************/
2868 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2869 /*                                                                          */
2870 /* Loads the firmware for each CPU and starts the CPU.                      */
2871 /*                                                                          */
2872 /* Returns:                                                                 */
2873 /*   Nothing.                                                               */
2874 /****************************************************************************/
2875 static void
2876 bce_init_cpus(struct bce_softc *sc)
2877 {
2878         struct cpu_reg cpu_reg;
2879         struct fw_info fw;
2880
2881         /* Initialize the RV2P processor. */
2882         bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2883         bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2884
2885         /* Initialize the RX Processor. */
2886         cpu_reg.mode = BCE_RXP_CPU_MODE;
2887         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2888         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2889         cpu_reg.state = BCE_RXP_CPU_STATE;
2890         cpu_reg.state_value_clear = 0xffffff;
2891         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2892         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2893         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2894         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2895         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2896         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2897         cpu_reg.mips_view_base = 0x8000000;
2898
2899         fw.ver_major = bce_RXP_b06FwReleaseMajor;
2900         fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2901         fw.ver_fix = bce_RXP_b06FwReleaseFix;
2902         fw.start_addr = bce_RXP_b06FwStartAddr;
2903
2904         fw.text_addr = bce_RXP_b06FwTextAddr;
2905         fw.text_len = bce_RXP_b06FwTextLen;
2906         fw.text_index = 0;
2907         fw.text = bce_RXP_b06FwText;
2908
2909         fw.data_addr = bce_RXP_b06FwDataAddr;
2910         fw.data_len = bce_RXP_b06FwDataLen;
2911         fw.data_index = 0;
2912         fw.data = bce_RXP_b06FwData;
2913
2914         fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2915         fw.sbss_len = bce_RXP_b06FwSbssLen;
2916         fw.sbss_index = 0;
2917         fw.sbss = bce_RXP_b06FwSbss;
2918
2919         fw.bss_addr = bce_RXP_b06FwBssAddr;
2920         fw.bss_len = bce_RXP_b06FwBssLen;
2921         fw.bss_index = 0;
2922         fw.bss = bce_RXP_b06FwBss;
2923
2924         fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2925         fw.rodata_len = bce_RXP_b06FwRodataLen;
2926         fw.rodata_index = 0;
2927         fw.rodata = bce_RXP_b06FwRodata;
2928
2929         DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2930         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2931
2932         /* Initialize the TX Processor. */
2933         cpu_reg.mode = BCE_TXP_CPU_MODE;
2934         cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2935         cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2936         cpu_reg.state = BCE_TXP_CPU_STATE;
2937         cpu_reg.state_value_clear = 0xffffff;
2938         cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2939         cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2940         cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2941         cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2942         cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2943         cpu_reg.spad_base = BCE_TXP_SCRATCH;
2944         cpu_reg.mips_view_base = 0x8000000;
2945
2946         fw.ver_major = bce_TXP_b06FwReleaseMajor;
2947         fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2948         fw.ver_fix = bce_TXP_b06FwReleaseFix;
2949         fw.start_addr = bce_TXP_b06FwStartAddr;
2950
2951         fw.text_addr = bce_TXP_b06FwTextAddr;
2952         fw.text_len = bce_TXP_b06FwTextLen;
2953         fw.text_index = 0;
2954         fw.text = bce_TXP_b06FwText;
2955
2956         fw.data_addr = bce_TXP_b06FwDataAddr;
2957         fw.data_len = bce_TXP_b06FwDataLen;
2958         fw.data_index = 0;
2959         fw.data = bce_TXP_b06FwData;
2960
2961         fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2962         fw.sbss_len = bce_TXP_b06FwSbssLen;
2963         fw.sbss_index = 0;
2964         fw.sbss = bce_TXP_b06FwSbss;
2965
2966         fw.bss_addr = bce_TXP_b06FwBssAddr;
2967         fw.bss_len = bce_TXP_b06FwBssLen;
2968         fw.bss_index = 0;
2969         fw.bss = bce_TXP_b06FwBss;
2970
2971         fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2972         fw.rodata_len = bce_TXP_b06FwRodataLen;
2973         fw.rodata_index = 0;
2974         fw.rodata = bce_TXP_b06FwRodata;
2975
2976         DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2977         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2978
2979         /* Initialize the TX Patch-up Processor. */
2980         cpu_reg.mode = BCE_TPAT_CPU_MODE;
2981         cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2982         cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2983         cpu_reg.state = BCE_TPAT_CPU_STATE;
2984         cpu_reg.state_value_clear = 0xffffff;
2985         cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2986         cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2987         cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2988         cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2989         cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2990         cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2991         cpu_reg.mips_view_base = 0x8000000;
2992
2993         fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2994         fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2995         fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2996         fw.start_addr = bce_TPAT_b06FwStartAddr;
2997
2998         fw.text_addr = bce_TPAT_b06FwTextAddr;
2999         fw.text_len = bce_TPAT_b06FwTextLen;
3000         fw.text_index = 0;
3001         fw.text = bce_TPAT_b06FwText;
3002
3003         fw.data_addr = bce_TPAT_b06FwDataAddr;
3004         fw.data_len = bce_TPAT_b06FwDataLen;
3005         fw.data_index = 0;
3006         fw.data = bce_TPAT_b06FwData;
3007
3008         fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3009         fw.sbss_len = bce_TPAT_b06FwSbssLen;
3010         fw.sbss_index = 0;
3011         fw.sbss = bce_TPAT_b06FwSbss;
3012
3013         fw.bss_addr = bce_TPAT_b06FwBssAddr;
3014         fw.bss_len = bce_TPAT_b06FwBssLen;
3015         fw.bss_index = 0;
3016         fw.bss = bce_TPAT_b06FwBss;
3017
3018         fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3019         fw.rodata_len = bce_TPAT_b06FwRodataLen;
3020         fw.rodata_index = 0;
3021         fw.rodata = bce_TPAT_b06FwRodata;
3022
3023         DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3024         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3025
3026         /* Initialize the Completion Processor. */
3027         cpu_reg.mode = BCE_COM_CPU_MODE;
3028         cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3029         cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3030         cpu_reg.state = BCE_COM_CPU_STATE;
3031         cpu_reg.state_value_clear = 0xffffff;
3032         cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3033         cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3034         cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3035         cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3036         cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3037         cpu_reg.spad_base = BCE_COM_SCRATCH;
3038         cpu_reg.mips_view_base = 0x8000000;
3039
3040         fw.ver_major = bce_COM_b06FwReleaseMajor;
3041         fw.ver_minor = bce_COM_b06FwReleaseMinor;
3042         fw.ver_fix = bce_COM_b06FwReleaseFix;
3043         fw.start_addr = bce_COM_b06FwStartAddr;
3044
3045         fw.text_addr = bce_COM_b06FwTextAddr;
3046         fw.text_len = bce_COM_b06FwTextLen;
3047         fw.text_index = 0;
3048         fw.text = bce_COM_b06FwText;
3049
3050         fw.data_addr = bce_COM_b06FwDataAddr;
3051         fw.data_len = bce_COM_b06FwDataLen;
3052         fw.data_index = 0;
3053         fw.data = bce_COM_b06FwData;
3054
3055         fw.sbss_addr = bce_COM_b06FwSbssAddr;
3056         fw.sbss_len = bce_COM_b06FwSbssLen;
3057         fw.sbss_index = 0;
3058         fw.sbss = bce_COM_b06FwSbss;
3059
3060         fw.bss_addr = bce_COM_b06FwBssAddr;
3061         fw.bss_len = bce_COM_b06FwBssLen;
3062         fw.bss_index = 0;
3063         fw.bss = bce_COM_b06FwBss;
3064
3065         fw.rodata_addr = bce_COM_b06FwRodataAddr;
3066         fw.rodata_len = bce_COM_b06FwRodataLen;
3067         fw.rodata_index = 0;
3068         fw.rodata = bce_COM_b06FwRodata;
3069
3070         DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3071         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3072 }
3073
3074
3075 /****************************************************************************/
3076 /* Initialize context memory.                                               */
3077 /*                                                                          */
3078 /* Clears the memory associated with each Context ID (CID).                 */
3079 /*                                                                          */
3080 /* Returns:                                                                 */
3081 /*   Nothing.                                                               */
3082 /****************************************************************************/
3083 static void
3084 bce_init_context(struct bce_softc *sc)
3085 {
3086         u32 vcid;
3087
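        /*
         * Walk all 96 contexts (CIDs 95 through 0), zeroing PHY_CTX_SIZE
         * bytes of each through the context window registers.
         */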
3088         vcid = 96;
3089         while (vcid) {
3090                 u32 vcid_addr, pcid_addr, offset;
3091
3092                 vcid--;
3093
3094                 vcid_addr = GET_CID_ADDR(vcid);
3095                 pcid_addr = vcid_addr;
3096
3097                 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3098                 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3099
3100                 /* Zero out the context. */
3101                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3102                         CTX_WR(sc, 0x00, offset, 0);
3103                 }
3104
3105                 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3106                 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3107         }
3108 }
3109
3110
3111 /****************************************************************************/
3112 /* Fetch the permanent MAC address of the controller.                       */
3113 /*                                                                          */
3114 /* Returns:                                                                 */
3115 /*   Nothing.                                                               */
3116 /****************************************************************************/
3117 static void
3118 bce_get_mac_addr(struct bce_softc *sc)
3119 {
3120         u32 mac_lo = 0, mac_hi = 0;
3121
3122         /*
3123          * The NetXtreme II bootcode populates various NIC
3124          * power-on and runtime configuration items in a
3125          * shared memory area.  The factory configured MAC
3126          * address is available from both NVRAM and the
3127          * shared memory area so we'll read the value from
3128          * shared memory for speed.
3129          */
3130
3131         mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3132                 BCE_PORT_HW_CFG_MAC_UPPER);
3133         mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3134                 BCE_PORT_HW_CFG_MAC_LOWER);
3135
3136         if ((mac_lo == 0) && (mac_hi == 0)) {
3137                 BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 
3138                         __FILE__, __LINE__);
3139         } else {
3140                 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3141                 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3142                 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3143                 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3144                 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3145                 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3146         }
3147
3148         DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3149 }
3150
3151
3152 /****************************************************************************/
3153 /* Program the MAC address.                                                 */
3154 /*                                                                          */
3155 /* Returns:                                                                 */
3156 /*   Nothing.                                                               */
3157 /****************************************************************************/
3158 static void
3159 bce_set_mac_addr(struct bce_softc *sc)
3160 {
3161         u32 val;
3162         u8 *mac_addr = sc->eaddr;
3163
3164         DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3165
3166         val = (mac_addr[0] << 8) | mac_addr[1];
3167
3168         REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3169
3170         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3171                 (mac_addr[4] << 8) | mac_addr[5];
3172
3173         REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3174 }
3175
3176
3177 /****************************************************************************/
3178 /* Stop the controller.                                                     */
3179 /*                                                                          */
3180 /* Returns:                                                                 */
3181 /*   Nothing.                                                               */
3182 /****************************************************************************/
3183 static void
3184 bce_stop(struct bce_softc *sc)
3185 {
3186         struct ifnet *ifp;
3187         struct ifmedia_entry *ifm;
3188         struct mii_data *mii = NULL;
3189         int mtmp, itmp;
3190
3191         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3192
3193         BCE_LOCK_ASSERT(sc);
3194
3195         ifp = sc->bce_ifp;
3196
3197         mii = device_get_softc(sc->bce_miibus);
3198
3199         callout_stop(&sc->bce_stat_ch);
3200
3201         /* Disable the transmit/receive blocks. */
3202         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
3203         REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3204         DELAY(20);
3205
3206         bce_disable_intr(sc);
3207
3208         /* Tell firmware that the driver is going away. */
3209         bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
3210
3211         /* Free the RX lists. */
3212         bce_free_rx_chain(sc);
3213
3214         /* Free TX buffers. */
3215         bce_free_tx_chain(sc);
3216
3217         /*
3218          * Isolate/power down the PHY, but leave the media selection
3219          * unchanged so that things will be put back to normal when
3220          * we bring the interface back up.
3221          */
3222
3223         itmp = ifp->if_flags;
3224         ifp->if_flags |= IFF_UP;
3225         /*
3226          * If we are called from bce_detach(), mii is already NULL.
3227          */
3228         if (mii != NULL) {
3229                 ifm = mii->mii_media.ifm_cur;
3230                 mtmp = ifm->ifm_media;
3231                 ifm->ifm_media = IFM_ETHER | IFM_NONE;
3232                 mii_mediachg(mii);
3233                 ifm->ifm_media = mtmp;
3234         }
3235
3236         ifp->if_flags = itmp;
3237         ifp->if_timer = 0;
3238
3239         sc->bce_link = 0;
3240
3241         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3242
3243         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3244
3245 }
3246
3247
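/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Synchronizes with the bootcode, issues a core reset, verifies byte       */
/* swapping, and waits for the firmware to finish its initialization.       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/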
3248 static int
3249 bce_reset(struct bce_softc *sc, u32 reset_code)
3250 {
3251         u32 val;
3252         int i, rc = 0;
3253
3254         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3255
3256         /* Wait for pending PCI transactions to complete. */
3257         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3258                BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3259                BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3260                BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3261                BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3262         val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3263         DELAY(5);
3264
3265         /* Assume bootcode is running. */
3266         sc->bce_fw_timed_out = 0;
3267
3268         /* Give the firmware a chance to prepare for the reset. */
3269         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3270         if (rc)
3271                 goto bce_reset_exit;
3272
3273         /* Set a firmware reminder that this is a soft reset. */
3274         REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3275                    BCE_DRV_RESET_SIGNATURE_MAGIC);
3276
3277         /* Dummy read to force the chip to complete all current transactions. */
3278         val = REG_RD(sc, BCE_MISC_ID);
3279
3280         /* Chip reset. */
3281         val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3282               BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3283               BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3284         REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3285
3286         /* Allow up to 30us for reset to complete. */
3287         for (i = 0; i < 10; i++) {
3288                 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3289                 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3290                             BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3291                         break;
3292                 }
3293                 DELAY(10);
3294         }
3295
3296         /* Check that reset completed successfully. */
3297         if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3298                    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3299                 BCE_PRINTF(sc, "%s(%d): Reset failed!\n", 
3300                         __FILE__, __LINE__);
3301                 rc = EBUSY;
3302                 goto bce_reset_exit;
3303         }
3304
3305         /* Make sure byte swapping is properly configured. */
3306         val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3307         if (val != 0x01020304) {
3308                 BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 
3309                         __FILE__, __LINE__);
3310                 rc = ENODEV;
3311                 goto bce_reset_exit;
3312         }
3313
3314         /* Just completed a reset, assume that firmware is running again. */
3315         sc->bce_fw_timed_out = 0;
3316
3317         /* Wait for the firmware to finish its initialization. */
3318         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3319         if (rc)
3320                 BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
3321                         __FILE__, __LINE__);
3322
3323 bce_reset_exit:
3324         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3325
3326         return (rc);
3327 }
3328
3329
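/****************************************************************************/
/* Initialize the controller hardware.                                      */
/*                                                                          */
/* Configures DMA byte/word swapping, context memory, the on-board CPUs,    */
/* NVRAM access, and various block parameters.                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/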
3330 static int
3331 bce_chipinit(struct bce_softc *sc)
3332 {
3333         u32 val;
3334         int rc = 0;
3335
3336         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3337
3338         /* Make sure the interrupt is not active. */
3339         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3340
3341         /* Initialize DMA byte/word swapping, configure the number of DMA  */
3342         /* channels and PCI clock compensation delay.                      */
3343         val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3344               BCE_DMA_CONFIG_DATA_WORD_SWAP |
3345 #if BYTE_ORDER == BIG_ENDIAN
3346               BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3347 #endif
3348               BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3349               DMA_READ_CHANS << 12 |
3350               DMA_WRITE_CHANS << 16;
3351
3352         val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3353
3354         if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3355                 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3356
3357         /*
3358          * This setting resolves a problem observed on certain Intel PCI
3359          * chipsets that cannot handle multiple outstanding DMA operations.
3360          * See errata E9_5706A1_65.
3361          */
3362         if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3363             (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3364             !(sc->bce_flags & BCE_PCIX_FLAG))
3365                 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3366
3367         REG_WR(sc, BCE_DMA_CONFIG, val);
3368
3369         /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3370         if (sc->bce_flags & BCE_PCIX_FLAG) {
3371                 u16 val;
3372
3373                 val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3374                 pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3375         }
3376
3377         /* Enable the RX_V2P and Context state machines before access. */
3378         REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3379                BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3380                BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3381                BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3382
3383         /* Initialize context mapping and zero out the quick contexts. */
3384         bce_init_context(sc);
3385
3386         /* Initialize the on-board CPUs. */
3387         bce_init_cpus(sc);
3388
3389         /* Prepare NVRAM for access. */
3390         if (bce_init_nvram(sc)) {
3391                 rc = ENODEV;
3392                 goto bce_chipinit_exit;
3393         }
3394
3395         /* Set the kernel bypass block size */
3396         val = REG_RD(sc, BCE_MQ_CONFIG);
3397         val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3398         val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3399         REG_WR(sc, BCE_MQ_CONFIG, val);
3400
3401         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3402         REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3403         REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3404
3405         val = (BCM_PAGE_BITS - 8) << 24;
3406         REG_WR(sc, BCE_RV2P_CONFIG, val);
3407
3408         /* Configure page size. */
3409         val = REG_RD(sc, BCE_TBDR_CONFIG);
3410         val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3411         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3412         REG_WR(sc, BCE_TBDR_CONFIG, val);
3413
3414 bce_chipinit_exit:
3415         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3416
3417         return(rc);
3418 }
3419
3420
3421 /****************************************************************************/
3422 /* Initialize the controller in preparation to send/receive traffic.        */
3423 /*                                                                          */
3424 /* Returns:                                                                 */
3425 /*   0 for success, positive value for failure.                             */
3426 /****************************************************************************/
3427 static int
3428 bce_blockinit(struct bce_softc *sc)
3429 {
3430         u32 reg, val;
3431         int rc = 0;
3432
3433         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3434
3435         /* Load the hardware default MAC address. */
3436         bce_set_mac_addr(sc);
3437
3438         /* Set the Ethernet backoff seed value */
3439         val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3440               (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3441               (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3442         REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3443
3444         sc->last_status_idx = 0;
3445         sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3446
3447         /* Set up link change interrupt generation. */
3448         REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3449
3450         /* Program the physical address of the status block. */
3451         REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3452                 BCE_ADDR_LO(sc->status_block_paddr));
3453         REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3454                 BCE_ADDR_HI(sc->status_block_paddr));
3455
3456         /* Program the physical address of the statistics block. */
3457         REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3458                 BCE_ADDR_LO(sc->stats_block_paddr));
3459         REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3460                 BCE_ADDR_HI(sc->stats_block_paddr));
3461
3462         /* Program various host coalescing parameters. */
3463         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3464                 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3465         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3466                 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3467         REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3468                 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3469         REG_WR(sc, BCE_HC_TX_TICKS,
3470                 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3471         REG_WR(sc, BCE_HC_RX_TICKS,
3472                 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3473         REG_WR(sc, BCE_HC_COM_TICKS,
3474                 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3475         REG_WR(sc, BCE_HC_CMD_TICKS,
3476                 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3477         REG_WR(sc, BCE_HC_STATS_TICKS,
3478                 (sc->bce_stats_ticks & 0xffff00));
3479         REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3480                 0xbb8);  /* 3ms */
3481         REG_WR(sc, BCE_HC_CONFIG,
3482                 (BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3483                 BCE_HC_CONFIG_COLLECT_STATS));
3484
3485         /* Clear the internal statistics counters. */
3486         REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3487
3488         /* Verify that bootcode is running. */
3489         reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3490
3491         DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3492                 BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3493                         __FILE__, __LINE__);
3494                 reg = 0);
3495
3496         if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3497             BCE_DEV_INFO_SIGNATURE_MAGIC) {
3498                 BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3499                         "Expected: 0x%08X\n", __FILE__, __LINE__,
3500                         (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3501                         BCE_DEV_INFO_SIGNATURE_MAGIC);
3502                 rc = ENODEV;
3503                 goto bce_blockinit_exit;
3504         }
3505
3506         /* Check if any management firmware is running. */
3507         reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3508         if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3509                 DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3510                 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3511         }
3512
3513         sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3514         DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3515
3516         /* Allow bootcode to apply any additional fixes before enabling MAC. */
3517         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3518
3519         /* Enable link state change interrupt generation. */
3520         REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3521
3522         /* Enable all remaining blocks in the MAC. */
3523         REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3524         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3525         DELAY(20);
3526
3527 bce_blockinit_exit:
3528         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3529
3530         return (rc);
3531 }
3532
3533
3534 /****************************************************************************/
3535 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3536 /*                                                                          */
3537 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3538 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3539 /* necessary.                                                               */
3540 /*                                                                          */
3541 /* Returns:                                                                 */
3542 /*   0 for success, positive value for failure.                             */
3543 /****************************************************************************/
3544 static int
3545 bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod, 
3546         u32 *prod_bseq)
3547 {
3548         bus_dmamap_t            map;
3549         bus_dma_segment_t       segs[4];
3550         struct mbuf *m_new = NULL;
3551         struct rx_bd            *rxbd;
3552         int i, nsegs, error, rc = 0;
3553 #ifdef BCE_DEBUG
3554         u16 debug_chain_prod = *chain_prod;
3555 #endif
3556
3557         DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n", 
3558                 __FUNCTION__);
3559
3560         /* Make sure the inputs are valid. */
3561         DBRUNIF((*chain_prod > MAX_RX_BD),
3562                 BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
3563                 __FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
3564
3565         DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3566                 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3567
3568         if (m == NULL) {
3569
3570                 DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3571                         BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n", 
3572                                 __FILE__, __LINE__);
3573                         sc->mbuf_alloc_failed++;
3574                         rc = ENOBUFS;
3575                         goto bce_get_buf_exit);
3576
3577                 /* This is a new mbuf allocation. */
3578                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3579                 if (m_new == NULL) {
3580
3581                         DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n", 
3582                                 __FILE__, __LINE__);
3583
3584                         DBRUNIF(1, sc->mbuf_alloc_failed++);
3585
3586                         rc = ENOBUFS;
3587                         goto bce_get_buf_exit;
3588                 }
3589
3590                 DBRUNIF(1, sc->rx_mbuf_alloc++);
3591                 m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
3592                 if (!(m_new->m_flags & M_EXT)) {
3593
3594                         DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n", 
3595                                 __FILE__, __LINE__);
3596                         
3597                         m_freem(m_new);
3598
3599                         DBRUNIF(1, sc->rx_mbuf_alloc--);
3600                         DBRUNIF(1, sc->mbuf_alloc_failed++);
3601
3602                         rc = ENOBUFS;
3603                         goto bce_get_buf_exit;
3604                 }
3605                         
3606                 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3607         } else {
3608                 m_new = m;
3609                 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3610                 m_new->m_data = m_new->m_ext.ext_buf;
3611         }
3612
3613         /* Map the mbuf cluster into device memory. */
3614         map = sc->rx_mbuf_map[*chain_prod];
3615         error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
3616             segs, &nsegs, BUS_DMA_NOWAIT);
3617
3618         if (error) {
3619                 BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3620                         __FILE__, __LINE__);
3621
3622                 m_freem(m_new);
3623
3624                 DBRUNIF(1, sc->rx_mbuf_alloc--);
3625
3626                 rc = ENOBUFS;
3627                 goto bce_get_buf_exit;
3628         }
3629
3630         /* Watch for overflow. */
3631         DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3632                 BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n", 
3633                         __FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));
3634
3635         DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 
3636                 sc->rx_low_watermark = sc->free_rx_bd);
3637
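        /*
         * A cluster may span several rx_bd entries: the first segment is
         * flagged RX_BD_FLAGS_START, the last one has RX_BD_FLAGS_END
         * OR'ed in, and *prod_bseq accumulates the byte count handed to
         * the hardware.
         */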
3638         /* Setup the rx_bd for the first segment. */
3639         rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3640
3641         rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
3642         rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
3643         rxbd->rx_bd_len       = htole32(segs[0].ds_len);
3644         rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
3645         *prod_bseq += segs[0].ds_len;
3646
3647         for (i = 1; i < nsegs; i++) {
3648
3649                 *prod = NEXT_RX_BD(*prod);
3650                 *chain_prod = RX_CHAIN_IDX(*prod); 
3651
3652                 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3653
3654                 rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
3655                 rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
3656                 rxbd->rx_bd_len       = htole32(segs[i].ds_len);
3657                 rxbd->rx_bd_flags     = 0;
3658                 *prod_bseq += segs[i].ds_len;
3659         }
3660
3661         rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3662
3663         /* Save the mbuf and update our counter. */
3664         sc->rx_mbuf_ptr[*chain_prod] = m_new;
3665         sc->free_rx_bd -= nsegs;
3666
3667         DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 
3668                 nsegs));
3669
3670         DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3671                 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3672
3673 bce_get_buf_exit:
3674         DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n", 
3675                 __FUNCTION__);
3676
3677         return(rc);
3678 }
3679
3680
3681 /****************************************************************************/
3682 /* Allocate memory and initialize the TX data structures.                   */
3683 /*                                                                          */
3684 /* Returns:                                                                 */
3685 /*   0 for success, positive value for failure.                             */
3686 /****************************************************************************/
3687 static int
3688 bce_init_tx_chain(struct bce_softc *sc)
3689 {
3690         struct tx_bd *txbd;
3691         u32 val;
3692         int i, rc = 0;
3693
3694         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3695
3696         /* Set the initial TX producer/consumer indices. */
3697         sc->tx_prod        = 0;
3698         sc->tx_cons        = 0;
3699         sc->tx_prod_bseq   = 0;
3700         sc->used_tx_bd = 0;
3701         DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3702
3703         /*
3704          * The NetXtreme II supports a linked-list structure called
3705          * a Buffer Descriptor Chain (or BD chain).  A BD chain
3706          * consists of a series of 1 or more chain pages, each of which
3707          * consists of a fixed number of BD entries.
3708          * The last BD entry on each page is a pointer to the next page
3709          * in the chain, and the last pointer in the BD chain
3710          * points back to the beginning of the chain.
3711          */
3712
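        /*
         * Concretely, the last BD of page i (index USABLE_TX_BD_PER_PAGE)
         * carries no data; it holds the bus address of page (i + 1), with
         * the final page pointing back to page 0.
         */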
3713         /* Set the TX next pointer chain entries. */
3714         for (i = 0; i < TX_PAGES; i++) {
3715                 int j;
3716
3717                 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3718
3719                 /* Check if we've reached the last page. */
3720                 if (i == (TX_PAGES - 1))
3721                         j = 0;
3722                 else
3723                         j = i + 1;
3724
3725                 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3726                 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3727         }
3728
3729         /*
3730          * Initialize the context ID for an L2 TX chain.
3731          */
3732         val = BCE_L2CTX_TYPE_TYPE_L2;
3733         val |= BCE_L2CTX_TYPE_SIZE_L2;
3734         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3735
3736         val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3737         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3738
3739         /* Point the hardware to the first page in the chain. */
3740         val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3741         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3742         val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3743         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3744
3745         DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3746
3747         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3748
3749         return(rc);
3750 }
3751
3752
3753 /****************************************************************************/
3754 /* Free memory and clear the TX data structures.                            */
3755 /*                                                                          */
3756 /* Returns:                                                                 */
3757 /*   Nothing.                                                               */
3758 /****************************************************************************/
3759 static void
3760 bce_free_tx_chain(struct bce_softc *sc)
3761 {
3762         int i;
3763
3764         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3765
3766         /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3767         for (i = 0; i < TOTAL_TX_BD; i++) {
3768                 if (sc->tx_mbuf_ptr[i] != NULL) {
3769                         if (sc->tx_mbuf_map != NULL)
3770                                 bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3771                                         BUS_DMASYNC_POSTWRITE);
3772                         m_freem(sc->tx_mbuf_ptr[i]);
3773                         sc->tx_mbuf_ptr[i] = NULL;
3774                         DBRUNIF(1, sc->tx_mbuf_alloc--);
3775                 }                       
3776         }
3777
3778         /* Clear each TX chain page. */
3779         for (i = 0; i < TX_PAGES; i++)
3780                 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3781
3782         /* Check if we lost any mbufs in the process. */
3783         DBRUNIF((sc->tx_mbuf_alloc),
3784                 BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3785                         "from tx chain!\n",
3786                         __FILE__, __LINE__, sc->tx_mbuf_alloc));
3787
3788         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3789 }
3790
3791
3792 /****************************************************************************/
3793 /* Allocate memory and initialize the RX data structures.                   */
3794 /*                                                                          */
3795 /* Returns:                                                                 */
3796 /*   0 for success, positive value for failure.                             */
3797 /****************************************************************************/
3798 static int
3799 bce_init_rx_chain(struct bce_softc *sc)
3800 {
3801         struct rx_bd *rxbd;
3802         int i, rc = 0;
3803         u16 prod, chain_prod;
3804         u32 prod_bseq, val;
3805
3806         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3807
3808         /* Initialize the RX producer and consumer indices. */
3809         sc->rx_prod        = 0;
3810         sc->rx_cons        = 0;
3811         sc->rx_prod_bseq   = 0;
3812         sc->free_rx_bd     = BCE_RX_SLACK_SPACE;
3813         DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3814
3815         /* Initialize the RX next pointer chain entries. */
3816         for (i = 0; i < RX_PAGES; i++) {
3817                 int j;
3818
3819                 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3820
3821                 /* Check if we've reached the last page. */
3822                 if (i == (RX_PAGES - 1))
3823                         j = 0;
3824                 else
3825                         j = i + 1;
3826
3827                 /* Setup the chain page pointers. */
3828                 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3829                 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3830         }
3831
3832         /* Initialize the context ID for an L2 RX chain. */
3833         val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3834         val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3835         val |= 0x02 << 8;
3836         CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3837
3838         /* Point the hardware to the first page in the chain. */
3839         val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3840         CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3841         val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3842         CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3843
3844         /* Allocate mbuf clusters for the rx_bd chain. */
3845         prod = prod_bseq = 0;
3846         while (prod < BCE_RX_SLACK_SPACE) {
3847                 chain_prod = RX_CHAIN_IDX(prod);
3848                 if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3849                         BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
3850                                 __FILE__, __LINE__, chain_prod);
3851                         rc = ENOBUFS;
3852                         break;
3853                 }
3854                 prod = NEXT_RX_BD(prod);
3855         }
3856
3857         /* Save the RX chain producer index. */
3858         sc->rx_prod      = prod;
3859         sc->rx_prod_bseq = prod_bseq;
3860
3861         for (i = 0; i < RX_PAGES; i++) {
3862                 bus_dmamap_sync(
3863                     sc->rx_bd_chain_tag,
3864                     sc->rx_bd_chain_map[i],
3865                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3866         }
3867
3868         /* Tell the chip about the waiting rx_bd's. */
3869         REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3870         REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3871
3872         DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3873
3874         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3875
3876         return(rc);
3877 }
3878
3879
3880 /****************************************************************************/
3881 /* Free memory and clear the RX data structures.                            */
3882 /*                                                                          */
3883 /* Returns:                                                                 */
3884 /*   Nothing.                                                               */
3885 /****************************************************************************/
3886 static void
3887 bce_free_rx_chain(struct bce_softc *sc)
3888 {
3889         int i;
3890
3891         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3892
3893         /* Free any mbufs still in the RX mbuf chain. */
3894         for (i = 0; i < TOTAL_RX_BD; i++) {
3895                 if (sc->rx_mbuf_ptr[i] != NULL) {
3896                         if (sc->rx_mbuf_map[i] != NULL)
3897                                 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3898                                         BUS_DMASYNC_POSTREAD);
3899                         m_freem(sc->rx_mbuf_ptr[i]);
3900                         sc->rx_mbuf_ptr[i] = NULL;
3901                         DBRUNIF(1, sc->rx_mbuf_alloc--);
3902                 }
3903         }
3904
3905         /* Clear each RX chain page. */
3906         for (i = 0; i < RX_PAGES; i++)
3907                 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3908
3909         /* Check if we lost any mbufs in the process. */
3910         DBRUNIF((sc->rx_mbuf_alloc),
3911                 BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3912                         __FILE__, __LINE__, sc->rx_mbuf_alloc));
3913
3914         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3915 }
3916
3917
3918 /****************************************************************************/
3919 /* Set media options.                                                       */
3920 /*                                                                          */
3921 /* Returns:                                                                 */
3922 /*   0 for success, positive value for failure.                             */
3923 /****************************************************************************/
3924 static int
3925 bce_ifmedia_upd(struct ifnet *ifp)
3926 {
3927         struct bce_softc *sc;
3928         struct mii_data *mii;
3929         struct ifmedia *ifm;
3930         int rc = 0;
3931
3932         sc = ifp->if_softc;
3933         ifm = &sc->bce_ifmedia;
3934
3935         /* DRC - ToDo: Add SerDes support. */
3936
3937         mii = device_get_softc(sc->bce_miibus);
3938         sc->bce_link = 0;
3939         if (mii->mii_instance) {
3940                 struct mii_softc *miisc;
3941                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3942                     miisc = LIST_NEXT(miisc, mii_list))
3943                         mii_phy_reset(miisc);
3944         }
3945         mii_mediachg(mii);
3946
3947         return(rc);
3948 }
3949
3950
3951 /****************************************************************************/
3952 /* Reports current media status.                                            */
3953 /*                                                                          */
3954 /* Returns:                                                                 */
3955 /*   Nothing.                                                               */
3956 /****************************************************************************/
3957 static void
3958 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3959 {
3960         struct bce_softc *sc;
3961         struct mii_data *mii;
3962
3963         sc = ifp->if_softc;
3964
3965         BCE_LOCK(sc);
3966
3967         mii = device_get_softc(sc->bce_miibus);
3968
3969         /* DRC - ToDo: Add SerDes support. */
3970
3971         mii_pollstat(mii);
3972         ifmr->ifm_active = mii->mii_media_active;
3973         ifmr->ifm_status = mii->mii_media_status;
3974
3975         BCE_UNLOCK(sc);
3976 }
3977
3978
3979 /****************************************************************************/
3980 /* Handles PHY generated interrupt events.                                  */
3981 /*                                                                          */
3982 /* Returns:                                                                 */
3983 /*   Nothing.                                                               */
3984 /****************************************************************************/
3985 static void
3986 bce_phy_intr(struct bce_softc *sc)
3987 {
3988         u32 new_link_state, old_link_state;
3989
3990         new_link_state = sc->status_block->status_attn_bits &
3991                 STATUS_ATTN_BITS_LINK_STATE;
3992         old_link_state = sc->status_block->status_attn_bits_ack &
3993                 STATUS_ATTN_BITS_LINK_STATE;
3994
3995         /* Handle any changes if the link state has changed. */
3996         if (new_link_state != old_link_state) {
3997
3998                 DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3999
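                     /*
                      * Stop the periodic callout and run the tick handler
                      * now so the link change is processed immediately.
                      */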
4000                 sc->bce_link = 0;
4001                 callout_stop(&sc->bce_stat_ch);
4002                 bce_tick_locked(sc);
4003
4004                 /* Update the status_attn_bits_ack field in the status block. */
4005                 if (new_link_state) {
4006                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4007                                 STATUS_ATTN_BITS_LINK_STATE);
4008                         DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
4009                 }
4010                 else {
4011                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4012                                 STATUS_ATTN_BITS_LINK_STATE);
4013                         DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
4014                 }
4015
4016         }
4017
4018         /* Acknowledge the link change interrupt. */
4019         REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4020 }
4021
4022
4023 /****************************************************************************/
4024 /* Handles received frame interrupt events.                                 */
4025 /*                                                                          */
4026 /* Returns:                                                                 */
4027 /*   Nothing.                                                               */
4028 /****************************************************************************/
4029 static void
4030 bce_rx_intr(struct bce_softc *sc)
4031 {
4032         struct status_block *sblk = sc->status_block;
4033         struct ifnet *ifp = sc->bce_ifp;
4034         u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4035         u32 sw_prod_bseq;
4036         struct l2_fhdr *l2fhdr;
4037
4038         DBRUNIF(1, sc->rx_interrupts++);
4039
4040         /* Prepare the RX chain pages to be accessed by the host CPU. */
4041         for (int i = 0; i < RX_PAGES; i++)
4042                 bus_dmamap_sync(sc->rx_bd_chain_tag,
4043                     sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4044
4045         /* Get the hardware's view of the RX consumer index. */
4046         hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
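             /* Skip to the next entry if this is a chain page pointer. */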
4047         if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4048                 hw_cons++;
4049
4050         /* Get working copies of the driver's view of the RX indices. */
4051         sw_cons = sc->rx_cons;
4052         sw_prod = sc->rx_prod;
4053         sw_prod_bseq = sc->rx_prod_bseq;
4054
4055         DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4056                 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4057                 __FUNCTION__, sw_prod, sw_cons, 
4058                 sw_prod_bseq);
4059
4060         /* Prevent speculative reads from getting ahead of the status block. */
4061         bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4062                 BUS_SPACE_BARRIER_READ);
4063
4064         DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4065                 sc->rx_low_watermark = sc->free_rx_bd);
4066
4067         /* 
4068          * Scan through the receive chain as long 
4069          * as there is work to do.
4070          */
4071         while (sw_cons != hw_cons) {
4072                 struct mbuf *m;
4073                 struct rx_bd *rxbd;
4074                 unsigned int len;
4075                 u32 status;
4076
4077                 /* Convert the producer/consumer indices to an actual rx_bd index. */
4078                 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4079                 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4080
4081                 /* Get the used rx_bd. */
4082                 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4083                 sc->free_rx_bd++;
4084         
4085                 DBRUN(BCE_VERBOSE_RECV, 
4086                         BCE_PRINTF(sc, "%s(): ", __FUNCTION__); 
4087                         bce_dump_rxbd(sc, sw_chain_cons, rxbd));
4088
4089 #ifdef DEVICE_POLLING
4090                 if (ifp->if_capenable & IFCAP_POLLING) {
4091                         if (sc->bce_rxcycles <= 0)
4092                                 break;
4093                         sc->bce_rxcycles--;
4094                 }
4095 #endif
4096
4097                 /* The mbuf is stored with the last rx_bd entry of a packet. */
4098                 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4099
4100                         /* Validate that this is the last rx_bd. */
4101                         DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4102                                 BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
4103                                 __FILE__, __LINE__, sw_chain_cons);
4104                                 bce_breakpoint(sc));
4105
4106                         /* DRC - ToDo: If the received packet is small, say less */
4107                         /*             than 128 bytes, allocate a new mbuf here, */
4108                         /*             copy the data to that mbuf, and recycle   */
4109                         /*             the mapped jumbo frame.                   */
4110
4111                         /* Unmap the mbuf from DMA space. */
4112                         bus_dmamap_sync(sc->rx_mbuf_tag,
4113                             sc->rx_mbuf_map[sw_chain_cons],
4114                             BUS_DMASYNC_POSTREAD);
4115                         bus_dmamap_unload(sc->rx_mbuf_tag,
4116                             sc->rx_mbuf_map[sw_chain_cons]);
4117
4118                         /* Remove the mbuf from the driver's chain. */
4119                         m = sc->rx_mbuf_ptr[sw_chain_cons];
4120                         sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4121
4122                         /*
4123                          * Frames received on the NetXtreme II are prepended
4124                          * with the l2_fhdr structure which provides status
4125                          * information about the received frame (including
4126                          * VLAN tags and checksum info) and are also
4127                          * automatically adjusted to align the IP header
4128                          * (i.e. two null bytes are inserted before the 
4129                          * Ethernet header).
4130                          */
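                             /*
                              * Rough buffer layout: [l2_fhdr][2 pad bytes]
                              * [Ethernet header][payload][FCS].  The header
                              * and padding are stripped with m_adj() below
                              * before the frame is handed to the stack.
                              */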
4131                         l2fhdr = mtod(m, struct l2_fhdr *);
4132
4133                         len    = l2fhdr->l2_fhdr_pkt_len;
4134                         status = l2fhdr->l2_fhdr_status;
4135
4136                         DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4137                                 BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
4138                                 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4139
4140                         /* Watch for unusual sized frames. */
4141                         DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
4142                                 BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
4143                                         "Min(%d), Actual(%d), Max(%d)\n", 
4144                                         __FILE__, __LINE__, (int) BCE_MIN_MTU, 
4145                                         len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
4146                                 bce_dump_mbuf(sc, m);
4147                                 bce_breakpoint(sc));
4148
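                             /* The reported frame length includes the Ethernet FCS; trim it here. */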
4149                         len -= ETHER_CRC_LEN;
4150
4151                         /* Check the received frame for errors. */
4152                         if (status &  (L2_FHDR_ERRORS_BAD_CRC | 
4153                                 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | 
4154                                 L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
4155
4156                                 ifp->if_ierrors++;
4157                                 DBRUNIF(1, sc->l2fhdr_status_errors++);
4158
4159                                 /* Reuse the mbuf for a new frame. */
4160                                 if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4161
4162                                         DBRUNIF(1, bce_breakpoint(sc));
4163                                         panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);
4164
4165                                 }
4166                                 goto bce_rx_int_next_rx;
4167                         }
4168
4169                         /* 
4170                          * Get a new mbuf for the rx_bd.   If no new
4171                          * mbufs are available then reuse the current mbuf,
4172                          * log an ierror on the interface, and generate
4173                          * an error in the system log.
4174                          */
4175                         if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4176
4177                                 DBRUN(BCE_WARN, 
4178                                         BCE_PRINTF(sc, "%s(%d): Failed to allocate "
4179                                         "new mbuf, incoming frame dropped!\n", 
4180                                         __FILE__, __LINE__));
4181
4182                                 ifp->if_ierrors++;
4183
4184                                 /* Try to reuse the existing mbuf. */
4185                                 if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4186
4187                                         DBRUNIF(1, bce_breakpoint(sc));
4188                                         panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);
4189
4190                                 }
4191                                 goto bce_rx_int_next_rx;
4192                         }
4193
4194                         /* Skip over the l2_fhdr when passing the data up the stack. */
4195                         m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4196
4197                         /* Adjust the packet length to match the received data. */
4198                         m->m_pkthdr.len = m->m_len = len;
4199
4200                         /* Send the packet to the appropriate interface. */
4201                         m->m_pkthdr.rcvif = ifp;
4202
4203                         DBRUN(BCE_VERBOSE_RECV,
4204                                 struct ether_header *eh;
4205                                 eh = mtod(m, struct ether_header *);
4206                                 BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
4207                                         __FUNCTION__, eh->ether_dhost, ":", 
4208                                         eh->ether_shost, ":", htons(eh->ether_type)));
4209
4210                         /* Validate the checksum if offload enabled. */
4211                         if (ifp->if_capenable & IFCAP_RXCSUM) {
4212
4213                                 /* Check for an IP datagram. */
4214                                 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4215                                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4216
4217                                         /* Check if the IP checksum is valid. */
4218                                         if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4219                                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4220                                         else
4221                                                 DBPRINT(sc, BCE_WARN_SEND, 
4222                                                         "%s(): Invalid IP checksum = 0x%04X!\n",
4223                                                         __FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
4224                                 }
4225
4226                                 /* Check for a valid TCP/UDP frame. */
4227                                 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4228                                         L2_FHDR_STATUS_UDP_DATAGRAM)) {
4229
4230                                         /* Check for a good TCP/UDP checksum. */
4231                                         if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4232                                                       L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4233                                                 m->m_pkthdr.csum_data =
4234                                                     l2fhdr->l2_fhdr_tcp_udp_xsum;
4235                                                 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID 
4236                                                         | CSUM_PSEUDO_HDR);
4237                                         } else
4238                                                 DBPRINT(sc, BCE_WARN_SEND, 
4239                                                         "%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
4240                                                         __FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
4241                                 }
4242                         }               
4243
4244
4245                         /*
4246                          * If we received a packet with a vlan tag,
4247                          * attach that information to the packet.
4248                          */
4249                         if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4250                                 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
4251                                         __FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
4252 #if __FreeBSD_version < 700000
4253                                 VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
4254 #else
4255                                 VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag);
4256                                 if (m == NULL)
4257                                         continue;
4258 #endif  
4259                         }
4260
4261                         /* Pass the mbuf off to the upper layers. */
4262                         ifp->if_ipackets++;
4263                         DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
4264                                 __FUNCTION__);
4265                         BCE_UNLOCK(sc);
4266                         (*ifp->if_input)(ifp, m);
4267                         DBRUNIF(1, sc->rx_mbuf_alloc--);
4268                         BCE_LOCK(sc);
4269
4270 bce_rx_int_next_rx:
4271                         sw_prod = NEXT_RX_BD(sw_prod);
4272                 }
4273
4274                 sw_cons = NEXT_RX_BD(sw_cons);
4275
4276                 /* Refresh hw_cons to see if there's new work */
4277                 if (sw_cons == hw_cons) {
4278                         hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4279                         if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4280                                 hw_cons++;
4281                 }
4282
4283                 /* Prevent speculative reads from getting ahead of the status block. */
4284                 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4285                         BUS_SPACE_BARRIER_READ);
4286         }
4287
4288         for (int i = 0; i < RX_PAGES; i++)
4289                 bus_dmamap_sync(sc->rx_bd_chain_tag,
4290                     sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4291
4292         sc->rx_cons = sw_cons;
4293         sc->rx_prod = sw_prod;
4294         sc->rx_prod_bseq = sw_prod_bseq;
4295
4296         REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4297         REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4298
4299         DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4300                 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4301                 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4302 }
4303
4304
4305 /****************************************************************************/
4306 /* Handles transmit completion interrupt events.                            */
4307 /*                                                                          */
4308 /* Returns:                                                                 */
4309 /*   Nothing.                                                               */
4310 /****************************************************************************/
4311 static void
4312 bce_tx_intr(struct bce_softc *sc)
4313 {
4314         struct status_block *sblk = sc->status_block;
4315         struct ifnet *ifp = sc->bce_ifp;
4316         u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4317
4318         BCE_LOCK_ASSERT(sc);
4319
4320         DBRUNIF(1, sc->tx_interrupts++);
4321
4322         /* Get the hardware's view of the TX consumer index. */
4323         hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4324
4325         /* Skip to the next entry if this is a chain page pointer. */
4326         if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4327                 hw_tx_cons++;
4328
4329         sw_tx_cons = sc->tx_cons;
4330
4331         /* Prevent speculative reads from getting ahead of the status block. */
4332         bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4333                 BUS_SPACE_BARRIER_READ);
4334
4335         /* Cycle through any completed TX chain page entries. */
4336         while (sw_tx_cons != hw_tx_cons) {
4337 #ifdef BCE_DEBUG
4338                 struct tx_bd *txbd = NULL;
4339 #endif
4340                 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4341
4342                 DBPRINT(sc, BCE_INFO_SEND,
4343                         "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4344                         "sw_tx_chain_cons = 0x%04X\n",
4345                         __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4346
4347                 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4348                         BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
4349                                 " 0x%04X > 0x%04X\n",
4350                                 __FILE__, __LINE__, sw_tx_chain_cons, 
4351                                 (int) MAX_TX_BD);
4352                         bce_breakpoint(sc));
4353
4354                 DBRUNIF(1,
4355                         txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4356                                 [TX_IDX(sw_tx_chain_cons)]);
4357                 
4358                 DBRUNIF((txbd == NULL),
4359                         BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n", 
4360                                 __FILE__, __LINE__, sw_tx_chain_cons);
4361                         bce_breakpoint(sc));
4362
4363                 DBRUN(BCE_INFO_SEND, 
4364                         BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
4365                         bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4366
4367                 /*
4368                  * Free the associated mbuf. Remember
4369                  * that only the last tx_bd of a packet
4370                  * has an mbuf pointer and DMA map.
4371                  */
4372                 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4373
4374                         /* Validate that this is the last tx_bd. */
4375                         DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)),
4376                                 BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
4377                                 "txmbuf == NULL!\n", __FILE__, __LINE__);
4378                                 bce_breakpoint(sc));
4379
4380                         DBRUN(BCE_INFO_SEND, 
4381                                 BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
4382                                         "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
4383
4384                         /* Unmap the mbuf. */
4385                         bus_dmamap_unload(sc->tx_mbuf_tag,
4386                             sc->tx_mbuf_map[sw_tx_chain_cons]);
4387         
4388                         /* Free the mbuf. */
4389                         m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4390                         sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4391                         DBRUNIF(1, sc->tx_mbuf_alloc--);
4392
4393                         ifp->if_opackets++;
4394                 }
4395
4396                 sc->used_tx_bd--;
4397                 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4398
4399                 /* Refresh hw_cons to see if there's new work. */
4400                 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4401                 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4402                         hw_tx_cons++;
4403
4404                 /* Prevent speculative reads from getting ahead of the status block. */
4405                 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4406                         BUS_SPACE_BARRIER_READ);
4407         }
4408
4409         /* Clear the TX timeout timer. */
4410         ifp->if_timer = 0;
4411
4412         /* Clear the tx hardware queue full flag. */
4413         if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
4414                 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
4415                         BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n", 
4416                                 __FUNCTION__, sc->used_tx_bd));
4417                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4418         }
4419
4420         sc->tx_cons = sw_tx_cons;
4421 }
4422
4423
4424 /****************************************************************************/
4425 /* Disables interrupt generation.                                           */
4426 /*                                                                          */
4427 /* Returns:                                                                 */
4428 /*   Nothing.                                                               */
4429 /****************************************************************************/
4430 static void
4431 bce_disable_intr(struct bce_softc *sc)
4432 {
4433         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4434                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4435         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4436 }
4437
4438
4439 /****************************************************************************/
4440 /* Enables interrupt generation.                                            */
4441 /*                                                                          */
4442 /* Returns:                                                                 */
4443 /*   Nothing.                                                               */
4444 /****************************************************************************/
4445 static void
4446 bce_enable_intr(struct bce_softc *sc)
4447 {
4448         u32 val;
4449
4450         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4451                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4452                BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4453
4454         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4455                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4456
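             /*
              * Trigger an immediate coalescing pass so any events that
              * arrived while interrupts were masked are serviced now.
              */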
4457         val = REG_RD(sc, BCE_HC_COMMAND);
4458         REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4459 }
4460
4461
4462 /****************************************************************************/
4463 /* Handles controller initialization.                                       */
4464 /*                                                                          */
4465 /* Must be called from a locked routine.                                    */
4466 /*                                                                          */
4467 /* Returns:                                                                 */
4468 /*   Nothing.                                                               */
4469 /****************************************************************************/
4470 static void
4471 bce_init_locked(struct bce_softc *sc)
4472 {
4473         struct ifnet *ifp;
4474         u32 ether_mtu;
4475
4476         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4477
4478         BCE_LOCK_ASSERT(sc);
4479
4480         ifp = sc->bce_ifp;
4481
4482         /* Check if the driver is still running and bail out if it is. */
4483         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4484                 goto bce_init_locked_exit;
4485
4486         bce_stop(sc);
4487
4488         if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
4489                 BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n", 
4490                         __FILE__, __LINE__);
4491                 goto bce_init_locked_exit;
4492         }
4493
4494         if (bce_chipinit(sc)) {
4495                 BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n", 
4496                         __FILE__, __LINE__);
4497                 goto bce_init_locked_exit;
4498         }
4499
4500         if (bce_blockinit(sc)) {
4501                 BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n", 
4502                         __FILE__, __LINE__);
4503                 goto bce_init_locked_exit;
4504         }
4505
4506         /* Load our MAC address. */
4507         bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
4508         bce_set_mac_addr(sc);
4509
4510         /* Calculate and program the Ethernet MTU size. */
4511         ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + 
4512                 ETHER_CRC_LEN;
4513
4514         DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);
4515
4516         /* 
4517          * Program the mtu, enabling jumbo frame 
4518          * support if necessary.  Also set the mbuf
4519          * allocation count for RX frames.
4520          */
4521         if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
4522                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu | 
4523                         BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4524                 sc->mbuf_alloc_size = MJUM9BYTES;
4525         } else {
4526                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4527                 sc->mbuf_alloc_size = MCLBYTES;
4528         }
4529
4530         /* Calculate the RX Ethernet frame size for rx_bd's. */
4531         sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4532
4533         DBPRINT(sc, BCE_INFO, 
4534                 "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4535                 "max_frame_size = %d\n",
4536                 __FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);
4537
4538         /* Program appropriate promiscuous/multicast filtering. */
4539         bce_set_rx_mode(sc);
4540
4541         /* Init RX buffer descriptor chain. */
4542         bce_init_rx_chain(sc);
4543
4544         /* Init TX buffer descriptor chain. */
4545         bce_init_tx_chain(sc);
4546
4547 #ifdef DEVICE_POLLING
4548         /* Disable interrupts if we are polling. */
4549         if (ifp->if_capenable & IFCAP_POLLING) {
4550                 bce_disable_intr(sc);
4551
4552                 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4553                         (1 << 16) | sc->bce_rx_quick_cons_trip);
4554                 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4555                         (1 << 16) | sc->bce_tx_quick_cons_trip);
4556         } else
4557 #endif
4558         /* Enable host interrupts. */
4559         bce_enable_intr(sc);
4560
4561         bce_ifmedia_upd(ifp);
4562
4563         ifp->if_drv_flags |= IFF_DRV_RUNNING;
4564         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4565
4566         callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4567
4568 bce_init_locked_exit:
4569         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4570
4571         return;
4572 }
4573
4574
4575 /****************************************************************************/
4576 /* Handles controller initialization when called from an unlocked routine.  */
4577 /*                                                                          */
4578 /* Returns:                                                                 */
4579 /*   Nothing.                                                               */
4580 /****************************************************************************/
4581 static void
4582 bce_init(void *xsc)
4583 {
4584         struct bce_softc *sc = xsc;
4585
4586         BCE_LOCK(sc);
4587         bce_init_locked(sc);
4588         BCE_UNLOCK(sc);
4589 }
4590
4591
4592 /****************************************************************************/
4593 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
4594 /* the memory visible to the controller.                                    */
4595 /*                                                                          */
4596 /* Returns:                                                                 */
4597 /*   0 for success, positive value for failure.                             */
4598 /****************************************************************************/
4599 static int
4600 bce_tx_encap(struct bce_softc *sc, struct mbuf *m_head, u16 *prod,
4601         u16 *chain_prod, u32 *prod_bseq)
4602 {
4603         u32 vlan_tag_flags = 0;
4604         struct m_tag *mtag;
4605         struct bce_dmamap_arg map_arg;
4606         bus_dmamap_t map;
4607         int i, error, rc = 0;
4608
4609         /* Transfer any checksum offload flags to the bd. */
4610         if (m_head->m_pkthdr.csum_flags) {
4611                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
4612                         vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM;
4613                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4614                         vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4615         }
4616
4617         /* Transfer any VLAN tags to the bd. */
4618         mtag = VLAN_OUTPUT_TAG(sc->bce_ifp, m_head);
4619         if (mtag != NULL)
4620                 vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG |
4621                         (VLAN_TAG_VALUE(mtag) << 16));
4622
4623         /* Map the mbuf into DMAable memory. */
4624         map = sc->tx_mbuf_map[*chain_prod];
4625         map_arg.sc         = sc;
4626         map_arg.prod       = *prod;
4627         map_arg.chain_prod = *chain_prod;
4628         map_arg.prod_bseq  = *prod_bseq;
4629         map_arg.tx_flags   = vlan_tag_flags;
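             /*
              * Allow at most as many DMA segments as there are free tx_bd
              * entries, less the reserved slack space.
              */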
4630         map_arg.maxsegs    = USABLE_TX_BD - sc->used_tx_bd - 
4631                 BCE_TX_SLACK_SPACE;
4632
4633         KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!"));
4634
4635         for (i = 0; i < TX_PAGES; i++)
4636                 map_arg.tx_chain[i] = sc->tx_bd_chain[i];
4637
4638         /* Map the mbuf into our DMA address space. */
4639         error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m_head,
4640             bce_dma_map_tx_desc, &map_arg, BUS_DMA_NOWAIT);
4641
4642         if (error || map_arg.maxsegs == 0) {
4643                 BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into TX chain!\n",
4644                         __FILE__, __LINE__);
4645                 rc = ENOBUFS;
4646                 goto bce_tx_encap_exit;
4647         }
4648
4649         /*
4650          * Ensure that the map for this transmission
4651          * is placed at the array index of the last
4652          * descriptor in this chain.  This is done
4653          * because a single map is used for all 
4654          * segments of the mbuf and we don't want to
4655          * delete the map before all of the segments
4656          * have been freed.
4657          */
4658         sc->tx_mbuf_map[*chain_prod] = 
4659                 sc->tx_mbuf_map[map_arg.chain_prod];
4660         sc->tx_mbuf_map[map_arg.chain_prod] = map;
4661         sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head;
4662         sc->used_tx_bd += map_arg.maxsegs;
4663
4664         DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 
4665                 sc->tx_hi_watermark = sc->used_tx_bd);
4666
4667         DBRUNIF(1, sc->tx_mbuf_alloc++);
4668
4669         DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, *chain_prod, 
4670                 map_arg.maxsegs));
4671
4672         /* prod still points the last used tx_bd at this point. */
4673         *prod       = map_arg.prod;
4674         *chain_prod = map_arg.chain_prod;
4675         *prod_bseq  = map_arg.prod_bseq;
4676
4677 bce_tx_encap_exit:
4678
4679         return(rc);
4680 }
4681
4682
4683 /****************************************************************************/
4684 /* Main transmit routine when called from another routine with a lock.      */
4685 /*                                                                          */
4686 /* Returns:                                                                 */
4687 /*   Nothing.                                                               */
4688 /****************************************************************************/
4689 static void
4690 bce_start_locked(struct ifnet *ifp)
4691 {
4692         struct bce_softc *sc = ifp->if_softc;
4693         struct mbuf *m_head = NULL;
4694         int count = 0;
4695         u16 tx_prod, tx_chain_prod;
4696         u32 tx_prod_bseq;
4697
4698         /* If there's no link or the transmit queue is empty then just exit. */
4699         if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
4700                 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n", 
4701                         __FUNCTION__);
4702                 goto bce_start_locked_exit;
4703         }
4704
4705         /* prod points to the next free tx_bd. */
4706         tx_prod = sc->tx_prod;
4707         tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4708         tx_prod_bseq = sc->tx_prod_bseq;
4709
4710         DBPRINT(sc, BCE_INFO_SEND,
4711                 "%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4712                 "tx_prod_bseq = 0x%08X\n",
4713                 __FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4714
4715         /* Keep adding entries while there is space in the ring. */
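             /*
              * A non-NULL mbuf pointer at the producer index means the ring
              * has wrapped onto an unreclaimed descriptor (the chain is full).
              */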
4716         while (sc->tx_mbuf_ptr[tx_chain_prod] == NULL) {
4717
4718                 /* Check for any frames to send. */
4719                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4720                 if (m_head == NULL)
4721                         break;
4722
4723                 /*
4724                  * Pack the data into the transmit ring. If we
4725                  * don't have room, place the mbuf back at the
4726                  * head of the queue and set the OACTIVE flag
4727                  * to wait for the NIC to drain the chain.
4728                  */
4729                 if (bce_tx_encap(sc, m_head, &tx_prod, &tx_chain_prod, &tx_prod_bseq)) {
4730                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4731                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4732                         DBPRINT(sc, BCE_INFO_SEND,
4733                                 "TX chain is closed for business! Total tx_bd used = %d\n", 
4734                                 sc->used_tx_bd);
4735                         break;
4736                 }
4737
4738                 count++;
4739
4740                 /* Send a copy of the frame to any BPF listeners. */
4741                 BPF_MTAP(ifp, m_head);
4742
4743                 tx_prod = NEXT_TX_BD(tx_prod);
4744                 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4745         }
4746
4747         if (count == 0) {
4748                 /* no packets were dequeued */
4749                 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n", 
4750                         __FUNCTION__);
4751                 goto bce_start_locked_exit;
4752         }
4753
4754         /* Update the driver's counters. */
4755         sc->tx_prod      = tx_prod;
4756         sc->tx_prod_bseq = tx_prod_bseq;
4757
4758         DBPRINT(sc, BCE_INFO_SEND,
4759                 "%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4760                 "tx_prod_bseq = 0x%08X\n",
4761                 __FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4762
4763         /* Start the transmit. */
4764         REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4765         REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4766
4767         /* Set the tx timeout. */
4768         ifp->if_timer = BCE_TX_TIMEOUT;
4769
4770 bce_start_locked_exit:
4771         return;
4772 }
4773
4774
4775 /****************************************************************************/
4776 /* Main transmit routine when called from another routine without a lock.   */
4777 /*                                                                          */
4778 /* Returns:                                                                 */
4779 /*   Nothing.                                                               */
4780 /****************************************************************************/
4781 static void
4782 bce_start(struct ifnet *ifp)
4783 {
4784         struct bce_softc *sc = ifp->if_softc;
4785
4786         BCE_LOCK(sc);
4787         bce_start_locked(ifp);
4788         BCE_UNLOCK(sc);
4789 }
4790
4791
4792 /****************************************************************************/
4793 /* Handles any IOCTL calls from the operating system.                       */
4794 /*                                                                          */
4795 /* Returns:                                                                 */
4796 /*   0 for success, positive value for failure.                             */
4797 /****************************************************************************/
4798 static int
4799 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4800 {
4801         struct bce_softc *sc = ifp->if_softc;
4802         struct ifreq *ifr = (struct ifreq *) data;
4803         struct mii_data *mii;
4804         int mask, error = 0;
4805
4806         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4807
4808         switch (command) {
4809
4810                 /* Set the MTU. */
4811                 case SIOCSIFMTU:
4812                         /* Check that the MTU setting is supported. */
4813                         if ((ifr->ifr_mtu < BCE_MIN_MTU) || 
4814                                 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4815                                 error = EINVAL;
4816                                 break;
4817                         }
4818
4819                         DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4820
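                             /*
                              * Clear the running flag and re-initialize so
                              * the new MTU is programmed into the controller.
                              */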
4821                         ifp->if_mtu = ifr->ifr_mtu;
4822                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4823                         bce_init(sc);
4824                         break;
4825
4826                 /* Set interface. */
4827                 case SIOCSIFFLAGS:
4828                         DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");
4829
4830                         BCE_LOCK(sc);
4831
4832                         /* Check if the interface is up. */
4833                         if (ifp->if_flags & IFF_UP) {
4834                                 /* Change the promiscuous/multicast flags as necessary. */
4835                                 bce_set_rx_mode(sc);
4836                         } else {
4837                                 /* The interface is down.  Check if the driver is running. */
4838                                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4839                                         bce_stop(sc);
4840                                 }
4841                         }
4842
4843                         BCE_UNLOCK(sc);
4844                         error = 0;
4845
4846                         break;
4847
4848                 /* Add/Delete multicast address */
4849                 case SIOCADDMULTI:
4850                 case SIOCDELMULTI:
4851                         DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");
4852
4853                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4854                                 BCE_LOCK(sc);
4855                                 bce_set_rx_mode(sc);
4856                                 BCE_UNLOCK(sc);
4857                                 error = 0;
4858                         }
4859
4860                         break;
4861
4862                 /* Set/Get Interface media */
4863                 case SIOCSIFMEDIA:
4864                 case SIOCGIFMEDIA:
4865                         DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
4866
4867                         DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4868                                 sc->bce_phy_flags);
4869
4870                         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
4871                                 DBPRINT(sc, BCE_VERBOSE, "SerDes media set/get\n");
4872
4873                                 error = ifmedia_ioctl(ifp, ifr,
4874                                     &sc->bce_ifmedia, command);
4875                         } else {
4876                                 DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4877                                 mii = device_get_softc(sc->bce_miibus);
4878                                 error = ifmedia_ioctl(ifp, ifr,
4879                                     &mii->mii_media, command);
4880                         }
4881                         break;
4882
4883                 /* Set interface capability */
4884                 case SIOCSIFCAP:
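                             /* The XOR isolates the capability bits the caller wants to toggle. */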
4885                         mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4886                         DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
4887
4888 #ifdef DEVICE_POLLING
4889                         if (mask & IFCAP_POLLING) {
4890                                 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4891
4892                                         /* Setup the poll routine to call. */
4893                                         error = ether_poll_register(bce_poll, ifp);
4894                                         if (error) {
4895                                                 BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
4896                                                         __FILE__, __LINE__);
4897                                                 goto bce_ioctl_exit;
4898                                         }
4899
4900                                         /* Clear the interrupt. */
4901                                         BCE_LOCK(sc);
4902                                         bce_disable_intr(sc);
4903
4904                                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4905                                                 (1 << 16) | sc->bce_rx_quick_cons_trip);
4906                                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4907                                                 (1 << 16) | sc->bce_tx_quick_cons_trip);
4908
4909                                         ifp->if_capenable |= IFCAP_POLLING;
4910                                         BCE_UNLOCK(sc);
4911                                 } else {
4912                                         /* Clear the poll routine. */
4913                                         error = ether_poll_deregister(ifp);
4914
4915                                         /* Enable interrupt even in error case */
4916                                         BCE_LOCK(sc);
4917                                         bce_enable_intr(sc);
4918
4919                                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4920                                                 (sc->bce_tx_quick_cons_trip_int << 16) |
4921                                                 sc->bce_tx_quick_cons_trip);
4922                                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4923                                                 (sc->bce_rx_quick_cons_trip_int << 16) |
4924                                                 sc->bce_rx_quick_cons_trip);
4925
4926                                         ifp->if_capenable &= ~IFCAP_POLLING;
4927                                         BCE_UNLOCK(sc);
4928                                 }
4929                         }
4930 #endif /*DEVICE_POLLING */
4931
4932                         /* Toggle the TX checksum capabilities enable flag. */
4933                         if (mask & IFCAP_TXCSUM) {
4934                                 ifp->if_capenable ^= IFCAP_TXCSUM;
4935                                 if (IFCAP_TXCSUM & ifp->if_capenable)
4936                                         ifp->if_hwassist = BCE_IF_HWASSIST;
4937                                 else
4938                                         ifp->if_hwassist = 0;
4939                         }
4940
4941                         /* Toggle the RX checksum capabilities enable flag. */
4942                         if (mask & IFCAP_RXCSUM) {
4943                                 ifp->if_capenable ^= IFCAP_RXCSUM;
4944                                 if (IFCAP_RXCSUM & ifp->if_capenable)
4945                                         ifp->if_hwassist = BCE_IF_HWASSIST;
4946                                 else
4947                                         ifp->if_hwassist = 0;
4948                         }
4949
4950                         /* Toggle VLAN_MTU capabilities enable flag. */
4951                         if (mask & IFCAP_VLAN_MTU) {
4952                                 BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
4953                                         __FILE__, __LINE__);
4954                         }
4955
4956                         /* Toggle the VLAN_HWTAGGING capabilities enable flag. */
4957                         if (mask & IFCAP_VLAN_HWTAGGING) {
4958                                 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
4959                                         BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
4960                                                 "management firmware (ASF/IPMI/UMP) is running!\n",
4961                                                 __FILE__, __LINE__);
4962                                 else
4963                                         BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
4964                                                 __FILE__, __LINE__);
4965                         }
4966
4967                         break;
4968                 default:
4969                         DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
4970                                 (u32) command);
4971
4972                         /* We don't know how to handle the IOCTL, pass it on. */
4973                         error = ether_ioctl(ifp, command, data);
4974                         break;
4975         }
4976
4977 #ifdef DEVICE_POLLING
4978 bce_ioctl_exit:
4979 #endif
4980
4981         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4982
4983         return(error);
4984 }
4985
4986
4987 /****************************************************************************/
4988 /* Transmit timeout handler.                                                */
4989 /*                                                                          */
4990 /* Returns:                                                                 */
4991 /*   Nothing.                                                               */
4992 /****************************************************************************/
4993 static void
4994 bce_watchdog(struct ifnet *ifp)
4995 {
4996         struct bce_softc *sc = ifp->if_softc;
4997
4998         DBRUN(BCE_WARN_SEND, 
4999                 bce_dump_driver_state(sc);
5000                 bce_dump_status_block(sc));
5001
5002         BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n", 
5003                 __FILE__, __LINE__);
5004
5005         /* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5006
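        /*
         * Mark the interface down and reinitialize the controller; the
         * timeout itself is counted as an output error below.
         */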
5007         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5008
5009         bce_init(sc);
5010         ifp->if_oerrors++;
5011
5012 }
5013
5014
5015 #ifdef DEVICE_POLLING
5016 static void
5017 bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
5018 {
5019         struct bce_softc *sc = ifp->if_softc;
5020
5021         BCE_LOCK_ASSERT(sc);
5022
5023         sc->bce_rxcycles = count;
5024
5025         bus_dmamap_sync(sc->status_tag, sc->status_map,
5026             BUS_DMASYNC_POSTWRITE);
5027
5028         /* Check for any completed RX frames. */
5029         if (sc->status_block->status_rx_quick_consumer_index0 != 
5030                 sc->hw_rx_cons)
5031                 bce_rx_intr(sc);
5032
5033         /* Check for any completed TX frames. */
5034         if (sc->status_block->status_tx_quick_consumer_index0 != 
5035                 sc->hw_tx_cons)
5036                 bce_tx_intr(sc);
5037
5038         /* Check for new frames to transmit. */
5039         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5040                 bce_start_locked(ifp);
5041
5042 }
5043
5044
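/*
 * Unlocked wrapper for the polling routine above.  When IFCAP_POLLING is
 * enabled the interrupt handler exits early and all RX/TX completion
 * processing is driven from here by the kernel's polling framework.
 */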
5045 static void
5046 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5047 {
5048         struct bce_softc *sc = ifp->if_softc;
5049
5050         BCE_LOCK(sc);
5051         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5052                 bce_poll_locked(ifp, cmd, count);
5053         BCE_UNLOCK(sc);
5054 }
5055 #endif /* DEVICE_POLLING */
5056
5057
5058 #if 0
5059 static inline int
5060 bce_has_work(struct bce_softc *sc)
5061 {
5062         struct status_block *stat = sc->status_block;
5063
5064         if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
5065             (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
5066                 return 1;
5067
5068         if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
5069             sc->bce_link)
5070                 return 1;
5071
5072         return 0;
5073 }
5074 #endif
5075
5076
5077 /*
5078  * Interrupt handler.
5079  */
5080 /****************************************************************************/
5081 /* Main interrupt entry point.  Verifies that the controller generated the  */
5082 /* interrupt and then calls a separate routine to handle the various        */
5083 /* interrupt causes (PHY, TX, RX).                                          */
5084 /*                                                                          */
5085 /* Returns:                                                                 */
5086 /*   Nothing.                                                               */
5087 /****************************************************************************/
5088 static void
5089 bce_intr(void *xsc)
5090 {
5091         struct bce_softc *sc;
5092         struct ifnet *ifp;
5093         u32 status_attn_bits;
5094
5095         sc = xsc;
5096         ifp = sc->bce_ifp;
5097
5098         BCE_LOCK(sc);
5099
5100         DBRUNIF(1, sc->interrupts_generated++);
5101
5102 #ifdef DEVICE_POLLING
5103         if (ifp->if_capenable & IFCAP_POLLING) {
5104                 DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
5105                 goto bce_intr_exit;
5106         }
5107 #endif
5108
5109         bus_dmamap_sync(sc->status_tag, sc->status_map,
5110             BUS_DMASYNC_POSTWRITE);
5111
5112         /*
5113          * If the hardware status block index
5114          * matches the last value read by the
5115          * driver and we haven't asserted our
5116          * interrupt then there's nothing to do.
5117          */
5118         if ((sc->status_block->status_idx == sc->last_status_idx) && 
5119                 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5120                 goto bce_intr_exit;
5121
5122         /* Ack the interrupt and stop others from occurring. */
5123         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5124                 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5125                 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5126
5127         /* Keep processing data as long as there is work to do. */
5128         for (;;) {
5129
5130                 status_attn_bits = sc->status_block->status_attn_bits;
5131
5132                 DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
5133                         BCE_PRINTF(sc, "Simulating unexpected status attention bit set.\n");
5134                         status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
5135
5136                 /* Was it a link change interrupt? */
5137                 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5138                         (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5139                         bce_phy_intr(sc);
5140
5141                 /* If any other attention is asserted then the chip is toast. */
5142                 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5143                         (sc->status_block->status_attn_bits_ack & 
5144                         ~STATUS_ATTN_BITS_LINK_STATE))) {
5145
5146                         DBRUN(1, sc->unexpected_attentions++);
5147
5148                         BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n", 
5149                                 __FILE__, __LINE__, sc->status_block->status_attn_bits);
5150
5151                         DBRUN(BCE_FATAL, 
5152                                 if (bce_debug_unexpected_attention == 0)
5153                                         bce_breakpoint(sc));
5154
5155                         bce_init_locked(sc);
5156                         goto bce_intr_exit;
5157                 }
5158
5159                 /* Check for any completed RX frames. */
5160                 if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
5161                         bce_rx_intr(sc);
5162
5163                 /* Check for any completed TX frames. */
5164                 if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
5165                         bce_tx_intr(sc);
5166
5167                 /* Save the status block index value for use during the next interrupt. */
5168                 sc->last_status_idx = sc->status_block->status_idx;
5169
5170                 /* Prevent speculative reads from getting ahead of the status block. */
5171                 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
5172                         BUS_SPACE_BARRIER_READ);
5173
5174                 /* If there's no work left then exit the interrupt service routine. */
5175                 if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
5176                         (sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
5177                         break;
5178         
5179         }
5180
5181         bus_dmamap_sync(sc->status_tag, sc->status_map,
5182             BUS_DMASYNC_PREWRITE);
5183
5184         /* Re-enable interrupts. */
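        /*
         * The first write below updates the status block index while keeping
         * the interrupt masked (BCE_PCICFG_INT_ACK_CMD_MASK_INT); the second
         * write, without the mask bit, re-arms the interrupt.
         */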
5185         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5186                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5187                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5188         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5189                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5190
5191         /* Handle any frames that arrived while handling the interrupt. */
5192         if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5193                 bce_start_locked(ifp);
5194
5195 bce_intr_exit:
5196         BCE_UNLOCK(sc);
5197 }
5198
5199
5200 /****************************************************************************/
5201 /* Programs the various packet receive modes (broadcast and multicast).     */
5202 /*                                                                          */
5203 /* Returns:                                                                 */
5204 /*   Nothing.                                                               */
5205 /****************************************************************************/
5206 static void
5207 bce_set_rx_mode(struct bce_softc *sc)
5208 {
5209         struct ifnet *ifp;
5210         struct ifmultiaddr *ifma;
5211         u32 hashes[4] = { 0, 0, 0, 0 };
5212         u32 rx_mode, sort_mode;
5213         int h, i;
5214
5215         BCE_LOCK_ASSERT(sc);
5216
5217         ifp = sc->bce_ifp;
5218
5219         /* Initialize receive mode default settings. */
5220         rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5221                             BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5222         sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5223
5224         /*
5225          * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5226          * be enabled.
5227          */
5228         if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5229                 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5230                 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5231
5232         /*
5233          * Check for promiscuous, all multicast, or selected
5234          * multicast address filtering.
5235          */
5236         if (ifp->if_flags & IFF_PROMISC) {
5237                 DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
5238
5239                 /* Enable promiscuous mode. */
5240                 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5241                 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5242         } else if (ifp->if_flags & IFF_ALLMULTI) {
5243                 DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
5244
5245                 /* Enable all multicast addresses. */
5246                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5247                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5248                 }
5249                 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5250         } else {
5251                 /* Accept one or more multicast(s). */
5252                 DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
5253
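                /*
                 * Each multicast address is hashed with ether_crc32_le() and
                 * reduced to 7 bits: bits 6:5 select one of the four hash
                 * registers written below and bits 4:0 select the bit within
                 * that register.
                 */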
5254                 IF_ADDR_LOCK(ifp);
5255                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5256                         if (ifma->ifma_addr->sa_family != AF_LINK)
5257                                 continue;
5258                         h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5259                         ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
5260                         hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
5261                 }
5262                 IF_ADDR_UNLOCK(ifp);
5263
5264                 for (i = 0; i < 4; i++)
5265                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5266
5267                 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5268         }
5269
5270         /* Only make changes if the receive mode has actually changed. */
5271         if (rx_mode != sc->rx_mode) {
5272                 DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n", 
5273                         rx_mode);
5274
5275                 sc->rx_mode = rx_mode;
5276                 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5277         }
5278
5279         /* Disable and clear the existing sort before enabling a new sort. */
5280         REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5281         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5282         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5283 }
5284
5285
5286 /****************************************************************************/
5287 /* Called periodically to update statistics from the controller's           */
5288 /* statistics block.                                                        */
5289 /*                                                                          */
5290 /* Returns:                                                                 */
5291 /*   Nothing.                                                               */
5292 /****************************************************************************/
5293 static void
5294 bce_stats_update(struct bce_softc *sc)
5295 {
5296         struct ifnet *ifp;
5297         struct statistics_block *stats;
5298
5299         DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5300
5301         ifp = sc->bce_ifp;
5302
5303         stats = (struct statistics_block *) sc->stats_block;
5304
5305         /* 
5306          * Update the interface statistics from the
5307          * hardware statistics.
5308          */
5309         ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
5310
5311         ifp->if_ibytes  = BCE_STATS(IfHCInOctets);
5312
5313         ifp->if_obytes  = BCE_STATS(IfHCOutOctets);
5314
5315         ifp->if_imcasts = BCE_STATS(IfHCInMulticastPkts);
5316
5317         ifp->if_omcasts = BCE_STATS(IfHCOutMulticastPkts);
5318
5319         ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
5320                           (u_long) stats->stat_EtherStatsOverrsizePkts +
5321                           (u_long) stats->stat_IfInMBUFDiscards +
5322                           (u_long) stats->stat_Dot3StatsAlignmentErrors +
5323                           (u_long) stats->stat_Dot3StatsFCSErrors;
5324
5325         ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5326                           (u_long) stats->stat_Dot3StatsExcessiveCollisions +
5327                           (u_long) stats->stat_Dot3StatsLateCollisions;
5328
5329         /* 
5330          * Certain controllers don't report 
5331          * carrier sense errors correctly.
5332          * See errata E11_5708CA0_1165. 
5333          */
5334         if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5335             !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
5336                 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5337
5338         /*
5339          * Update the sysctl statistics from the
5340          * hardware statistics.
5341          */
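        /*
         * The 64-bit hardware counters are exported by the controller as
         * separate high and low 32-bit words, so each pair is recombined
         * here before being exposed through sysctl.
         */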
5342         sc->stat_IfHCInOctets = 
5343                 ((u64) stats->stat_IfHCInOctets_hi << 32) + 
5344                  (u64) stats->stat_IfHCInOctets_lo;
5345
5346         sc->stat_IfHCInBadOctets =
5347                 ((u64) stats->stat_IfHCInBadOctets_hi << 32) + 
5348                  (u64) stats->stat_IfHCInBadOctets_lo;
5349
5350         sc->stat_IfHCOutOctets =
5351                 ((u64) stats->stat_IfHCOutOctets_hi << 32) +
5352                  (u64) stats->stat_IfHCOutOctets_lo;
5353
5354         sc->stat_IfHCOutBadOctets =
5355                 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
5356                  (u64) stats->stat_IfHCOutBadOctets_lo;
5357
5358         sc->stat_IfHCInUcastPkts =
5359                 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
5360                  (u64) stats->stat_IfHCInUcastPkts_lo;
5361
5362         sc->stat_IfHCInMulticastPkts =
5363                 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
5364                  (u64) stats->stat_IfHCInMulticastPkts_lo;
5365
5366         sc->stat_IfHCInBroadcastPkts =
5367                 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5368                  (u64) stats->stat_IfHCInBroadcastPkts_lo;
5369
5370         sc->stat_IfHCOutUcastPkts =
5371                 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
5372                  (u64) stats->stat_IfHCOutUcastPkts_lo;
5373
5374         sc->stat_IfHCOutMulticastPkts =
5375                 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5376                  (u64) stats->stat_IfHCOutMulticastPkts_lo;
5377
5378         sc->stat_IfHCOutBroadcastPkts =
5379                 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5380                  (u64) stats->stat_IfHCOutBroadcastPkts_lo;
5381
5382         sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5383                 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5384
5385         sc->stat_Dot3StatsCarrierSenseErrors =
5386                 stats->stat_Dot3StatsCarrierSenseErrors;
5387
5388         sc->stat_Dot3StatsFCSErrors = 
5389                 stats->stat_Dot3StatsFCSErrors;
5390
5391         sc->stat_Dot3StatsAlignmentErrors =
5392                 stats->stat_Dot3StatsAlignmentErrors;
5393
5394         sc->stat_Dot3StatsSingleCollisionFrames =
5395                 stats->stat_Dot3StatsSingleCollisionFrames;
5396
5397         sc->stat_Dot3StatsMultipleCollisionFrames =
5398                 stats->stat_Dot3StatsMultipleCollisionFrames;
5399
5400         sc->stat_Dot3StatsDeferredTransmissions =
5401                 stats->stat_Dot3StatsDeferredTransmissions;
5402
5403         sc->stat_Dot3StatsExcessiveCollisions =
5404                 stats->stat_Dot3StatsExcessiveCollisions;
5405
5406         sc->stat_Dot3StatsLateCollisions =
5407                 stats->stat_Dot3StatsLateCollisions;
5408
5409         sc->stat_EtherStatsCollisions =
5410                 stats->stat_EtherStatsCollisions;
5411
5412         sc->stat_EtherStatsFragments =
5413                 stats->stat_EtherStatsFragments;
5414
5415         sc->stat_EtherStatsJabbers =
5416                 stats->stat_EtherStatsJabbers;
5417
5418         sc->stat_EtherStatsUndersizePkts =
5419                 stats->stat_EtherStatsUndersizePkts;
5420
5421         sc->stat_EtherStatsOverrsizePkts =
5422                 stats->stat_EtherStatsOverrsizePkts;
5423
5424         sc->stat_EtherStatsPktsRx64Octets =
5425                 stats->stat_EtherStatsPktsRx64Octets;
5426
5427         sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5428                 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5429
5430         sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5431                 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5432
5433         sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5434                 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5435
5436         sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5437                 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5438
5439         sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5440                 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5441
5442         sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5443                 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5444
5445         sc->stat_EtherStatsPktsTx64Octets =
5446                 stats->stat_EtherStatsPktsTx64Octets;
5447
5448         sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5449                 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5450
5451         sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5452                 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5453
5454         sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5455                 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5456
5457         sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5458                 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5459
5460         sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5461                 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5462
5463         sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5464                 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5465
5466         sc->stat_XonPauseFramesReceived =
5467                 stats->stat_XonPauseFramesReceived;
5468
5469         sc->stat_XoffPauseFramesReceived =
5470                 stats->stat_XoffPauseFramesReceived;
5471
5472         sc->stat_OutXonSent =
5473                 stats->stat_OutXonSent;
5474
5475         sc->stat_OutXoffSent =
5476                 stats->stat_OutXoffSent;
5477
5478         sc->stat_FlowControlDone =
5479                 stats->stat_FlowControlDone;
5480
5481         sc->stat_MacControlFramesReceived =
5482                 stats->stat_MacControlFramesReceived;
5483
5484         sc->stat_XoffStateEntered =
5485                 stats->stat_XoffStateEntered;
5486
5487         sc->stat_IfInFramesL2FilterDiscards =
5488                 stats->stat_IfInFramesL2FilterDiscards;
5489
5490         sc->stat_IfInRuleCheckerDiscards =
5491                 stats->stat_IfInRuleCheckerDiscards;
5492
5493         sc->stat_IfInFTQDiscards =
5494                 stats->stat_IfInFTQDiscards;
5495
5496         sc->stat_IfInMBUFDiscards =
5497                 stats->stat_IfInMBUFDiscards;
5498
5499         sc->stat_IfInRuleCheckerP4Hit =
5500                 stats->stat_IfInRuleCheckerP4Hit;
5501
5502         sc->stat_CatchupInRuleCheckerDiscards =
5503                 stats->stat_CatchupInRuleCheckerDiscards;
5504
5505         sc->stat_CatchupInFTQDiscards =
5506                 stats->stat_CatchupInFTQDiscards;
5507
5508         sc->stat_CatchupInMBUFDiscards =
5509                 stats->stat_CatchupInMBUFDiscards;
5510
5511         sc->stat_CatchupInRuleCheckerP4Hit =
5512                 stats->stat_CatchupInRuleCheckerP4Hit;
5513
5514         DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
5515 }
5516
5517
5518 static void
5519 bce_tick_locked(struct bce_softc *sc)
5520 {
5521         struct mii_data *mii = NULL;
5522         struct ifnet *ifp;
5523         u32 msg;
5524
5525         ifp = sc->bce_ifp;
5526
5527         BCE_LOCK_ASSERT(sc);
5528
5529         /* Tell the firmware that the driver is still running. */
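        /*
         * A new pulse value is written to the shared memory mailbox on every
         * tick; debug builds write the ALWAYS_ALIVE code instead so a driver
         * halted in the debugger is still reported as alive.
         */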
5530 #ifdef BCE_DEBUG
5531         msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5532 #else
5533         msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5534 #endif
5535         REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5536
5537         /* Update the statistics from the hardware statistics block. */
5538         bce_stats_update(sc);
5539
5540         /* Schedule the next tick. */
5541         callout_reset(
5542                 &sc->bce_stat_ch,               /* callout */
5543                 hz,                                     /* ticks */
5544                 bce_tick,                               /* function */
5545                 sc);                                    /* function argument */
5546
5547         /* If the link is already up then we're done. */
5548         if (sc->bce_link)
5549                 goto bce_tick_locked_exit;
5550
5551         /* DRC - ToDo: Add SerDes support and check SerDes link here. */
5552
5553         mii = device_get_softc(sc->bce_miibus);
5554         mii_tick(mii);
5555
5556         /* Check if the link has come up. */
5557         if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5558             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5559                 sc->bce_link++;
5560                 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5561                     IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5562                     bootverbose)
5563                         BCE_PRINTF(sc, "Gigabit link up\n");
5564                 /* Now that link is up, handle any outstanding TX traffic. */
5565                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5566                         bce_start_locked(ifp);
5567         }
5568
5569 bce_tick_locked_exit:
5570         return;
5571 }
5572
5573
5574 static void
5575 bce_tick(void *xsc)
5576 {
5577         struct bce_softc *sc;
5578
5579         sc = xsc;
5580
5581         BCE_LOCK(sc);
5582         bce_tick_locked(sc);
5583         BCE_UNLOCK(sc);
5584 }
5585
5586
5587 #ifdef BCE_DEBUG
5588 /****************************************************************************/
5589 /* Allows the driver state to be dumped through the sysctl interface.       */
5590 /*                                                                          */
5591 /* Returns:                                                                 */
5592 /*   0 for success, positive value for failure.                             */
5593 /****************************************************************************/
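/*
 * Usage sketch: the node below is created under the per-device sysctl tree,
 * so for unit 0 it can typically be reached as
 *
 *      sysctl dev.bce.0.driver_state=1
 *
 * Writing 1 invokes bce_dump_driver_state(); any other value is accepted
 * but ignored.
 */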
5594 static int
5595 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5596 {
5597         int error;
5598         int result;
5599         struct bce_softc *sc;
5600
5601         result = -1;
5602         error = sysctl_handle_int(oidp, &result, 0, req);
5603
5604         if (error || !req->newptr)
5605                 return (error);
5606
5607         if (result == 1) {
5608                 sc = (struct bce_softc *)arg1;
5609                 bce_dump_driver_state(sc);
5610         }
5611
5612         return error;
5613 }
5614
5615
5616 /****************************************************************************/
5617 /* Allows the hardware state to be dumped through the sysctl interface.     */
5618 /*                                                                          */
5619 /* Returns:                                                                 */
5620 /*   0 for success, positive value for failure.                             */
5621 /****************************************************************************/
5622 static int
5623 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5624 {
5625         int error;
5626         int result;
5627         struct bce_softc *sc;
5628
5629         result = -1;
5630         error = sysctl_handle_int(oidp, &result, 0, req);
5631
5632         if (error || !req->newptr)
5633                 return (error);
5634
5635         if (result == 1) {
5636                 sc = (struct bce_softc *)arg1;
5637                 bce_dump_hw_state(sc);
5638         }
5639
5640         return error;
5641 }
5642
5643
5644 /****************************************************************************/
5645 /*                                                                          */
5646 /*                                                                          */
5647 /* Returns:                                                                 */
5648 /*   0 for success, positive value for failure.                             */
5649 /****************************************************************************/
5650 static int
5651 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5652 {
5653         int error;
5654         int result;
5655         struct bce_softc *sc;
5656
5657         result = -1;
5658         error = sysctl_handle_int(oidp, &result, 0, req);
5659
5660         if (error || !req->newptr)
5661                 return (error);
5662
5663         if (result == 1) {
5664                 sc = (struct bce_softc *)arg1;
5665                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5666         }
5667
5668         return error;
5669 }
5670
5671
5672 /****************************************************************************/
5673 /*                                                                          */
5674 /*                                                                          */
5675 /* Returns:                                                                 */
5676 /*   0 for success, positive value for failure.                             */
5677 /****************************************************************************/
5678 static int
5679 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5680 {
5681         int error;
5682         int result;
5683         struct bce_softc *sc;
5684
5685         result = -1;
5686         error = sysctl_handle_int(oidp, &result, 0, req);
5687
5688         if (error || !req->newptr)
5689                 return (error);
5690
5691         if (result == 1) {
5692                 sc = (struct bce_softc *)arg1;
5693                 bce_breakpoint(sc);
5694         }
5695
5696         return error;
5697 }
5698 #endif
5699
5700
5701 /****************************************************************************/
5702 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5703 /*                                                                          */
5704 /* Returns:                                                                 */
5705 /*   Nothing.                                                               */
5706 /****************************************************************************/
5707 static void
5708 bce_add_sysctls(struct bce_softc *sc)
5709 {
5710         struct sysctl_ctx_list *ctx;
5711         struct sysctl_oid_list *children;
5712
5713         ctx = device_get_sysctl_ctx(sc->bce_dev);
5714         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
5715
5716         SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
5717                 "driver_version",
5718                 CTLFLAG_RD, &bce_driver_version,
5719                 0, "bce driver version");
5720
5721 #ifdef BCE_DEBUG
5722         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5723                 "rx_low_watermark",
5724                 CTLFLAG_RD, &sc->rx_low_watermark,
5725                 0, "Lowest level of free rx_bd's");
5726
5727         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5728                 "tx_hi_watermark",
5729                 CTLFLAG_RD, &sc->tx_hi_watermark,
5730                 0, "Highest level of used tx_bd's");
5731
5732         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5733                 "l2fhdr_status_errors",
5734                 CTLFLAG_RD, &sc->l2fhdr_status_errors,
5735                 0, "l2_fhdr status errors");
5736
5737         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5738                 "unexpected_attentions",
5739                 CTLFLAG_RD, &sc->unexpected_attentions,
5740                 0, "unexpected attentions");
5741
5742         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5743                 "lost_status_block_updates",
5744                 CTLFLAG_RD, &sc->lost_status_block_updates,
5745                 0, "lost status block updates");
5746
5747         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5748                 "mbuf_alloc_failed",
5749                 CTLFLAG_RD, &sc->mbuf_alloc_failed,
5750                 0, "mbuf cluster allocation failures");
5751 #endif 
5752
5753         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5754                 "stat_IfHCInOctets",
5755                 CTLFLAG_RD, &sc->stat_IfHCInOctets,
5756                 "Bytes received");
5757
5758         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5759                 "stat_IfHCInBadOctets",
5760                 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5761                 "Bad bytes received");
5762
5763         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5764                 "stat_IfHCOutOctets",
5765                 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5766                 "Bytes sent");
5767
5768         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5769                 "stat_IfHCOutBadOctets",
5770                 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5771                 "Bad bytes sent");
5772
5773         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5774                 "stat_IfHCInUcastPkts",
5775                 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5776                 "Unicast packets received");
5777
5778         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5779                 "stat_IfHCInMulticastPkts",
5780                 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5781                 "Multicast packets received");
5782
5783         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5784                 "stat_IfHCInBroadcastPkts",
5785                 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5786                 "Broadcast packets received");
5787
5788         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5789                 "stat_IfHCOutUcastPkts",
5790                 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5791                 "Unicast packets sent");
5792
5793         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5794                 "stat_IfHCOutMulticastPkts",
5795                 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5796                 "Multicast packets sent");
5797
5798         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5799                 "stat_IfHCOutBroadcastPkts",
5800                 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5801                 "Broadcast packets sent");
5802
5803         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5804                 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5805                 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5806                 0, "Internal MAC transmit errors");
5807
5808         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5809                 "stat_Dot3StatsCarrierSenseErrors",
5810                 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5811                 0, "Carrier sense errors");
5812
5813         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5814                 "stat_Dot3StatsFCSErrors",
5815                 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5816                 0, "Frame check sequence errors");
5817
5818         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5819                 "stat_Dot3StatsAlignmentErrors",
5820                 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5821                 0, "Alignment errors");
5822
5823         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5824                 "stat_Dot3StatsSingleCollisionFrames",
5825                 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5826                 0, "Single Collision Frames");
5827
5828         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5829                 "stat_Dot3StatsMultipleCollisionFrames",
5830                 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5831                 0, "Multiple Collision Frames");
5832
5833         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5834                 "stat_Dot3StatsDeferredTransmissions",
5835                 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5836                 0, "Deferred Transmissions");
5837
5838         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5839                 "stat_Dot3StatsExcessiveCollisions",
5840                 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5841                 0, "Excessive Collisions");
5842
5843         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5844                 "stat_Dot3StatsLateCollisions",
5845                 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5846                 0, "Late Collisions");
5847
5848         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5849                 "stat_EtherStatsCollisions",
5850                 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5851                 0, "Collisions");
5852
5853         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5854                 "stat_EtherStatsFragments",
5855                 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5856                 0, "Fragments");
5857
5858         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5859                 "stat_EtherStatsJabbers",
5860                 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5861                 0, "Jabbers");
5862
5863         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5864                 "stat_EtherStatsUndersizePkts",
5865                 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5866                 0, "Undersize packets");
5867
5868         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5869                 "stat_EtherStatsOverrsizePkts",
5870                 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5871                 0, "Oversize packets");
5872
5873         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5874                 "stat_EtherStatsPktsRx64Octets",
5875                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5876                 0, "Bytes received in 64 byte packets");
5877
5878         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5879                 "stat_EtherStatsPktsRx65Octetsto127Octets",
5880                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5881                 0, "Bytes received in 65 to 127 byte packets");
5882
5883         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5884                 "stat_EtherStatsPktsRx128Octetsto255Octets",
5885                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5886                 0, "Bytes received in 128 to 255 byte packets");
5887
5888         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5889                 "stat_EtherStatsPktsRx256Octetsto511Octets",
5890                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5891                 0, "Bytes received in 256 to 511 byte packets");
5892
5893         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5894                 "stat_EtherStatsPktsRx512Octetsto1023Octets",
5895                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5896                 0, "Bytes received in 512 to 1023 byte packets");
5897
5898         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5899                 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
5900                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5901                 0, "Bytes received in 1024 to 1522 byte packets");
5902
5903         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5904                 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
5905                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5906                 0, "Bytes received in 1523 to 9022 byte packets");
5907
5908         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5909                 "stat_EtherStatsPktsTx64Octets",
5910                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5911                 0, "Bytes sent in 64 byte packets");
5912
5913         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5914                 "stat_EtherStatsPktsTx65Octetsto127Octets",
5915                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5916                 0, "Bytes sent in 65 to 127 byte packets");
5917
5918         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5919                 "stat_EtherStatsPktsTx128Octetsto255Octets",
5920                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5921                 0, "Bytes sent in 128 to 255 byte packets");
5922
5923         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5924                 "stat_EtherStatsPktsTx256Octetsto511Octets",
5925                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5926                 0, "Bytes sent in 256 to 511 byte packets");
5927
5928         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5929                 "stat_EtherStatsPktsTx512Octetsto1023Octets",
5930                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5931                 0, "Bytes sent in 512 to 1023 byte packets");
5932
5933         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5934                 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
5935                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5936                 0, "Bytes sent in 1024 to 1522 byte packets");
5937
5938         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5939                 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
5940                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5941                 0, "Bytes sent in 1523 to 9022 byte packets");
5942
5943         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5944                 "stat_XonPauseFramesReceived",
5945                 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5946                 0, "XON pause frames received");
5947
5948         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5949                 "stat_XoffPauseFramesReceived",
5950                 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5951                 0, "XOFF pause frames received");
5952
5953         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5954                 "stat_OutXonSent",
5955                 CTLFLAG_RD, &sc->stat_OutXonSent,
5956                 0, "XON pause frames sent");
5957
5958         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5959                 "stat_OutXoffSent",
5960                 CTLFLAG_RD, &sc->stat_OutXoffSent,
5961                 0, "XOFF pause frames sent");
5962
5963         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5964                 "stat_FlowControlDone",
5965                 CTLFLAG_RD, &sc->stat_FlowControlDone,
5966                 0, "Flow control done");
5967
5968         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5969                 "stat_MacControlFramesReceived",
5970                 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5971                 0, "MAC control frames received");
5972
5973         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5974                 "stat_XoffStateEntered",
5975                 CTLFLAG_RD, &sc->stat_XoffStateEntered,
5976                 0, "XOFF state entered");
5977
5978         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5979                 "stat_IfInFramesL2FilterDiscards",
5980                 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
5981                 0, "Received L2 packets discarded");
5982
5983         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5984                 "stat_IfInRuleCheckerDiscards",
5985                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
5986                 0, "Received packets discarded by rule");
5987
5988         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5989                 "stat_IfInFTQDiscards",
5990                 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
5991                 0, "Received packet FTQ discards");
5992
5993         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5994                 "stat_IfInMBUFDiscards",
5995                 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
5996                 0, "Received packets discarded due to lack of controller buffer memory");
5997
5998         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5999                 "stat_IfInRuleCheckerP4Hit",
6000                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6001                 0, "Received packets rule checker hits");
6002
6003         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6004                 "stat_CatchupInRuleCheckerDiscards",
6005                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6006                 0, "Received packets discarded in Catchup path");
6007
6008         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6009                 "stat_CatchupInFTQDiscards",
6010                 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6011                 0, "Received packets discarded in FTQ in Catchup path");
6012
6013         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6014                 "stat_CatchupInMBUFDiscards",
6015                 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6016                 0, "Received packets discarded in controller buffer memory in Catchup path");
6017
6018         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6019                 "stat_CatchupInRuleCheckerP4Hit",
6020                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6021                 0, "Received packets rule checker hits in Catchup path");
6022
6023 #ifdef BCE_DEBUG
6024         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6025                 "driver_state", CTLTYPE_INT | CTLFLAG_RW,
6026                 (void *)sc, 0,
6027                 bce_sysctl_driver_state, "I", "Driver state information");
6028
6029         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6030                 "hw_state", CTLTYPE_INT | CTLFLAG_RW,
6031                 (void *)sc, 0,
6032                 bce_sysctl_hw_state, "I", "Hardware state information");
6033
6034         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6035                 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6036                 (void *)sc, 0,
6037                 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6038
6039         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6040                 "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6041                 (void *)sc, 0,
6042                 bce_sysctl_breakpoint, "I", "Driver breakpoint");
6043 #endif
6044
6045 }
6046
6047
6048 /****************************************************************************/
6049 /* BCE Debug Routines                                                       */
6050 /****************************************************************************/
6051 #ifdef BCE_DEBUG
6052
6053 /****************************************************************************/
6054 /* Prints out information about an mbuf.                                    */
6055 /*                                                                          */
6056 /* Returns:                                                                 */
6057 /*   Nothing.                                                               */
6058 /****************************************************************************/
6059 static void
6060 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6061 {
6062         u32 val_hi, val_lo;
6063         struct mbuf *mp = m;
6064
6065         if (m == NULL) {
6066                 /* NULL mbuf pointer. */
6067                 printf("mbuf ptr is null!\n");
6068                 return;
6069         }
6070
6071         while (mp) {
6072                 val_hi = BCE_ADDR_HI(mp);
6073                 val_lo = BCE_ADDR_LO(mp);
6074                 BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ", 
6075                            val_hi, val_lo, mp->m_len);
6076
6077                 if (mp->m_flags & M_EXT)
6078                         printf("M_EXT ");
6079                 if (mp->m_flags & M_PKTHDR)
6080                         printf("M_PKTHDR ");
6081                 printf("\n");
6082
6083                 if (mp->m_flags & M_EXT) {
6084                         val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6085                         val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6086                         BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n", 
6087                                 val_hi, val_lo, mp->m_ext.ext_size);
6088                 }
6089
6090                 mp = mp->m_next;
6091         }
6092
6093
6094 }
6095
6096
6097 /****************************************************************************/
6098 /* Prints out the mbufs in the TX mbuf chain.                               */
6099 /*                                                                          */
6100 /* Returns:                                                                 */
6101 /*   Nothing.                                                               */
6102 /****************************************************************************/
6103 static void
6104 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6105 {
6106         struct mbuf *m;
6107
6108         BCE_PRINTF(sc,
6109                 "----------------------------"
6110                 "  tx mbuf data  "
6111                 "----------------------------\n");
6112
6113         for (int i = 0; i < count; i++) {
6114                 m = sc->tx_mbuf_ptr[chain_prod];
6115                 BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6116                 bce_dump_mbuf(sc, m);
6117                 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6118         }
6119
6120         BCE_PRINTF(sc,
6121                 "----------------------------"
6122                 "----------------"
6123                 "----------------------------\n");
6124 }
6125
6126
6127 /*
6128  * This routine prints the RX mbuf chain.
6129  */
6130 static void
6131 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6132 {
6133         struct mbuf *m;
6134
6135         BCE_PRINTF(sc,
6136                 "----------------------------"
6137                 "  rx mbuf data  "
6138                 "----------------------------\n");
6139
6140         for (int i = 0; i < count; i++) {
6141                 m = sc->rx_mbuf_ptr[chain_prod];
6142                 BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6143                 bce_dump_mbuf(sc, m);
6144                 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6145         }
6146
6147
6148         BCE_PRINTF(sc,
6149                 "----------------------------"
6150                 "----------------"
6151                 "----------------------------\n");
6152 }
6153
6154
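/*
 * This routine prints a tx_bd structure.
 */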
6155 static void
6156 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6157 {
6158         if (idx > MAX_TX_BD)
6159                 /* Index out of range. */
6160                 BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6161         else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6162                 /* TX Chain page pointer. */
6163                 BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 
6164                         idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6165         else
6166                 /* Normal tx_bd entry. */
6167                 BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6168                         "flags = 0x%08X\n", idx, 
6169                         txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6170                         txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
6171 }
6172
6173
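/*
 * This routine prints a rx_bd structure.
 */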
6174 static void
6175 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6176 {
6177         if (idx > MAX_RX_BD)
6178                 /* Index out of range. */
6179                 BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6180         else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
6181                 /* RX Chain page pointer. */
6182                 BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 
6183                         idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6184         else
6185                 /* Normal rx_bd entry. */
6186                 BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6187                         "flags = 0x%08X\n", idx, 
6188                         rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6189                         rxbd->rx_bd_len, rxbd->rx_bd_flags);
6190 }
6191
6192
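/*
 * This routine prints an l2_fhdr structure.
 */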
6193 static void
6194 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6195 {
6196         BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
6197                 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6198                 "tcp_udp_xsum = 0x%04X\n", idx,
6199                 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6200                 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6201                 l2fhdr->l2_fhdr_tcp_udp_xsum);
6202 }
6203
6204
6205 /*
6206  * This routine prints the TX chain.
6207  */
6208 static void
6209 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6210 {
6211         struct tx_bd *txbd;
6212
6213         /* First some info about the tx_bd chain structure. */
6214         BCE_PRINTF(sc,
6215                 "----------------------------"
6216                 "  tx_bd  chain  "
6217                 "----------------------------\n");
6218
6219         BCE_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6220                 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6221
6222         BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6223                 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6224
6225         BCE_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6226
6227         BCE_PRINTF(sc, ""
6228                 "-----------------------------"
6229                 "   tx_bd data   "
6230                 "-----------------------------\n");
6231
6232         /* Now print out the tx_bd's themselves. */
6233         for (int i = 0; i < count; i++) {
6234                 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6235                 bce_dump_txbd(sc, tx_prod, txbd);
6236                 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6237         }
6238
6239         BCE_PRINTF(sc,
6240                 "-----------------------------"
6241                 "--------------"
6242                 "-----------------------------\n");
6243 }
6244
6245
6246 /*
6247  * This routine prints the RX chain.
6248  */
6249 static void
6250 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6251 {
6252         struct rx_bd *rxbd;
6253
6254         /* First some info about the rx_bd chain structure. */
6255         BCE_PRINTF(sc,
6256                 "----------------------------"
6257                 "  rx_bd  chain  "
6258                 "----------------------------\n");
6259
6260         BCE_PRINTF(sc, "----- RX_BD Chain -----\n");
6261
6262         BCE_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
6263                 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6264
6265         BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6266                 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6267
6268         BCE_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
6269
6270         BCE_PRINTF(sc,
6271                 "----------------------------"
6272                 "   rx_bd data   "
6273                 "----------------------------\n");
6274
6275         /* Now print out the rx_bd's themselves. */
6276         for (int i = 0; i < count; i++) {
6277                 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6278                 bce_dump_rxbd(sc, rx_prod, rxbd);
6279                 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6280         }
6281
6282         BCE_PRINTF(sc,
6283                 "----------------------------"
6284                 "--------------"
6285                 "----------------------------\n");
6286 }
6287
6288
6289 /*
6290  * This routine prints the status block.
6291  */
6292 static void
6293 bce_dump_status_block(struct bce_softc *sc)
6294 {
6295         struct status_block *sblk;
6296
6297         sblk = sc->status_block;
6298
6299         BCE_PRINTF(sc, "----------------------------- Status Block "
6300                 "-----------------------------\n");
6301
6302         BCE_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
6303                 sblk->status_attn_bits, sblk->status_attn_bits_ack,
6304                 sblk->status_idx);
6305
6306         BCE_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
6307                 sblk->status_rx_quick_consumer_index0,
6308                 sblk->status_tx_quick_consumer_index0);
6309
6310         BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
6311
6312         /* These indices are not used for normal L2 drivers. */
6313         if (sblk->status_rx_quick_consumer_index1 || 
6314                 sblk->status_tx_quick_consumer_index1)
6315                 BCE_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
6316                         sblk->status_rx_quick_consumer_index1,
6317                         sblk->status_tx_quick_consumer_index1);
6318
6319         if (sblk->status_rx_quick_consumer_index2 || 
6320                 sblk->status_tx_quick_consumer_index2)
6321                 BCE_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
6322                         sblk->status_rx_quick_consumer_index2,
6323                         sblk->status_tx_quick_consumer_index2);
6324
6325         if (sblk->status_rx_quick_consumer_index3 || 
6326                 sblk->status_tx_quick_consumer_index3)
6327                 BCE_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
6328                         sblk->status_rx_quick_consumer_index3,
6329                         sblk->status_tx_quick_consumer_index3);
6330
6331         if (sblk->status_rx_quick_consumer_index4 || 
6332                 sblk->status_rx_quick_consumer_index5)
6333                 BCE_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6334                         sblk->status_rx_quick_consumer_index4,
6335                         sblk->status_rx_quick_consumer_index5);
6336
6337         if (sblk->status_rx_quick_consumer_index6 || 
6338                 sblk->status_rx_quick_consumer_index7)
6339                 BCE_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6340                         sblk->status_rx_quick_consumer_index6,
6341                         sblk->status_rx_quick_consumer_index7);
6342
6343         if (sblk->status_rx_quick_consumer_index8 || 
6344                 sblk->status_rx_quick_consumer_index9)
6345                 BCE_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6346                         sblk->status_rx_quick_consumer_index8,
6347                         sblk->status_rx_quick_consumer_index9);
6348
6349         if (sblk->status_rx_quick_consumer_index10 || 
6350                 sblk->status_rx_quick_consumer_index11)
6351                 BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6352                         sblk->status_rx_quick_consumer_index10,
6353                         sblk->status_rx_quick_consumer_index11);
6354
6355         if (sblk->status_rx_quick_consumer_index12 || 
6356                 sblk->status_rx_quick_consumer_index13)
6357                 BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6358                         sblk->status_rx_quick_consumer_index12,
6359                         sblk->status_rx_quick_consumer_index13);
6360
6361         if (sblk->status_rx_quick_consumer_index14 || 
6362                 sblk->status_rx_quick_consumer_index15)
6363                 BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6364                         sblk->status_rx_quick_consumer_index14,
6365                         sblk->status_rx_quick_consumer_index15);
6366
6367         if (sblk->status_completion_producer_index || 
6368                 sblk->status_cmd_consumer_index)
6369                 BCE_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6370                         sblk->status_completion_producer_index,
6371                         sblk->status_cmd_consumer_index);
6372
6373         BCE_PRINTF(sc, "-------------------------------------------"
6374                 "-----------------------------\n");
6375 }
6376
6377
6378 /*
6379  * This routine prints the statistics block.
6380  */
6381 static void
6382 bce_dump_stats_block(struct bce_softc *sc)
6383 {
6384         struct statistics_block *sblk;
6385
6386         sblk = sc->stats_block;
6387
6388         BCE_PRINTF(sc,
6389                 "-----------------------------"
6390                 " Stats  Block "
6391                 "-----------------------------\n");
6392
6393         BCE_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
6394                 "IfHcInBadOctets      = 0x%08X:%08X\n",
6395                 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
6396                 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
6397
6398         BCE_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
6399                 "IfHcOutBadOctets     = 0x%08X:%08X\n",
6400                 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
6401                 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
6402
6403         BCE_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
6404                 "IfHcInMulticastPkts  = 0x%08X:%08X\n",
6405                 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
6406                 sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);
6407
6408         BCE_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
6409                 "IfHcOutUcastPkts     = 0x%08X:%08X\n",
6410                 sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
6411                 sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);
6412
6413         BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
6414                 sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
6415                 sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);
6416
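             /* The error and flow control counters below are only printed when non-zero. */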
6417         if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
6418                 BCE_PRINTF(sc, "0x%08X : "
6419                         "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6420                         sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6421
6422         if (sblk->stat_Dot3StatsCarrierSenseErrors)
6423                 BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
6424                         sblk->stat_Dot3StatsCarrierSenseErrors);
6425
6426         if (sblk->stat_Dot3StatsFCSErrors)
6427                 BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
6428                         sblk->stat_Dot3StatsFCSErrors);
6429
6430         if (sblk->stat_Dot3StatsAlignmentErrors)
6431                 BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6432                         sblk->stat_Dot3StatsAlignmentErrors);
6433
6434         if (sblk->stat_Dot3StatsSingleCollisionFrames)
6435                 BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6436                         sblk->stat_Dot3StatsSingleCollisionFrames);
6437
6438         if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6439                 BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6440                         sblk->stat_Dot3StatsMultipleCollisionFrames);
6441         
6442         if (sblk->stat_Dot3StatsDeferredTransmissions)
6443                 BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6444                         sblk->stat_Dot3StatsDeferredTransmissions);
6445
6446         if (sblk->stat_Dot3StatsExcessiveCollisions)
6447                 BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6448                         sblk->stat_Dot3StatsExcessiveCollisions);
6449
6450         if (sblk->stat_Dot3StatsLateCollisions)
6451                 BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6452                         sblk->stat_Dot3StatsLateCollisions);
6453
6454         if (sblk->stat_EtherStatsCollisions)
6455                 BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6456                         sblk->stat_EtherStatsCollisions);
6457
6458         if (sblk->stat_EtherStatsFragments) 
6459                 BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6460                         sblk->stat_EtherStatsFragments);
6461
6462         if (sblk->stat_EtherStatsJabbers)
6463                 BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6464                         sblk->stat_EtherStatsJabbers);
6465
6466         if (sblk->stat_EtherStatsUndersizePkts)
6467                 BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6468                         sblk->stat_EtherStatsUndersizePkts);
6469
6470         if (sblk->stat_EtherStatsOverrsizePkts)
6471                 BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6472                         sblk->stat_EtherStatsOverrsizePkts);
6473
6474         if (sblk->stat_EtherStatsPktsRx64Octets)
6475                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6476                         sblk->stat_EtherStatsPktsRx64Octets);
6477
6478         if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6479                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6480                         sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6481
6482         if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6483                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
6484                         sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6485
6486         if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6487                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
6488                         sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6489
6490         if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6491                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
6492                         sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6493
6494         if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6495                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
6496                         sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6497
6498         if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6499                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
6500                         sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6501
6502         if (sblk->stat_EtherStatsPktsTx64Octets)
6503                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6504                         sblk->stat_EtherStatsPktsTx64Octets);
6505
6506         if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6507                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6508                         sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6509
6510         if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6511                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
6512                         sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6513
6514         if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6515                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
6516                         sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6517
6518         if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6519                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
6520                         sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6521
6522         if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6523                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
6524                         sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6525
6526         if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6527                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
6528                         sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6529
6530         if (sblk->stat_XonPauseFramesReceived)
6531                 BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6532                         sblk->stat_XonPauseFramesReceived);
6533
6534         if (sblk->stat_XoffPauseFramesReceived)
6535                 BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6536                         sblk->stat_XoffPauseFramesReceived);
6537
6538         if (sblk->stat_OutXonSent)
6539                 BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
6540                         sblk->stat_OutXonSent);
6541
6542         if (sblk->stat_OutXoffSent)
6543                 BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
6544                         sblk->stat_OutXoffSent);
6545
6546         if (sblk->stat_FlowControlDone)
6547                 BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
6548                         sblk->stat_FlowControlDone);
6549
6550         if (sblk->stat_MacControlFramesReceived)
6551                 BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6552                         sblk->stat_MacControlFramesReceived);
6553
6554         if (sblk->stat_XoffStateEntered)
6555                 BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6556                         sblk->stat_XoffStateEntered);
6557
6558         if (sblk->stat_IfInFramesL2FilterDiscards)
6559                 BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6560                         sblk->stat_IfInFramesL2FilterDiscards);
6561
6562         if (sblk->stat_IfInRuleCheckerDiscards)
6563                 BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6564                         sblk->stat_IfInRuleCheckerDiscards);
6565
6566         if (sblk->stat_IfInFTQDiscards)
6567                 BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6568                         sblk->stat_IfInFTQDiscards);
6569
6570         if (sblk->stat_IfInMBUFDiscards)
6571                 BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6572                         sblk->stat_IfInMBUFDiscards);
6573
6574         if (sblk->stat_IfInRuleCheckerP4Hit)
6575                 BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6576                         sblk->stat_IfInRuleCheckerP4Hit);
6577
6578         if (sblk->stat_CatchupInRuleCheckerDiscards)
6579                 BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6580                         sblk->stat_CatchupInRuleCheckerDiscards);
6581
6582         if (sblk->stat_CatchupInFTQDiscards)
6583                 BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6584                         sblk->stat_CatchupInFTQDiscards);
6585
6586         if (sblk->stat_CatchupInMBUFDiscards)
6587                 BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6588                         sblk->stat_CatchupInMBUFDiscards);
6589
6590         if (sblk->stat_CatchupInRuleCheckerP4Hit)
6591                 BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6592                         sblk->stat_CatchupInRuleCheckerP4Hit);
6593
6594         BCE_PRINTF(sc,
6595                 "-----------------------------"
6596                 "--------------"
6597                 "-----------------------------\n");
6598 }
6599
6600
6601 static void
6602 bce_dump_driver_state(struct bce_softc *sc)
6603 {
6604         u32 val_hi, val_lo;
6605
6606         BCE_PRINTF(sc,
6607                 "-----------------------------"
6608                 " Driver State "
6609                 "-----------------------------\n");
6610
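             /* BCE_ADDR_HI()/BCE_ADDR_LO() split a pointer into its upper and lower 32 bits */
             /* so that 64-bit virtual addresses print cleanly.                              */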
6611         val_hi = BCE_ADDR_HI(sc);
6612         val_lo = BCE_ADDR_LO(sc);
6613         BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6614                 val_hi, val_lo);
6615
6616         val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6617         val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6618         BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6619                 val_hi, val_lo);
6620
6621         val_hi = BCE_ADDR_HI(sc->status_block);
6622         val_lo = BCE_ADDR_LO(sc->status_block);
6623         BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6624                 val_hi, val_lo);
6625
6626         val_hi = BCE_ADDR_HI(sc->stats_block);
6627         val_lo = BCE_ADDR_LO(sc->stats_block);
6628         BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6629                 val_hi, val_lo);
6630
6631         val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6632         val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6633         BCE_PRINTF(sc,
6634                 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual address\n",
6635                 val_hi, val_lo);
6636
6637         val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6638         val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6639         BCE_PRINTF(sc,
6640                 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6641                 val_hi, val_lo);
6642
6643         val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6644         val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6645         BCE_PRINTF(sc,
6646                 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6647                 val_hi, val_lo);
6648
6649         val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6650         val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6651         BCE_PRINTF(sc, 
6652                 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6653                 val_hi, val_lo);
6654
6655         BCE_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6656                 sc->interrupts_generated);
6657         
6658         BCE_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6659                 sc->rx_interrupts);
6660
6661         BCE_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6662                 sc->tx_interrupts);
6663
6664         BCE_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
6665                 sc->last_status_idx);
6666
6667         BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6668                 sc->tx_prod);
6669
6670         BCE_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6671                 sc->tx_cons);
6672
6673         BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6674                 sc->tx_prod_bseq);
6675
6676         BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6677                 sc->rx_prod);
6678
6679         BCE_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6680                 sc->rx_cons);
6681
6682         BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6683                 sc->rx_prod_bseq);
6684
6685         BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6686                 sc->rx_mbuf_alloc);
6687
6688         BCE_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6689                 sc->free_rx_bd);
6690
6691         BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6692                 sc->rx_low_watermark, (u32) USABLE_RX_BD);
6693
6694         BCE_PRINTF(sc, "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6695                 sc->tx_mbuf_alloc);
6696
6700         BCE_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6701                 sc->used_tx_bd);
6702
6703         BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6704                 sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6705
6706         BCE_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6707                 sc->mbuf_alloc_failed);
6708
6709         BCE_PRINTF(sc,
6710                 "-----------------------------"
6711                 "--------------"
6712                 "-----------------------------\n");
6713 }
6714
6715
6716 static void
6717 bce_dump_hw_state(struct bce_softc *sc)
6718 {
6719         u32 val1;
6720
6721         BCE_PRINTF(sc,
6722                 "----------------------------"
6723                 " Hardware State "
6724                 "----------------------------\n");
6725
6726         BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6727
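             /* Each line below shows the register contents followed by the register offset. */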
6728         val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6729         BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6730                 val1, BCE_MISC_ENABLE_STATUS_BITS);
6731
6732         val1 = REG_RD(sc, BCE_DMA_STATUS);
6733         BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6734
6735         val1 = REG_RD(sc, BCE_CTX_STATUS);
6736         BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6737
6738         val1 = REG_RD(sc, BCE_EMAC_STATUS);
6739         BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6740
6741         val1 = REG_RD(sc, BCE_RPM_STATUS);
6742         BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6743
6744         val1 = REG_RD(sc, BCE_TBDR_STATUS);
6745         BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6746
6747         val1 = REG_RD(sc, BCE_TDMA_STATUS);
6748         BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6749
6750         val1 = REG_RD(sc, BCE_HC_STATUS);
6751         BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6752
6753         BCE_PRINTF(sc, 
6754                 "----------------------------"
6755                 "----------------"
6756                 "----------------------------\n");
6757
6758         BCE_PRINTF(sc, 
6759                 "----------------------------"
6760                 " Register  Dump "
6761                 "----------------------------\n");
6762
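             /* Dump the register space from 0x400 through 0x7FFC, four 32-bit registers per line. */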
6763         for (int i = 0x400; i < 0x8000; i += 0x10)
6764                 BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6765                         i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6766                         REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6767
6768         BCE_PRINTF(sc, 
6769                 "----------------------------"
6770                 "----------------"
6771                 "----------------------------\n");
6772 }
6773
6774
6775 static void
6776 bce_breakpoint(struct bce_softc *sc)
6777 {
6778
6779         /* Unreachable code to shut the compiler up about unused functions. */
6780         if (0) {
6781                 bce_dump_txbd(sc, 0, NULL);
6782                 bce_dump_rxbd(sc, 0, NULL);
6783                 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6784                 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
6785                 bce_dump_l2fhdr(sc, 0, NULL);
6786                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6787                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
6788                 bce_dump_status_block(sc);
6789                 bce_dump_stats_block(sc);
6790                 bce_dump_driver_state(sc);
6791                 bce_dump_hw_state(sc);
6792         }
6793
6794         bce_dump_driver_state(sc);
6795         /* Print the important status block fields. */
6796         bce_dump_status_block(sc);
6797
6798         /* Call the debugger. */
6799         breakpoint();
6800
6801         return;
6802 }
6803 #endif