1 /*-
2  * Copyright (c) 2006 Broadcom Corporation
3  *      David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5708C B1
38  *
39  * The following controllers are not supported by this driver:
40  * (These are not "Production" versions of the controller.)
41  * 
42  *   BCM5706C A0, A1
43  *   BCM5706S A0, A1, A2, A3
44  *   BCM5708C A0, B0
45  *   BCM5708S A0, B0, B1
46  */
47
48 #include "opt_bce.h"
49
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52
53 /****************************************************************************/
54 /* BCE Driver Version                                                       */
55 /****************************************************************************/
56 char bce_driver_version[] = "v0.9.6";
57
58
59 /****************************************************************************/
60 /* BCE Debug Options                                                        */
61 /****************************************************************************/
62 #ifdef BCE_DEBUG
63         u32 bce_debug = BCE_WARN;
64
65         /*          0 = Never              */
66         /*          1 = 1 in 2,147,483,648 */
67         /*        256 = 1 in     8,388,608 */
68         /*       2048 = 1 in     1,048,576 */
69         /*      65536 = 1 in        32,768 */
70         /*    1048576 = 1 in         2,048 */
71         /*  268435456 = 1 in             8 */
72         /*  536870912 = 1 in             4 */
73         /* 1073741824 = 1 in             2 */
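        /* Each nonzero value above corresponds roughly to a probability of  */
        /* (value / 2^31) that the simulated failure fires on a given check. */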
74
75         /* Controls how often the l2_fhdr frame error check will fail. */
76         int bce_debug_l2fhdr_status_check = 0;
77
78         /* Controls how often the unexpected attention check will fail. */
79         int bce_debug_unexpected_attention = 0;
80
81         /* Controls how often to simulate an mbuf allocation failure. */
82         int bce_debug_mbuf_allocation_failure = 0;
83
84         /* Controls how often to simulate a DMA mapping failure. */
85         int bce_debug_dma_map_addr_failure = 0;
86
87         /* Controls how often to simulate a bootcode failure. */
88         int bce_debug_bootcode_running_failure = 0;
89 #endif
90
91
92 /****************************************************************************/
93 /* PCI Device ID Table                                                      */
94 /*                                                                          */
95 /* Used by bce_probe() to identify the devices supported by this driver.    */
96 /****************************************************************************/
97 #define BCE_DEVDESC_MAX         64
98
99 static struct bce_type bce_devs[] = {
100         /* BCM5706C Controllers and OEM boards. */
101         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
102                 "HP NC370T Multifunction Gigabit Server Adapter" },
103         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
104                 "HP NC370i Multifunction Gigabit Server Adapter" },
105         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
106                 "Broadcom NetXtreme II BCM5706 1000Base-T" },
107
108         /* BCM5706S controllers and OEM boards. */
109         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
110                 "HP NC370F Multifunction Gigabit Server Adapter" },
111         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
112                 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
113
114         /* BCM5708C controllers and OEM boards. */
115         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
116                 "Broadcom NetXtreme II BCM5708 1000Base-T" },
117
118         /* BCM5708S controllers and OEM boards. */
119         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID,  PCI_ANY_ID,
120                 "Broadcom NetXtreme II BCM5708 1000Base-SX" },
121         { 0, 0, 0, 0, NULL }
122 };
123
124
125 /****************************************************************************/
126 /* Supported Flash NVRAM device data.                                       */
127 /****************************************************************************/
128 static struct flash_spec flash_table[] =
129 {
130         /* Slow EEPROM */
131         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
132          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
133          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
134          "EEPROM - slow"},
135         /* Expansion entry 0001 */
136         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
137          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139          "Entry 0001"},
140         /* Saifun SA25F010 (non-buffered flash) */
141         /* strap, cfg1, & write1 need updates */
142         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
143          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
145          "Non-buffered flash (128kB)"},
146         /* Saifun SA25F020 (non-buffered flash) */
147         /* strap, cfg1, & write1 need updates */
148         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
149          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
151          "Non-buffered flash (256kB)"},
152         /* Expansion entry 0100 */
153         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
154          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
155          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156          "Entry 0100"},
157         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
158         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
159          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
160          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
161          "Entry 0101: ST M45PE10 (128kB non-buffered)"},
162         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
163         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
164          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
165          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
166          "Entry 0110: ST M45PE20 (256kB non-buffered)"},
167         /* Saifun SA25F005 (non-buffered flash) */
168         /* strap, cfg1, & write1 need updates */
169         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
170          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
171          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
172          "Non-buffered flash (64kB)"},
173         /* Fast EEPROM */
174         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
175          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
176          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177          "EEPROM - fast"},
178         /* Expansion entry 1001 */
179         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
180          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182          "Entry 1001"},
183         /* Expansion entry 1010 */
184         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
185          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
186          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
187          "Entry 1010"},
188         /* ATMEL AT45DB011B (buffered flash) */
189         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
190          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
191          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
192          "Buffered flash (128kB)"},
193         /* Expansion entry 1100 */
194         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
195          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197          "Entry 1100"},
198         /* Expansion entry 1101 */
199         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
200          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
201          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202          "Entry 1101"},
203         /* Atmel Expansion entry 1110 */
204         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
205          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
207          "Entry 1110 (Atmel)"},
208         /* ATMEL AT45DB021B (buffered flash) */
209         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
210          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
211          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
212          "Buffered flash (256kB)"},
213 };
214
215
216 /****************************************************************************/
217 /* FreeBSD device entry points.                                             */
218 /****************************************************************************/
219 static int  bce_probe                           (device_t);
220 static int  bce_attach                          (device_t);
221 static int  bce_detach                          (device_t);
222 static void bce_shutdown                        (device_t);
223
224
225 /****************************************************************************/
226 /* BCE Debug Data Structure Dump Routines                                   */
227 /****************************************************************************/
228 #ifdef BCE_DEBUG
229 static void bce_dump_mbuf                       (struct bce_softc *, struct mbuf *);
230 static void bce_dump_tx_mbuf_chain      (struct bce_softc *, int, int);
231 static void bce_dump_rx_mbuf_chain      (struct bce_softc *, int, int);
232 static void bce_dump_txbd                       (struct bce_softc *, int, struct tx_bd *);
233 static void bce_dump_rxbd                       (struct bce_softc *, int, struct rx_bd *);
234 static void bce_dump_l2fhdr                     (struct bce_softc *, int, struct l2_fhdr *);
235 static void bce_dump_tx_chain           (struct bce_softc *, int, int);
236 static void bce_dump_rx_chain           (struct bce_softc *, int, int);
237 static void bce_dump_status_block       (struct bce_softc *);
238 static void bce_dump_stats_block        (struct bce_softc *);
239 static void bce_dump_driver_state       (struct bce_softc *);
240 static void bce_dump_hw_state           (struct bce_softc *);
241 static void bce_breakpoint                      (struct bce_softc *);
242 #endif
243
244
245 /****************************************************************************/
246 /* BCE Register/Memory Access Routines                                      */
247 /****************************************************************************/
248 static u32  bce_reg_rd_ind                      (struct bce_softc *, u32);
249 static void bce_reg_wr_ind                      (struct bce_softc *, u32, u32);
250 static void bce_ctx_wr                          (struct bce_softc *, u32, u32, u32);
251 static int  bce_miibus_read_reg         (device_t, int, int);
252 static int  bce_miibus_write_reg        (device_t, int, int, int);
253 static void bce_miibus_statchg          (device_t);
254
255
256 /****************************************************************************/
257 /* BCE NVRAM Access Routines                                                */
258 /****************************************************************************/
259 static int  bce_acquire_nvram_lock      (struct bce_softc *);
260 static int  bce_release_nvram_lock      (struct bce_softc *);
261 static void bce_enable_nvram_access     (struct bce_softc *);
262 static void     bce_disable_nvram_access(struct bce_softc *);
263 static int  bce_nvram_read_dword        (struct bce_softc *, u32, u8 *, u32);
264 static int  bce_init_nvram                      (struct bce_softc *);
265 static int  bce_nvram_read                      (struct bce_softc *, u32, u8 *, int);
266 static int  bce_nvram_test                      (struct bce_softc *);
267 #ifdef BCE_NVRAM_WRITE_SUPPORT
268 static int  bce_enable_nvram_write      (struct bce_softc *);
269 static void bce_disable_nvram_write     (struct bce_softc *);
270 static int  bce_nvram_erase_page        (struct bce_softc *, u32);
271 static int  bce_nvram_write_dword       (struct bce_softc *, u32, u8 *, u32);
272 static int  bce_nvram_write                     (struct bce_softc *, u32, u8 *, int);
273 #endif
274
275 /****************************************************************************/
276 /*                                                                          */
277 /****************************************************************************/
278 static void bce_dma_map_addr            (void *, bus_dma_segment_t *, int, int);
279 static void bce_dma_map_tx_desc         (void *, bus_dma_segment_t *, int, bus_size_t, int);
280 static int  bce_dma_alloc                       (device_t);
281 static void bce_dma_free                        (struct bce_softc *);
282 static void bce_release_resources       (struct bce_softc *);
283
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load                                    */
286 /****************************************************************************/
287 static int  bce_fw_sync                         (struct bce_softc *, u32);
288 static void bce_load_rv2p_fw            (struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw                     (struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus                       (struct bce_softc *);
291
292 static void bce_stop                            (struct bce_softc *);
293 static int  bce_reset                           (struct bce_softc *, u32);
294 static int  bce_chipinit                        (struct bce_softc *);
295 static int  bce_blockinit                       (struct bce_softc *);
296 static int  bce_get_buf                         (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297
298 static int  bce_init_tx_chain           (struct bce_softc *);
299 static int  bce_init_rx_chain           (struct bce_softc *);
300 static void bce_free_rx_chain           (struct bce_softc *);
301 static void bce_free_tx_chain           (struct bce_softc *);
302
303 static int  bce_tx_encap                        (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
304 static void bce_start_locked            (struct ifnet *);
305 static void bce_start                           (struct ifnet *);
306 static int  bce_ioctl                           (struct ifnet *, u_long, caddr_t);
307 static void bce_watchdog                        (struct ifnet *);
308 static int  bce_ifmedia_upd                     (struct ifnet *);
309 static void bce_ifmedia_sts                     (struct ifnet *, struct ifmediareq *);
310 static void bce_init_locked                     (struct bce_softc *);
311 static void bce_init                            (void *);
312
313 static void bce_init_context            (struct bce_softc *);
314 static void bce_get_mac_addr            (struct bce_softc *);
315 static void bce_set_mac_addr            (struct bce_softc *);
316 static void bce_phy_intr                        (struct bce_softc *);
317 static void bce_rx_intr                         (struct bce_softc *);
318 static void bce_tx_intr                         (struct bce_softc *);
319 static void bce_disable_intr            (struct bce_softc *);
320 static void bce_enable_intr                     (struct bce_softc *);
321
322 #ifdef DEVICE_POLLING
323 static void bce_poll_locked                     (struct ifnet *, enum poll_cmd, int);
324 static void bce_poll                            (struct ifnet *, enum poll_cmd, int);
325 #endif
326 static void bce_intr                            (void *);
327 static void bce_set_rx_mode                     (struct bce_softc *);
328 static void bce_stats_update            (struct bce_softc *);
329 static void bce_tick_locked                     (struct bce_softc *);
330 static void bce_tick                            (void *);
331 static void bce_add_sysctls                     (struct bce_softc *);
332
333
334 /****************************************************************************/
335 /* FreeBSD device dispatch table.                                           */
336 /****************************************************************************/
337 static device_method_t bce_methods[] = {
338         /* Device interface */
339         DEVMETHOD(device_probe,         bce_probe),
340         DEVMETHOD(device_attach,        bce_attach),
341         DEVMETHOD(device_detach,        bce_detach),
342         DEVMETHOD(device_shutdown,      bce_shutdown),
343
344         /* bus interface */
345         DEVMETHOD(bus_print_child,      bus_generic_print_child),
346         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
347
348         /* MII interface */
349         DEVMETHOD(miibus_readreg,       bce_miibus_read_reg),
350         DEVMETHOD(miibus_writereg,      bce_miibus_write_reg),
351         DEVMETHOD(miibus_statchg,       bce_miibus_statchg),
352
353         { 0, 0 }
354 };
355
356 static driver_t bce_driver = {
357         "bce",
358         bce_methods,
359         sizeof(struct bce_softc)
360 };
361
362 static devclass_t bce_devclass;
363
364 MODULE_DEPEND(bce, pci, 1, 1, 1);
365 MODULE_DEPEND(bce, ether, 1, 1, 1);
366 MODULE_DEPEND(bce, miibus, 1, 1, 1);
367
368 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
369 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
370
371
372 /****************************************************************************/
373 /* Device probe function.                                                   */
374 /*                                                                          */
375 /* Compares the device to the driver's list of supported devices and        */
376 /* reports back to the OS whether this is the right driver for the device.  */
377 /*                                                                          */
378 /* Returns:                                                                 */
379 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
380 /****************************************************************************/
381 static int
382 bce_probe(device_t dev)
383 {
384         struct bce_type *t;
385         struct bce_softc *sc;
386         char *descbuf;
387         u16 vid = 0, did = 0, svid = 0, sdid = 0;
388
389         t = bce_devs;
390
391         sc = device_get_softc(dev);
392         bzero(sc, sizeof(struct bce_softc));
393         sc->bce_unit = device_get_unit(dev);
394         sc->bce_dev = dev;
395
396         /* Get the data for the device to be probed. */
397         vid  = pci_get_vendor(dev);
398         did  = pci_get_device(dev);
399         svid = pci_get_subvendor(dev);
400         sdid = pci_get_subdevice(dev);
401
402         DBPRINT(sc, BCE_VERBOSE_LOAD, 
403                 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
404                 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
405
406         /* Look through the list of known devices for a match. */
407         while(t->bce_name != NULL) {
408
409                 if ((vid == t->bce_vid) && (did == t->bce_did) && 
410                         ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
411                         ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
412
413                         descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
414
415                         if (descbuf == NULL)
416                                 return(ENOMEM);
417
418                         /* Print out the device identity. */
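                        /* The revision prints as a letter ('A' + upper REVID nibble)  */
                        /* plus the lower nibble, e.g. REVID 0x11 is reported as "B1". */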
419                         snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d), %s", 
420                                 t->bce_name,
421                             (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
422                             (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
423                             bce_driver_version);
424
425                         device_set_desc_copy(dev, descbuf);
426                         free(descbuf, M_TEMP);
427                         return(BUS_PROBE_DEFAULT);
428                 }
429                 t++;
430         }
431
432         DBPRINT(sc, BCE_VERBOSE_LOAD, "%s(%d): No device match found!\n", 
433                 __FILE__, __LINE__);
434
435         return(ENXIO);
436 }
437
438
439 /****************************************************************************/
440 /* Device attach function.                                                  */
441 /*                                                                          */
442 /* Allocates device resources, performs secondary chip identification,      */
443 /* resets and initializes the hardware, and initializes driver instance     */
444 /* variables.                                                               */
445 /*                                                                          */
446 /* Returns:                                                                 */
447 /*   0 on success, positive value on failure.                               */
448 /****************************************************************************/
449 static int
450 bce_attach(device_t dev)
451 {
452         struct bce_softc *sc;
453         struct ifnet *ifp;
454         u32 val;
455         int mbuf, rid, rc = 0;
456
457         sc = device_get_softc(dev);
458         sc->bce_dev = dev;
459
460         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
461
462         mbuf = device_get_unit(dev);
463         sc->bce_unit = mbuf;
464
465         pci_enable_busmaster(dev);
466
467         /* Allocate PCI memory resources. */
468         rid = PCIR_BAR(0);
469         sc->bce_res = bus_alloc_resource_any(
470                 dev,                                                    /* dev */
471                 SYS_RES_MEMORY,                                 /* type */
472                 &rid,                                                   /* rid */
473             RF_ACTIVE | PCI_RF_DENSE);          /* flags */
474
475         if (sc->bce_res == NULL) {
476                 BCE_PRINTF(sc, "%s(%d): PCI memory allocation failed\n", 
477                         __FILE__, __LINE__);
478                 rc = ENXIO;
479                 goto bce_attach_fail;
480         }
481
482         /* Get various resource handles. */
483         sc->bce_btag    = rman_get_bustag(sc->bce_res);
484         sc->bce_bhandle = rman_get_bushandle(sc->bce_res);
485         sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res);
486
487         /* Allocate PCI IRQ resources. */
488         rid = 0;
489         sc->bce_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
490             RF_SHAREABLE | RF_ACTIVE);
491
492         if (sc->bce_irq == NULL) {
493                 BCE_PRINTF(sc, "%s(%d): PCI map interrupt failed\n", 
494                         __FILE__, __LINE__);
495                 rc = ENXIO;
496                 goto bce_attach_fail;
497         }
498
499         /* Initialize mutex for the current device instance. */
500         BCE_LOCK_INIT(sc, device_get_nameunit(dev));
501
502         /*
503          * Configure byte swap and enable indirect register access.
504          * Rely on CPU to do target byte swapping on big endian systems.
505          * Access to registers outside of PCI configuration space is not
506          * valid until this is done.
507          */
508         pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
509                                BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
510                                BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
511
512         /* Save ASIC revision info. */
513         sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
514
515         /* Weed out any non-production controller revisions. */
516         switch(BCE_CHIP_ID(sc)) {
517                 case BCE_CHIP_ID_5706_A0:
518                 case BCE_CHIP_ID_5706_A1:
519                 case BCE_CHIP_ID_5708_A0:
520                 case BCE_CHIP_ID_5708_B0:
521                         BCE_PRINTF(sc, "%s(%d): Unsupported controller revision (%c%d)!\n",
522                                 __FILE__, __LINE__, 
523                                 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
524                             (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
525                         rc = ENODEV;
526                         goto bce_attach_fail;
527         }
528
529         if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
530                 BCE_PRINTF(sc, "%s(%d): SerDes controllers are not supported!\n",
531                         __FILE__, __LINE__);
532                 rc = ENODEV;
533                 goto bce_attach_fail;
534         }
535
536         /* 
537          * The embedded PCIe to PCI-X bridge (EPB) 
538          * in the 5708 cannot address memory above 
539          * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 
540          */
541         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
542                 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
543         else
544                 sc->max_bus_addr = BUS_SPACE_MAXADDR;
545
546         /*
547          * Find the base address for shared memory access.
548          * Newer versions of bootcode use a signature and offset
549          * while older versions use a fixed address.
550          */
551         val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
552         if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
553                 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
554         else
555                 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
556
557         DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
558
559         /* Set initial device and PHY flags */
560         sc->bce_flags = 0;
561         sc->bce_phy_flags = 0;
562
563         /* Get PCI bus information (speed and type). */
564         val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
565         if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
566                 u32 clkreg;
567
568                 sc->bce_flags |= BCE_PCIX_FLAG;
569
570                 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
571
572                 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
573                 switch (clkreg) {
574                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
575                         sc->bus_speed_mhz = 133;
576                         break;
577
578                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
579                         sc->bus_speed_mhz = 100;
580                         break;
581
582                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
583                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
584                         sc->bus_speed_mhz = 66;
585                         break;
586
587                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
588                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
589                         sc->bus_speed_mhz = 50;
590                         break;
591
592                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
593                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
594                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
595                         sc->bus_speed_mhz = 33;
596                         break;
597                 }
598         } else {
599                 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
600                         sc->bus_speed_mhz = 66;
601                 else
602                         sc->bus_speed_mhz = 33;
603         }
604
605         if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
606                 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
607
608         BCE_PRINTF(sc, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
609                 sc->bce_chipid,
610                 ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
611                 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4),
612                 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
613                 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
614                 sc->bus_speed_mhz);
615
616         /* Reset the controller. */
617         if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
618                 rc = ENXIO;
619                 goto bce_attach_fail;
620         }
621
622         /* Initialize the controller. */
623         if (bce_chipinit(sc)) {
624                 BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n",
625                         __FILE__, __LINE__);
626                 rc = ENXIO;
627                 goto bce_attach_fail;
628         }
629
630         /* Perform NVRAM test. */
631         if (bce_nvram_test(sc)) {
632                 BCE_PRINTF(sc, "%s(%d): NVRAM test failed!\n",
633                         __FILE__, __LINE__);
634                 rc = ENXIO;
635                 goto bce_attach_fail;
636         }
637
638         /* Fetch the permanent Ethernet MAC address. */
639         bce_get_mac_addr(sc);
640
641         /*
642          * Trip points control how many BDs
643          * should be ready before generating an
644          * interrupt, while ticks control how long
645          * a BD can sit in the chain before
646          * generating an interrupt.  Set the default 
647          * values for the RX and TX rings.
648          */
649
650 #ifdef BCE_DEBUG
651         /* Force more frequent interrupts. */
652         sc->bce_tx_quick_cons_trip_int = 1;
653         sc->bce_tx_quick_cons_trip     = 1;
654         sc->bce_tx_ticks_int           = 0;
655         sc->bce_tx_ticks               = 0;
656
657         sc->bce_rx_quick_cons_trip_int = 1;
658         sc->bce_rx_quick_cons_trip     = 1;
659         sc->bce_rx_ticks_int           = 0;
660         sc->bce_rx_ticks               = 0;
661 #else
662         sc->bce_tx_quick_cons_trip_int = 20;
663         sc->bce_tx_quick_cons_trip     = 20;
664         sc->bce_tx_ticks_int           = 80;
665         sc->bce_tx_ticks               = 80;
666
667         sc->bce_rx_quick_cons_trip_int = 6;
668         sc->bce_rx_quick_cons_trip     = 6;
669         sc->bce_rx_ticks_int           = 18;
670         sc->bce_rx_ticks               = 18;
671 #endif
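        /*
         * With the non-debug defaults above, a host coalescing interrupt is
         * generated once 20 TX (or 6 RX) buffer descriptors have completed,
         * or when the corresponding tick timer expires, whichever happens first.
         */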
672
673         /* Update statistics once every second. */
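        /* The interval is given in microseconds; the mask below keeps only */
        /* the bits implemented by the controller's statistics ticks field. */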
674         sc->bce_stats_ticks = 1000000 & 0xffff00;
675
676         /*
677          * The copper based NetXtreme II controllers
678          * use an integrated PHY at address 1 while
679          * the SerDes controllers use a PHY at
680          * address 2.
681          */
682         sc->bce_phy_addr = 1;
683
684         if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
685                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
686                 sc->bce_flags |= BCE_NO_WOL_FLAG;
687                 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
688                         sc->bce_phy_addr = 2;
689                         val = REG_RD_IND(sc, sc->bce_shmem_base +
690                                          BCE_SHARED_HW_CFG_CONFIG);
691                         if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
692                                 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
693                 }
694         }
695
696         /* Allocate DMA memory resources. */
697         if (bce_dma_alloc(dev)) {
698                 BCE_PRINTF(sc, "%s(%d): DMA resource allocation failed!\n",
699                     __FILE__, __LINE__);
700                 rc = ENXIO;
701                 goto bce_attach_fail;
702         }
703
704         /* Allocate an ifnet structure. */
705         ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
706         if (ifp == NULL) {
707                 BCE_PRINTF(sc, "%s(%d): Interface allocation failed!\n", 
708                         __FILE__, __LINE__);
709                 rc = ENXIO;
710                 goto bce_attach_fail;
711         }
712
713         /* Initialize the ifnet interface. */
714         ifp->if_softc        = sc;
715         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
716         ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
717         ifp->if_ioctl        = bce_ioctl;
718         ifp->if_start        = bce_start;
719         ifp->if_timer        = 0;
720         ifp->if_watchdog     = bce_watchdog;
721         ifp->if_init         = bce_init;
722         ifp->if_mtu          = ETHERMTU;
723         ifp->if_hwassist     = BCE_IF_HWASSIST;
724         ifp->if_capabilities = BCE_IF_CAPABILITIES;
725         ifp->if_capenable    = ifp->if_capabilities;
726
727         /* Assume a standard 1500 byte MTU size for mbuf allocations. */
728         sc->mbuf_alloc_size  = MCLBYTES;
729 #ifdef DEVICE_POLLING
730         ifp->if_capabilities |= IFCAP_POLLING;
731 #endif
732
733         ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
734         if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
735                 ifp->if_baudrate = IF_Gbps(2.5);
736         else
737                 ifp->if_baudrate = IF_Gbps(1);
738
739         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
740         IFQ_SET_READY(&ifp->if_snd);
741
742         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
743                 BCE_PRINTF(sc, "%s(%d): SerDes is not supported by this driver!\n", 
744                         __FILE__, __LINE__);
745                 rc = ENODEV;
746                 goto bce_attach_fail;
747         } else {
748                 /* Look for our PHY. */
749                 if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
750                         bce_ifmedia_sts)) {
751                         BCE_PRINTF(sc, "%s(%d): PHY probe failed!\n", 
752                                 __FILE__, __LINE__);
753                         rc = ENXIO;
754                         goto bce_attach_fail;
755                 }
756         }
757
758         /* Attach to the Ethernet interface list. */
759         ether_ifattach(ifp, sc->eaddr);
760
761 #if __FreeBSD_version < 500000
762         callout_init(&sc->bce_stat_ch);
763 #else
764         callout_init(&sc->bce_stat_ch, CALLOUT_MPSAFE);
765 #endif
766
767         /* Hookup IRQ last. */
768         rc = bus_setup_intr(dev, sc->bce_irq, INTR_TYPE_NET | INTR_MPSAFE,
769            bce_intr, sc, &sc->bce_intrhand);
770
771         if (rc) {
772                 BCE_PRINTF(sc, "%s(%d): Failed to setup IRQ!\n", 
773                         __FILE__, __LINE__);
774                 bce_detach(dev);
775                 goto bce_attach_exit;
776         }
777
778         /* Print some important debugging info. */
779         DBRUN(BCE_INFO, bce_dump_driver_state(sc));
780
781         /* Add the supported sysctls to the kernel. */
782         bce_add_sysctls(sc);
783
784         goto bce_attach_exit;
785
786 bce_attach_fail:
787         bce_release_resources(sc);
788
789 bce_attach_exit:
790
791         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
792
793         return(rc);
794 }
795
796
797 /****************************************************************************/
798 /* Device detach function.                                                  */
799 /*                                                                          */
800 /* Stops the controller, resets the controller, and releases resources.     */
801 /*                                                                          */
802 /* Returns:                                                                 */
803 /*   0 on success, positive value on failure.                               */
804 /****************************************************************************/
805 static int
806 bce_detach(device_t dev)
807 {
808         struct bce_softc *sc;
809         struct ifnet *ifp;
810
811         sc = device_get_softc(dev);
812
813         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
814
815         ifp = sc->bce_ifp;
816
817 #ifdef DEVICE_POLLING
818         if (ifp->if_capenable & IFCAP_POLLING)
819                 ether_poll_deregister(ifp);
820 #endif
821
822         /* Stop and reset the controller. */
823         BCE_LOCK(sc);
824         bce_stop(sc);
825         bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
826         BCE_UNLOCK(sc);
827
828         ether_ifdetach(ifp);
829
830         /* If we have a child device on the MII bus remove it too. */
831         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
832                 ifmedia_removeall(&sc->bce_ifmedia);
833         } else {
834                 bus_generic_detach(dev);
835                 device_delete_child(dev, sc->bce_miibus);
836         }
837
838         /* Release all remaining resources. */
839         bce_release_resources(sc);
840
841         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
842
843         return(0);
844 }
845
846
847 /****************************************************************************/
848 /* Device shutdown function.                                                */
849 /*                                                                          */
850 /* Stops and resets the controller.                                         */
851 /*                                                                          */
852 /* Returns:                                                                 */
853 /*   Nothing                                                                */
854 /****************************************************************************/
855 static void
856 bce_shutdown(device_t dev)
857 {
858         struct bce_softc *sc = device_get_softc(dev);
859
860         BCE_LOCK(sc);
861         bce_stop(sc);
862         bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
863         BCE_UNLOCK(sc);
864 }
865
866
867 /****************************************************************************/
868 /* Indirect register read.                                                  */
869 /*                                                                          */
870 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
871 /* configuration space.  Using this mechanism avoids issues with posted     */
872 /* reads but is much slower than memory-mapped I/O.                         */
873 /*                                                                          */
874 /* Returns:                                                                 */
875 /*   The value of the register.                                             */
876 /****************************************************************************/
877 static u32
878 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
879 {
880         device_t dev;
881         dev = sc->bce_dev;
882
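        /* Select the target register through the window address register, */
        /* then read the value back through the window data register.      */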
883         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
884 #ifdef BCE_DEBUG
885         {
886                 u32 val;
887                 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
888                 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
889                         __FUNCTION__, offset, val);
890                 return val;
891         }
892 #else
893         return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
894 #endif
895 }
896
897
898 /****************************************************************************/
899 /* Indirect register write.                                                 */
900 /*                                                                          */
901 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
902 /* configuration space.  Using this mechanism avoids issues with posted     */
903 /* writes but is much slower than memory-mapped I/O.                        */
904 /*                                                                          */
905 /* Returns:                                                                 */
906 /*   Nothing.                                                               */
907 /****************************************************************************/
908 static void
909 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
910 {
911         device_t dev;
912         dev = sc->bce_dev;
913
914         DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
915                 __FUNCTION__, offset, val);
916
917         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
918         pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
919 }
920
921
922 /****************************************************************************/
923 /* Context memory write.                                                    */
924 /*                                                                          */
925 /* The NetXtreme II controller uses context memory to track connection      */
926 /* information for L2 and higher network protocols.                         */
927 /*                                                                          */
928 /* Returns:                                                                 */
929 /*   Nothing.                                                               */
930 /****************************************************************************/
931 static void
932 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
933 {
934
935         DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
936                 "val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
937
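        /* The context memory address is the connection's CID base address */
        /* plus the offset of the field within that context.               */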
938         offset += cid_addr;
939         REG_WR(sc, BCE_CTX_DATA_ADR, offset);
940         REG_WR(sc, BCE_CTX_DATA, val);
941 }
942
943
944 /****************************************************************************/
945 /* PHY register read.                                                       */
946 /*                                                                          */
947 /* Implements register reads on the MII bus.                                */
948 /*                                                                          */
949 /* Returns:                                                                 */
950 /*   The value of the register.                                             */
951 /****************************************************************************/
952 static int
953 bce_miibus_read_reg(device_t dev, int phy, int reg)
954 {
955         struct bce_softc *sc;
956         u32 val;
957         int i;
958
959         sc = device_get_softc(dev);
960
961         /* Make sure we are accessing the correct PHY address. */
962         if (phy != sc->bce_phy_addr) {
963                 DBPRINT(sc, BCE_VERBOSE, "Invalid PHY address %d for PHY read!\n", phy);
964                 return(0);
965         }
966
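        /* Temporarily disable PHY auto-polling so the MDIO interface is free */
        /* for this direct access; auto-polling is re-enabled further below.  */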
967         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
968                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
969                 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
970
971                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
972                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
973
974                 DELAY(40);
975         }
976
977         val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
978                 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
979                 BCE_EMAC_MDIO_COMM_START_BUSY;
980         REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
981
982         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
983                 DELAY(10);
984
985                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
986                 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
987                         DELAY(5);
988
989                         val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
990                         val &= BCE_EMAC_MDIO_COMM_DATA;
991
992                         break;
993                 }
994         }
995
996         if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
997                 BCE_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
998                         __FILE__, __LINE__, phy, reg);
999                 val = 0x0;
1000         } else {
1001                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1002         }
1003
1004         DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1005                 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1006
1007         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1008                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1009                 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1010
1011                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1012                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1013
1014                 DELAY(40);
1015         }
1016
1017         return (val & 0xffff);
1018
1019 }
1020
1021
1022 /****************************************************************************/
1023 /* PHY register write.                                                      */
1024 /*                                                                          */
1025 /* Implements register writes on the MII bus.                               */
1026 /*                                                                          */
1027 /* Returns:                                                                 */
1028 /*   0 on success.                                                          */
1029 /****************************************************************************/
1030 static int
1031 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1032 {
1033         struct bce_softc *sc;
1034         u32 val1;
1035         int i;
1036
1037         sc = device_get_softc(dev);
1038
1039         /* Make sure we are accessing the correct PHY address. */
1040         if (phy != sc->bce_phy_addr) {
1041                 DBPRINT(sc, BCE_WARN, "Invalid PHY address %d for PHY write!\n", phy);
1042                 return(0);
1043         }
1044
1045         DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1046                 __FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1047
1048         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1049                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1050                 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1051
1052                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1053                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1054
1055                 DELAY(40);
1056         }
1057
1058         val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1059                 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1060                 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1061         REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1062
1063         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1064                 DELAY(10);
1065
1066                 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1067                 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1068                         DELAY(5);
1069                         break;
1070                 }
1071         }
1072
1073         if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1074                 BCE_PRINTF(sc, "%s(%d): PHY write timeout!\n", 
1075                         __FILE__, __LINE__);
1076
1077         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1078                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1079                 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1080
1081                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1082                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1083
1084                 DELAY(40);
1085         }
1086
1087         return 0;
1088 }
1089
1090
1091 /****************************************************************************/
1092 /* MII bus status change.                                                   */
1093 /*                                                                          */
1094 /* Called by the MII bus driver when the PHY establishes link to set the    */
1095 /* MAC interface registers.                                                 */
1096 /*                                                                          */
1097 /* Returns:                                                                 */
1098 /*   Nothing.                                                               */
1099 /****************************************************************************/
1100 static void
1101 bce_miibus_statchg(device_t dev)
1102 {
1103         struct bce_softc *sc;
1104         struct mii_data *mii;
1105
1106         sc = device_get_softc(dev);
1107
1108         mii = device_get_softc(sc->bce_miibus);
1109
1110         BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1111
1112         /* Set MII or GMII interface based on the speed negotiated by the PHY. */
1113         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
1114                 DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1115                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1116         } else {
1117                 DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1118                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1119         }
1120
1121         /* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1122         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1123                 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1124                 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1125         } else {
1126                 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1127                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1128         }
1129 }
1130
1131
1132 /****************************************************************************/
1133 /* Acquire NVRAM lock.                                                      */
1134 /*                                                                          */
1135 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1136 /* The driver uses lock 2; the remaining locks are reserved for use by the  */
1137 /* bootcode/firmware.                                                       */
1138 /*                                                                          */
1139 /* Returns:                                                                 */
1140 /*   0 on success, positive value on failure.                               */
1141 /****************************************************************************/
1142 static int
1143 bce_acquire_nvram_lock(struct bce_softc *sc)
1144 {
1145         u32 val;
1146         int j;
1147
1148         DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1149
1150         /* Request access to the flash interface. */
1151         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1152         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1153                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1154                 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1155                         break;
1156
1157                 DELAY(5);
1158         }
1159
1160         if (j >= NVRAM_TIMEOUT_COUNT) {
1161                 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1162                 return EBUSY;
1163         }
1164
1165         return 0;
1166 }
1167
1168
1169 /****************************************************************************/
1170 /* Release NVRAM lock.                                                      */
1171 /*                                                                          */
1172 /* When the caller is finished accessing NVRAM the lock must be released.   */
1173 /* The driver uses lock 2; the remaining locks are reserved for use by the  */
1174 /* bootcode/firmware.                                                       */
1175 /*                                                                          */
1176 /* Returns:                                                                 */
1177 /*   0 on success, positive value on failure.                               */
1178 /****************************************************************************/
1179 static int
1180 bce_release_nvram_lock(struct bce_softc *sc)
1181 {
1182         int j;
1183         u32 val;
1184
1185         DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1186
1187         /*
1188          * Relinquish nvram interface.
1189          */
1190         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1191
1192         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1193                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1194                 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1195                         break;
1196
1197                 DELAY(5);
1198         }
1199
1200         if (j >= NVRAM_TIMEOUT_COUNT) {
1201                 DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1202                 return EBUSY;
1203         }
1204
1205         return 0;
1206 }
1207
1208
1209 #ifdef BCE_NVRAM_WRITE_SUPPORT
1210 /****************************************************************************/
1211 /* Enable NVRAM write access.                                               */
1212 /*                                                                          */
1213 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1214 /*                                                                          */
1215 /* Returns:                                                                 */
1216 /*   0 on success, positive value on failure.                               */
1217 /****************************************************************************/
1218 static int
1219 bce_enable_nvram_write(struct bce_softc *sc)
1220 {
1221         u32 val;
1222
1223         DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1224
1225         val = REG_RD(sc, BCE_MISC_CFG);
1226         REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1227
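        /* Non-buffered flash parts additionally require an explicit write   */
        /* enable (WREN) command and a wait for the command to complete.     */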
1228         if (!sc->bce_flash_info->buffered) {
1229                 int j;
1230
1231                 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1232                 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1233
1234                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1235                         DELAY(5);
1236
1237                         val = REG_RD(sc, BCE_NVM_COMMAND);
1238                         if (val & BCE_NVM_COMMAND_DONE)
1239                                 break;
1240                 }
1241
1242                 if (j >= NVRAM_TIMEOUT_COUNT) {
1243                         DBPRINT(sc, BCE_WARN, "Timeout enabling NVRAM write!\n");
1244                         return EBUSY;
1245                 }
1246         }
1247         return 0;
1248 }
1249
1250
1251 /****************************************************************************/
1252 /* Disable NVRAM write access.                                              */
1253 /*                                                                          */
1254 /* When the caller is finished writing to NVRAM write access must be        */
1255 /* disabled.                                                                */
1256 /*                                                                          */
1257 /* Returns:                                                                 */
1258 /*   Nothing.                                                               */
1259 /****************************************************************************/
1260 static void
1261 bce_disable_nvram_write(struct bce_softc *sc)
1262 {
1263         u32 val;
1264
1265         DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1266
1267         val = REG_RD(sc, BCE_MISC_CFG);
1268         REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1269 }
1270 #endif
1271
1272
1273 /****************************************************************************/
1274 /* Enable NVRAM access.                                                     */
1275 /*                                                                          */
1276 /* Before accessing NVRAM for read or write operations the caller must      */
1277 /* enable NVRAM access.                                                      */
1278 /*                                                                          */
1279 /* Returns:                                                                 */
1280 /*   Nothing.                                                               */
1281 /****************************************************************************/
1282 static void
1283 bce_enable_nvram_access(struct bce_softc *sc)
1284 {
1285         u32 val;
1286
1287         DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1288
1289         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1290         /* Enable both bits, even on read. */
1291         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1292                val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1293 }
1294
1295
1296 /****************************************************************************/
1297 /* Disable NVRAM access.                                                    */
1298 /*                                                                          */
1299 /* When the caller is finished accessing NVRAM access must be disabled.     */
1300 /*                                                                          */
1301 /* Returns:                                                                 */
1302 /*   Nothing.                                                               */
1303 /****************************************************************************/
1304 static void
1305 bce_disable_nvram_access(struct bce_softc *sc)
1306 {
1307         u32 val;
1308
1309         DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1310
1311         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1312
1313         /* Disable both bits, even after read. */
1314         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1315                 val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1316                         BCE_NVM_ACCESS_ENABLE_WR_EN));
1317 }
1318
1319
1320 #ifdef BCE_NVRAM_WRITE_SUPPORT
1321 /****************************************************************************/
1322 /* Erase NVRAM page before writing.                                         */
1323 /*                                                                          */
1324 /* Non-buffered flash parts require that a page be erased before it is      */
1325 /* written.                                                                 */
1326 /*                                                                          */
1327 /* Returns:                                                                 */
1328 /*   0 on success, positive value on failure.                               */
1329 /****************************************************************************/
1330 static int
1331 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1332 {
1333         u32 cmd;
1334         int j;
1335
1336         /* Buffered flash doesn't require an erase. */
1337         if (sc->bce_flash_info->buffered)
1338                 return 0;
1339
1340         DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1341
1342         /* Build an erase command. */
1343         cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1344               BCE_NVM_COMMAND_DOIT;
1345
1346         /*
1347          * Clear the DONE bit separately, set the NVRAM address to erase,
1348          * and issue the erase command.
1349          */
1350         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1351         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1352         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1353
1354         /* Wait for completion. */
1355         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1356                 u32 val;
1357
1358                 DELAY(5);
1359
1360                 val = REG_RD(sc, BCE_NVM_COMMAND);
1361                 if (val & BCE_NVM_COMMAND_DONE)
1362                         break;
1363         }
1364
1365         if (j >= NVRAM_TIMEOUT_COUNT) {
1366                 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1367                 return EBUSY;
1368         }
1369
1370         return 0;
1371 }
1372 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1373
1374
1375 /****************************************************************************/
1376 /* Read a dword (32 bits) from NVRAM.                                       */
1377 /*                                                                          */
1378 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1379 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1380 /*                                                                          */
1381 /* Returns:                                                                 */
1382 /*   0 on success and the 32 bit value read, positive value on failure.     */
1383 /****************************************************************************/
1384 static int
1385 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1386                                                         u32 cmd_flags)
1387 {
1388         u32 cmd;
1389         int i, rc = 0;
1390
1391         /* Build the command word. */
1392         cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1393
1394         /* Calculate the offset for buffered flash. */
1395         if (sc->bce_flash_info->buffered) {
1396                 offset = ((offset / sc->bce_flash_info->page_size) <<
1397                            sc->bce_flash_info->page_bits) +
1398                           (offset % sc->bce_flash_info->page_size);
1399         }
1400
1401         /*
1402          * Clear the DONE bit separately, set the address to read,
1403          * and issue the read.
1404          */
1405         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1406         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1407         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1408
1409         /* Wait for completion. */
1410         for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1411                 u32 val;
1412
1413                 DELAY(5);
1414
1415                 val = REG_RD(sc, BCE_NVM_COMMAND);
1416                 if (val & BCE_NVM_COMMAND_DONE) {
1417                         val = REG_RD(sc, BCE_NVM_READ);
1418
1419                         val = bce_be32toh(val);
1420                         memcpy(ret_val, &val, 4);
1421                         break;
1422                 }
1423         }
1424
1425         /* Check for errors. */
1426         if (i >= NVRAM_TIMEOUT_COUNT) {
1427                 BCE_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1428                         __FILE__, __LINE__, offset);
1429                 rc = EBUSY;
1430         }
1431
1432         return(rc);
1433 }
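/*
 * Worked example of the buffered-flash offset translation above.  Assuming,
 * purely for illustration, a part with 264-byte pages that are addressed on
 * 512-byte boundaries (page_size = 264, page_bits = 9):
 *
 *      offset = 1000
 *      page           = 1000 / 264 = 3
 *      byte in page   = 1000 % 264 = 208
 *      device address = (3 << 9) + 208 = 1744
 */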
1434
1435
1436 #ifdef BCE_NVRAM_WRITE_SUPPORT
1437 /****************************************************************************/
1438 /* Write a dword (32 bits) to NVRAM.                                        */
1439 /*                                                                          */
1440 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1441 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1442 /* enabled NVRAM write access.                                              */
1443 /*                                                                          */
1444 /* Returns:                                                                 */
1445 /*   0 on success, positive value on failure.                               */
1446 /****************************************************************************/
1447 static int
1448 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1449         u32 cmd_flags)
1450 {
1451         u32 cmd, val32;
1452         int j;
1453
1454         /* Build the command word. */
1455         cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1456
1457         /* Calculate the offset for buffered flash. */
1458         if (sc->bce_flash_info->buffered) {
1459                 offset = ((offset / sc->bce_flash_info->page_size) <<
1460                           sc->bce_flash_info->page_bits) +
1461                          (offset % sc->bce_flash_info->page_size);
1462         }
1463
1464         /*
1465          * Clear the DONE bit separately, convert NVRAM data to big-endian,
1466          * set the NVRAM address to write, and issue the write command
1467          */
1468         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1469         memcpy(&val32, val, 4);
1470         val32 = htobe32(val32);
1471         REG_WR(sc, BCE_NVM_WRITE, val32);
1472         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1473         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1474
1475         /* Wait for completion. */
1476         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1477                 DELAY(5);
1478
1479                 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1480                         break;
1481         }
1482         if (j >= NVRAM_TIMEOUT_COUNT) {
1483                 BCE_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1484                         __FILE__, __LINE__, offset);
1485                 return EBUSY;
1486         }
1487
1488         return 0;
1489 }
1490 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1491
1492
1493 /****************************************************************************/
1494 /* Initialize NVRAM access.                                                 */
1495 /*                                                                          */
1496 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1497 /* access that device.                                                      */
1498 /*                                                                          */
1499 /* Returns:                                                                 */
1500 /*   0 on success, positive value on failure.                               */
1501 /****************************************************************************/
1502 static int
1503 bce_init_nvram(struct bce_softc *sc)
1504 {
1505         u32 val;
1506         int j, entry_count, rc;
1507         struct flash_spec *flash;
1508
1509         DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1510
1511         /* Determine the selected interface. */
1512         val = REG_RD(sc, BCE_NVM_CFG1);
1513
1514         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1515
1516         rc = 0;
1517
1518         /*
1519          * Flash reconfiguration is required to support additional
1520          * NVRAM devices not directly supported in hardware.
1521          * Check if the flash interface was reconfigured
1522          * by the bootcode.
1523          */
1524
1525         if (val & 0x40000000) {
1526                 /* Flash interface reconfigured by bootcode. */
1527
1528                 DBPRINT(sc,BCE_INFO_LOAD, 
1529                         "bce_init_nvram(): Flash WAS reconfigured.\n");
1530
1531                 for (j = 0, flash = &flash_table[0]; j < entry_count;
1532                      j++, flash++) {
1533                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
1534                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1535                                 sc->bce_flash_info = flash;
1536                                 break;
1537                         }
1538                 }
1539         } else {
1540                 /* Flash interface not yet reconfigured. */
1541                 u32 mask;
1542
1543                 DBPRINT(sc,BCE_INFO_LOAD, 
1544                         "bce_init_nvram(): Flash was NOT reconfigured.\n");
1545
1546                 if (val & (1 << 23))
1547                         mask = FLASH_BACKUP_STRAP_MASK;
1548                 else
1549                         mask = FLASH_STRAP_MASK;
1550
1551                 /* Look for the matching NVRAM device configuration data. */
1552                 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1553
1554                         /* Check if the device matches any of the known devices. */
1555                         if ((val & mask) == (flash->strapping & mask)) {
1556                                 /* Found a device match. */
1557                                 sc->bce_flash_info = flash;
1558
1559                                 /* Request access to the flash interface. */
1560                                 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1561                                         return rc;
1562
1563                                 /* Reconfigure the flash interface. */
1564                                 bce_enable_nvram_access(sc);
1565                                 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1566                                 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1567                                 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1568                                 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1569                                 bce_disable_nvram_access(sc);
1570                                 bce_release_nvram_lock(sc);
1571
1572                                 break;
1573                         }
1574                 }
1575         }
1576
1577         /* Check if a matching device was found. */
1578         if (j == entry_count) {
1579                 sc->bce_flash_info = NULL;
1580                 BCE_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n", 
1581                         __FILE__, __LINE__);
1582                 rc = ENODEV;
1583         }
1584
1585         /* Write the flash config data to the shared memory interface. */
1586         val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1587         val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1588         if (val)
1589                 sc->bce_flash_size = val;
1590         else
1591                 sc->bce_flash_size = sc->bce_flash_info->total_size;
1592
1593         DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1594                 sc->bce_flash_info->total_size);
1595
1596         DBPRINT(sc,BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1597
1598         return rc;
1599 }
1600
1601
1602 /****************************************************************************/
1603 /* Read an arbitrary range of data from NVRAM.                              */
1604 /*                                                                          */
1605 /* Prepares the NVRAM interface for access and reads the requested data     */
1606 /* into the supplied buffer.                                                */
1607 /*                                                                          */
1608 /* Returns:                                                                 */
1609 /*   0 on success and the data read, positive value on failure.             */
1610 /****************************************************************************/
1611 static int
1612 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1613         int buf_size)
1614 {
1615         int rc = 0;
1616         u32 cmd_flags, offset32, len32, extra;
1617
1618         if (buf_size == 0)
1619                 return 0;
1620
1621         /* Request access to the flash interface. */
1622         if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1623                 return rc;
1624
1625         /* Enable access to flash interface */
1626         bce_enable_nvram_access(sc);
1627
1628         len32 = buf_size;
1629         offset32 = offset;
1630         extra = 0;
1631
1632         cmd_flags = 0;
1633
1634         if (offset32 & 3) {
1635                 u8 buf[4];
1636                 u32 pre_len;
1637
1638                 offset32 &= ~3;
1639                 pre_len = 4 - (offset & 3);
1640
1641                 if (pre_len >= len32) {
1642                         pre_len = len32;
1643                         cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1644                 }
1645                 else {
1646                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1647                 }
1648
1649                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1650
1651                 if (rc)
1652                         return rc;
1653
1654                 memcpy(ret_buf, buf + (offset & 3), pre_len);
1655
1656                 offset32 += 4;
1657                 ret_buf += pre_len;
1658                 len32 -= pre_len;
1659         }
1660
1661         if (len32 & 3) {
1662                 extra = 4 - (len32 & 3);
1663                 len32 = (len32 + 4) & ~3;
1664         }
1665
1666         if (len32 == 4) {
1667                 u8 buf[4];
1668
1669                 if (cmd_flags)
1670                         cmd_flags = BCE_NVM_COMMAND_LAST;
1671                 else
1672                         cmd_flags = BCE_NVM_COMMAND_FIRST |
1673                                     BCE_NVM_COMMAND_LAST;
1674
1675                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1676
1677                 memcpy(ret_buf, buf, 4 - extra);
1678         }
1679         else if (len32 > 0) {
1680                 u8 buf[4];
1681
1682                 /* Read the first word. */
1683                 if (cmd_flags)
1684                         cmd_flags = 0;
1685                 else
1686                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1687
1688                 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1689
1690                 /* Advance to the next dword. */
1691                 offset32 += 4;
1692                 ret_buf += 4;
1693                 len32 -= 4;
1694
1695                 while (len32 > 4 && rc == 0) {
1696                         rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1697
1698                         /* Advance to the next dword. */
1699                         offset32 += 4;
1700                         ret_buf += 4;
1701                         len32 -= 4;
1702                 }
1703
1704                 if (rc)
1705                         return rc;
1706
1707                 cmd_flags = BCE_NVM_COMMAND_LAST;
1708                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1709
1710                 memcpy(ret_buf, buf, 4 - extra);
1711         }
1712
1713         /* Disable access to flash interface and release the lock. */
1714         bce_disable_nvram_access(sc);
1715         bce_release_nvram_lock(sc);
1716
1717         return rc;
1718 }
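/*
 * Illustrative walk-through of the alignment handling above, assuming a
 * hypothetical request of offset = 2 and buf_size = 5:
 *
 *      pre_len = 4 - (2 & 3) = 2  ->  dword read at offset 0 (NVM FIRST),
 *                                     bytes 2..3 copied to the caller
 *      len32 = 3, extra = 1       ->  rounded up to a final dword read at
 *                                     offset 4 (NVM LAST), bytes 0..2 copied
 *
 * Two framed dword reads therefore satisfy the 5-byte request.
 */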
1719
1720
1721 #ifdef BCE_NVRAM_WRITE_SUPPORT
1722 /****************************************************************************/
1723 /* Write an arbitrary range of data to NVRAM.                               */
1724 /*                                                                          */
1725 /* Prepares the NVRAM interface for write access and writes the requested   */
1726 /* data from the supplied buffer.  The caller is responsible for            */
1727 /* calculating any appropriate CRCs.                                        */
1728 /*                                                                          */
1729 /* Returns:                                                                 */
1730 /*   0 on success, positive value on failure.                               */
1731 /****************************************************************************/
1732 static int
1733 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1734         int buf_size)
1735 {
1736         u32 written, offset32, len32;
1737         u8 *buf, start[4], end[4];
1738         int rc = 0;
1739         int align_start, align_end;
1740
1741         buf = data_buf;
1742         offset32 = offset;
1743         len32 = buf_size;
1744         align_start = align_end = 0;
1745
1746         if ((align_start = (offset32 & 3))) {
1747                 offset32 &= ~3;
1748                 len32 += align_start;
1749                 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1750                         return rc;
1751         }
1752
1753         if (len32 & 3) {
1754                 if ((len32 > 4) || !align_start) {
1755                         align_end = 4 - (len32 & 3);
1756                         len32 += align_end;
1757                         if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1758                                 end, 4))) {
1759                                 return rc;
1760                         }
1761                 }
1762         }
1763
1764         if (align_start || align_end) {
1765                 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1766                 if (buf == NULL)
1767                         return ENOMEM;
1768                 if (align_start) {
1769                         memcpy(buf, start, 4);
1770                 }
1771                 if (align_end) {
1772                         memcpy(buf + len32 - 4, end, 4);
1773                 }
1774                 memcpy(buf + align_start, data_buf, buf_size);
1775         }
1776
1777         written = 0;
1778         while ((written < len32) && (rc == 0)) {
1779                 u32 page_start, page_end, data_start, data_end;
1780                 u32 addr, cmd_flags;
1781                 int i;
1782                 u8 flash_buffer[264];
1783
1784                 /* Find the page_start addr */
1785                 page_start = offset32 + written;
1786                 page_start -= (page_start % sc->bce_flash_info->page_size);
1787                 /* Find the page_end addr */
1788                 page_end = page_start + sc->bce_flash_info->page_size;
1789                 /* Find the data_start addr */
1790                 data_start = (written == 0) ? offset32 : page_start;
1791                 /* Find the data_end addr */
1792                 data_end = (page_end > offset32 + len32) ?
1793                         (offset32 + len32) : page_end;
1794
1795                 /* Request access to the flash interface. */
1796                 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1797                         goto nvram_write_end;
1798
1799                 /* Enable access to flash interface */
1800                 bce_enable_nvram_access(sc);
1801
1802                 cmd_flags = BCE_NVM_COMMAND_FIRST;
1803                 if (sc->bce_flash_info->buffered == 0) {
1804                         int j;
1805
1806                         /* Read the whole page into the buffer
1807                          * (non-buffered flash only) */
1808                         for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1809                                 if (j == (sc->bce_flash_info->page_size - 4)) {
1810                                         cmd_flags |= BCE_NVM_COMMAND_LAST;
1811                                 }
1812                                 rc = bce_nvram_read_dword(sc,
1813                                         page_start + j,
1814                                         &flash_buffer[j],
1815                                         cmd_flags);
1816
1817                                 if (rc)
1818                                         goto nvram_write_end;
1819
1820                                 cmd_flags = 0;
1821                         }
1822                 }
1823
1824                 /* Enable writes to flash interface (unlock write-protect) */
1825                 if ((rc = bce_enable_nvram_write(sc)) != 0)
1826                         goto nvram_write_end;
1827
1828                 /* Erase the page */
1829                 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1830                         goto nvram_write_end;
1831
1832                 /* Re-enable the write again for the actual write */
1833                 bce_enable_nvram_write(sc);
1834
1835                 /* Loop to write back the buffer data from page_start to
1836                  * data_start */
1837                 i = 0;
1838                 if (sc->bce_flash_info->buffered == 0) {
1839                         for (addr = page_start; addr < data_start;
1840                                 addr += 4, i += 4) {
1841
1842                                 rc = bce_nvram_write_dword(sc, addr,
1843                                         &flash_buffer[i], cmd_flags);
1844
1845                                 if (rc != 0)
1846                                         goto nvram_write_end;
1847
1848                                 cmd_flags = 0;
1849                         }
1850                 }
1851
1852                 /* Loop to write the new data from data_start to data_end */
1853                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1854                         if ((addr == page_end - 4) ||
1855                                 ((sc->bce_flash_info->buffered) &&
1856                                  (addr == data_end - 4))) {
1857
1858                                 cmd_flags |= BCE_NVM_COMMAND_LAST;
1859                         }
1860                         rc = bce_nvram_write_dword(sc, addr, buf,
1861                                 cmd_flags);
1862
1863                         if (rc != 0)
1864                                 goto nvram_write_end;
1865
1866                         cmd_flags = 0;
1867                         buf += 4;
1868                 }
1869
1870                 /* Loop to write back the buffer data from data_end
1871                  * to page_end */
1872                 if (sc->bce_flash_info->buffered == 0) {
1873                         for (addr = data_end; addr < page_end;
1874                                 addr += 4, i += 4) {
1875
1876                                 if (addr == page_end-4) {
1877                                         cmd_flags = BCE_NVM_COMMAND_LAST;
1878                                 }
1879                                 rc = bce_nvram_write_dword(sc, addr,
1880                                         &flash_buffer[i], cmd_flags);
1881
1882                                 if (rc != 0)
1883                                         goto nvram_write_end;
1884
1885                                 cmd_flags = 0;
1886                         }
1887                 }
1888
1889                 /* Disable writes to flash interface (lock write-protect) */
1890                 bce_disable_nvram_write(sc);
1891
1892                 /* Disable access to flash interface */
1893                 bce_disable_nvram_access(sc);
1894                 bce_release_nvram_lock(sc);
1895
1896                 /* Increment written */
1897                 written += data_end - data_start;
1898         }
1899
1900 nvram_write_end:
1901         if (align_start || align_end)
1902                 free(buf, M_DEVBUF);
1903
1904         return rc;
1905 }
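/*
 * Worked example of the alignment handling above, assuming a hypothetical
 * caller that writes 3 bytes at offset 5:
 *
 *      align_start = 5 & 3 = 1  ->  offset32 = 4, len32 = 4
 *
 * The dword at offset 4 is first read into start[], the 3 new bytes are
 * merged in starting at byte 1 of the scratch buffer, and the result is
 * written back as a single aligned dword within one page erase/program
 * cycle.
 */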
1906 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1907
1908
1909 /****************************************************************************/
1910 /* Verifies that NVRAM is accessible and contains valid data.               */
1911 /*                                                                          */
1912 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1913 /* correct.                                                                 */
1914 /*                                                                          */
1915 /* Returns:                                                                 */
1916 /*   0 on success, positive value on failure.                               */
1917 /****************************************************************************/
1918 static int
1919 bce_nvram_test(struct bce_softc *sc)
1920 {
1921         u32 buf[BCE_NVRAM_SIZE / 4];
1922         u8 *data = (u8 *) buf;
1923         int rc = 0;
1924         u32 magic, csum;
1925
1926
1927         /*
1928          * Check that the device NVRAM is valid by reading
1929          * the magic value at offset 0.
1930          */
1931         if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
1932                 goto bce_nvram_test_done;
1933
1934
1935         magic = bce_be32toh(buf[0]);
1936         if (magic != BCE_NVRAM_MAGIC) {
1937                 rc = ENODEV;
1938                 BCE_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
1939                         "Found: 0x%08X\n",
1940                         __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
1941                 goto bce_nvram_test_done;
1942         }
1943
1944         /*
1945          * Verify that the device NVRAM includes valid
1946          * configuration data.
1947          */
1948         if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
1949                 goto bce_nvram_test_done;
1950
1951         csum = ether_crc32_le(data, 0x100);
1952         if (csum != BCE_CRC32_RESIDUAL) {
1953                 rc = ENODEV;
1954                 BCE_PRINTF(sc, "%s(%d): Invalid Manufacturing Information NVRAM CRC! "
1955                         "Expected: 0x%08X, Found: 0x%08X\n",
1956                         __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1957                 goto bce_nvram_test_done;
1958         }
1959
1960         csum = ether_crc32_le(data + 0x100, 0x100);
1961         if (csum != BCE_CRC32_RESIDUAL) {
1962                 BCE_PRINTF(sc, "%s(%d): Invalid Feature Configuration Information "
1963                         "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1964                         __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
1965                 rc = ENODEV;
1966         }
1967
1968 bce_nvram_test_done:
1969         return rc;
1970 }
1971
1972
1973 /****************************************************************************/
1974 /* Free any DMA memory owned by the driver.                                 */
1975 /*                                                                          */
1976 /* Scans through each data structure that requires DMA memory and frees     */
1977 /* the memory if allocated.                                                 */
1978 /*                                                                          */
1979 /* Returns:                                                                 */
1980 /*   Nothing.                                                               */
1981 /****************************************************************************/
1982 static void
1983 bce_dma_free(struct bce_softc *sc)
1984 {
1985         int i;
1986
1987         DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1988
1989         /* Destroy the status block. */
1990         if (sc->status_block != NULL)
1991                 bus_dmamem_free(
1992                         sc->status_tag,
1993                     sc->status_block,
1994                     sc->status_map);
1995
1996         if (sc->status_map != NULL) {
1997                 bus_dmamap_unload(
1998                         sc->status_tag,
1999                     sc->status_map);
2000                 bus_dmamap_destroy(sc->status_tag,
2001                     sc->status_map);
2002         }
2003
2004         if (sc->status_tag != NULL)
2005                 bus_dma_tag_destroy(sc->status_tag);
2006
2007
2008         /* Destroy the statistics block. */
2009         if (sc->stats_block != NULL)
2010                 bus_dmamem_free(
2011                         sc->stats_tag,
2012                     sc->stats_block,
2013                     sc->stats_map);
2014
2015         if (sc->stats_map != NULL) {
2016                 bus_dmamap_unload(
2017                         sc->stats_tag,
2018                     sc->stats_map);
2019                 bus_dmamap_destroy(sc->stats_tag,
2020                     sc->stats_map);
2021         }
2022
2023         if (sc->stats_tag != NULL)
2024                 bus_dma_tag_destroy(sc->stats_tag);
2025
2026
2027         /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2028         for (i = 0; i < TX_PAGES; i++ ) {
2029                 if (sc->tx_bd_chain[i] != NULL)
2030                         bus_dmamem_free(
2031                                 sc->tx_bd_chain_tag,
2032                             sc->tx_bd_chain[i],
2033                             sc->tx_bd_chain_map[i]);
2034
2035                 if (sc->tx_bd_chain_map[i] != NULL) {
2036                         bus_dmamap_unload(
2037                                 sc->tx_bd_chain_tag,
2038                         sc->tx_bd_chain_map[i]);
2039                         bus_dmamap_destroy(
2040                                 sc->tx_bd_chain_tag,
2041                             sc->tx_bd_chain_map[i]);
2042                 }
2043
2044         }
2045
2046         /* Destroy the TX buffer descriptor tag. */
2047         if (sc->tx_bd_chain_tag != NULL)
2048                 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2049
2050
2051         /* Free, unmap and destroy all RX buffer descriptor chain pages. */
2052         for (i = 0; i < RX_PAGES; i++ ) {
2053                 if (sc->rx_bd_chain[i] != NULL)
2054                         bus_dmamem_free(
2055                                 sc->rx_bd_chain_tag,
2056                             sc->rx_bd_chain[i],
2057                             sc->rx_bd_chain_map[i]);
2058
2059                 if (sc->rx_bd_chain_map[i] != NULL) {
2060                         bus_dmamap_unload(
2061                                 sc->rx_bd_chain_tag,
2062                         sc->rx_bd_chain_map[i]);
2063                         bus_dmamap_destroy(
2064                                 sc->rx_bd_chain_tag,
2065                             sc->rx_bd_chain_map[i]);
2066                 }
2067         }
2068
2069         /* Destroy the RX buffer descriptor tag. */
2070         if (sc->rx_bd_chain_tag != NULL)
2071                 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2072
2073
2074         /* Unload and destroy the TX mbuf maps. */
2075         for (i = 0; i < TOTAL_TX_BD; i++) {
2076                 if (sc->tx_mbuf_map[i] != NULL) {
2077                         bus_dmamap_unload(sc->tx_mbuf_tag, 
2078                                 sc->tx_mbuf_map[i]);
2079                         bus_dmamap_destroy(sc->tx_mbuf_tag, 
2080                                 sc->tx_mbuf_map[i]);
2081                 }
2082         }
2083
2084         /* Destroy the TX mbuf tag. */
2085         if (sc->tx_mbuf_tag != NULL)
2086                 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2087
2088
2089         /* Unload and destroy the RX mbuf maps. */
2090         for (i = 0; i < TOTAL_RX_BD; i++) {
2091                 if (sc->rx_mbuf_map[i] != NULL) {
2092                         bus_dmamap_unload(sc->rx_mbuf_tag, 
2093                                 sc->rx_mbuf_map[i]);
2094                         bus_dmamap_destroy(sc->rx_mbuf_tag, 
2095                                 sc->rx_mbuf_map[i]);
2096                 }
2097         }
2098
2099         /* Destroy the RX mbuf tag. */
2100         if (sc->rx_mbuf_tag != NULL)
2101                 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2102
2103
2104         /* Destroy the parent tag */
2105         if (sc->parent_tag != NULL)
2106                 bus_dma_tag_destroy(sc->parent_tag);
2107
2108         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2109
2110 }
2111
2112
2113 /****************************************************************************/
2114 /* Get DMA memory from the OS.                                              */
2115 /*                                                                          */
2116 /* Validates that the OS has provided DMA buffers in response to a          */
2117 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2118 /* When the callback is used the OS will return 0 for the mapping function  */
2119 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2120 /* failures back to the caller.                                             */
2121 /*                                                                          */
2122 /* Returns:                                                                 */
2123 /*   Nothing.                                                               */
2124 /****************************************************************************/
2125 static void
2126 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2127 {
2128         struct bce_dmamap_arg *map_arg = arg;
2129         struct bce_softc *sc = map_arg->sc;
2130
2131         /* Simulate a mapping failure. */
2132         DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2133                 BCE_PRINTF(sc, "%s(%d): Simulating DMA mapping error.\n",
2134                         __FILE__, __LINE__);
2135                 error = ENOMEM);
2136                 
2137         /* Check for an error and signal the caller that an error occurred. */
2138         if (error || (nseg > map_arg->maxsegs)) {
2139                 BCE_PRINTF(sc, "%s(%d): DMA mapping error! error = %d, "
2140                 "nseg = %d, maxsegs = %d\n",
2141                         __FILE__, __LINE__, error, nseg, map_arg->maxsegs);
2142                 map_arg->maxsegs = 0;
2143                 goto bce_dma_map_addr_exit;
2144         }
2145
2146         map_arg->busaddr = segs->ds_addr;
2147
2148 bce_dma_map_addr_exit:
2149         return;
2150 }
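/*
 * Illustrative sketch, mirroring the calls made in bce_dma_alloc() below:
 * the caller seeds map_arg.maxsegs with the number of segments it can
 * accept and re-checks that field after bus_dmamap_load() returns, since
 * this callback reports failure by zeroing it.
 *
 *      map_arg.sc = sc;
 *      map_arg.maxsegs = 1;
 *      error = bus_dmamap_load(tag, map, vaddr, size,
 *          bce_dma_map_addr, &map_arg, BUS_DMA_NOWAIT);
 *      if (error || map_arg.maxsegs == 0)
 *              ... handle the mapping failure ...
 *      paddr = map_arg.busaddr;
 */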
2151
2152
2153 /****************************************************************************/
2154 /* Map TX buffers into TX buffer descriptors.                               */
2155 /*                                                                          */
2156 /* Given a series of DMA memory segments containing an outgoing frame, map  */
2157 /* the segments into the tx_bd structures used by the hardware.             */
2158 /*                                                                          */
2159 /* Returns:                                                                 */
2160 /*   Nothing.                                                               */
2161 /****************************************************************************/
2162 static void
2163 bce_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs,
2164         int nseg, bus_size_t mapsize, int error)
2165 {
2166         struct bce_dmamap_arg *map_arg;
2167         struct bce_softc *sc;
2168         struct tx_bd *txbd = NULL;
2169         int i = 0;
2170         u16 prod, chain_prod;
2171         u32     prod_bseq;
2172 #ifdef BCE_DEBUG
2173         u16 debug_prod;
2174 #endif
2175
2176         map_arg = arg;
2177         sc = map_arg->sc;
2178
2179         if (error) {
2180                 DBPRINT(sc, BCE_WARN, "%s(): Called with error = %d\n",
2181                         __FUNCTION__, error);
2182                 return;
2183         }
2184
2185         /* Signal error to caller if there's too many segments */
2186         if (nseg > map_arg->maxsegs) {
2187                 DBPRINT(sc, BCE_WARN,
2188                         "%s(): Mapped TX descriptors: max segs = %d, "
2189                         "actual segs = %d\n",
2190                         __FUNCTION__, map_arg->maxsegs, nseg);
2191
2192                 map_arg->maxsegs = 0;
2193                 return;
2194         }
2195
2196         /* prod points to an empty tx_bd at this point. */
2197         prod       = map_arg->prod;
2198         chain_prod = map_arg->chain_prod;
2199         prod_bseq  = map_arg->prod_bseq;
2200
2201 #ifdef BCE_DEBUG
2202         debug_prod = chain_prod;
2203 #endif
2204
2205         DBPRINT(sc, BCE_INFO_SEND,
2206                 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
2207                 "prod_bseq = 0x%08X\n",
2208                 __FUNCTION__, prod, chain_prod, prod_bseq);
2209
2210         /*
2211          * Cycle through each mbuf segment that makes up
2212          * the outgoing frame, gathering the mapping info
2213          * for that segment and creating a tx_bd for
2214          * the mbuf.
2215          */
2216
2217         txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2218
2219         /* Setup the first tx_bd for the first segment. */
2220         txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2221         txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2222         txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
2223         txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags |
2224                         TX_BD_FLAGS_START);
2225         prod_bseq += segs[i].ds_len;
2226
2227         /* Setup any remaining segments. */
2228         for (i = 1; i < nseg; i++) {
2229                 prod       = NEXT_TX_BD(prod);
2230                 chain_prod = TX_CHAIN_IDX(prod);
2231
2232                 txbd = &map_arg->tx_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
2233
2234                 txbd->tx_bd_haddr_lo       = htole32(BCE_ADDR_LO(segs[i].ds_addr));
2235                 txbd->tx_bd_haddr_hi       = htole32(BCE_ADDR_HI(segs[i].ds_addr));
2236                 txbd->tx_bd_mss_nbytes     = htole16(segs[i].ds_len);
2237                 txbd->tx_bd_vlan_tag_flags = htole16(map_arg->tx_flags);
2238
2239                 prod_bseq += segs[i].ds_len;
2240         }
2241
2242         /* Set the END flag on the last TX buffer descriptor. */
2243         txbd->tx_bd_vlan_tag_flags |= htole16(TX_BD_FLAGS_END);
2244
2245         DBRUN(BCE_INFO_SEND, bce_dump_tx_chain(sc, debug_prod, nseg));
2246
2247         DBPRINT(sc, BCE_INFO_SEND,
2248                 "%s(): End: prod = 0x%04X, chain_prod = %04X, "
2249                 "prod_bseq = 0x%08X\n",
2250                 __FUNCTION__, prod, chain_prod, prod_bseq);
2251
2252         /* prod points to the last tx_bd at this point. */
2253         map_arg->maxsegs    = nseg;
2254         map_arg->prod       = prod;
2255         map_arg->chain_prod = chain_prod;
2256         map_arg->prod_bseq  = prod_bseq;
2257 }
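/*
 * Illustrative note: for a frame that maps into, say, three DMA segments,
 * the routine above emits three tx_bd entries.  The first carries
 * TX_BD_FLAGS_START, the last carries TX_BD_FLAGS_END, and prod_bseq is
 * advanced by the total byte count of the frame.
 */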
2258
2259
2260 /****************************************************************************/
2261 /* Allocate any DMA memory needed by the driver.                            */
2262 /*                                                                          */
2263 /* Allocates DMA memory needed for the various global structures needed by  */
2264 /* hardware.                                                                */
2265 /*                                                                          */
2266 /* Returns:                                                                 */
2267 /*   0 for success, positive value for failure.                             */
2268 /****************************************************************************/
2269 static int
2270 bce_dma_alloc(device_t dev)
2271 {
2272         struct bce_softc *sc;
2273         int i, error, rc = 0;
2274         struct bce_dmamap_arg map_arg;
2275
2276         sc = device_get_softc(dev);
2277
2278         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2279
2280         /*
2281          * Allocate the parent bus DMA tag appropriate for PCI.
2282          */
2283         if (bus_dma_tag_create(NULL,            /* parent     */
2284                         BCE_DMA_ALIGN,                          /* alignment  */
2285                         BCE_DMA_BOUNDARY,                       /* boundary   */
2286                         sc->max_bus_addr,                       /* lowaddr    */
2287                         BUS_SPACE_MAXADDR,                      /* highaddr   */
2288                         NULL,                                           /* filterfunc */
2289                         NULL,                                           /* filterarg  */
2290                         MAXBSIZE,                                       /* maxsize    */
2291                         BUS_SPACE_UNRESTRICTED,         /* nsegments  */
2292                         BUS_SPACE_MAXSIZE_32BIT,        /* maxsegsize */
2293                         0,                                                      /* flags      */
2294                         NULL,                                           /* lockfunc   */
2295                         NULL,                                           /* lockarg    */
2296                         &sc->parent_tag)) {
2297                 BCE_PRINTF(sc, "%s(%d): Could not allocate parent DMA tag!\n",
2298                         __FILE__, __LINE__);
2299                 rc = ENOMEM;
2300                 goto bce_dma_alloc_exit;
2301         }
2302
2303         /*
2304          * Create a DMA tag for the status block, allocate and clear the
2305          * memory, map the memory into DMA space, and fetch the physical 
2306          * address of the block.
2307          */
2308         if (bus_dma_tag_create(
2309                         sc->parent_tag,                 /* parent      */
2310                 BCE_DMA_ALIGN,                  /* alignment   */
2311                 BCE_DMA_BOUNDARY,               /* boundary    */
2312                 sc->max_bus_addr,               /* lowaddr     */
2313                 BUS_SPACE_MAXADDR,              /* highaddr    */
2314                 NULL,                                   /* filterfunc  */
2315                 NULL,                                   /* filterarg   */
2316                 BCE_STATUS_BLK_SZ,              /* maxsize     */
2317                 1,                                              /* nsegments   */
2318                 BCE_STATUS_BLK_SZ,              /* maxsegsize  */
2319                 0,                                              /* flags       */
2320                 NULL,                                   /* lockfunc    */
2321                 NULL,                                   /* lockarg     */
2322                 &sc->status_tag)) {
2323                 BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA tag!\n",
2324                         __FILE__, __LINE__);
2325                 rc = ENOMEM;
2326                 goto bce_dma_alloc_exit;
2327         }
2328
2329         if(bus_dmamem_alloc(
2330                         sc->status_tag,                         /* dmat        */
2331                 (void **)&sc->status_block,     /* vaddr       */
2332                 BUS_DMA_NOWAIT,                                 /* flags       */
2333                 &sc->status_map)) {
2334                 BCE_PRINTF(sc, "%s(%d): Could not allocate status block DMA memory!\n",
2335                         __FILE__, __LINE__);
2336                 rc = ENOMEM;
2337                 goto bce_dma_alloc_exit;
2338         }
2339
2340         bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2341
2342         map_arg.sc = sc;
2343         map_arg.maxsegs = 1;
2344
2345         error = bus_dmamap_load(
2346                         sc->status_tag,                 /* dmat        */
2347                 sc->status_map,                 /* map         */
2348                 sc->status_block,               /* buf         */
2349                 BCE_STATUS_BLK_SZ,              /* buflen      */
2350                 bce_dma_map_addr,               /* callback    */
2351                 &map_arg,                               /* callbackarg */
2352                 BUS_DMA_NOWAIT);                /* flags       */
2353                 
2354         if(error || (map_arg.maxsegs == 0)) {
2355                 BCE_PRINTF(sc, "%s(%d): Could not map status block DMA memory!\n",
2356                         __FILE__, __LINE__);
2357                 rc = ENOMEM;
2358                 goto bce_dma_alloc_exit;
2359         }
2360
2361         sc->status_block_paddr = map_arg.busaddr;
2362         /* DRC - Fix for 64 bit addresses. */
2363         DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2364                 (u32) sc->status_block_paddr);
2365
2366         /*
2367          * Create a DMA tag for the statistics block, allocate and clear the
2368          * memory, map the memory into DMA space, and fetch the physical 
2369          * address of the block.
2370          */
2371         if (bus_dma_tag_create(
2372                         sc->parent_tag,                 /* parent      */
2373                 BCE_DMA_ALIGN,                  /* alignment   */
2374                 BCE_DMA_BOUNDARY,               /* boundary    */
2375                 sc->max_bus_addr,               /* lowaddr     */
2376                 BUS_SPACE_MAXADDR,              /* highaddr    */
2377                 NULL,                                   /* filterfunc  */
2378                 NULL,                                   /* filterarg   */
2379                 BCE_STATS_BLK_SZ,               /* maxsize     */
2380                 1,                                              /* nsegments   */
2381                 BCE_STATS_BLK_SZ,               /* maxsegsize  */
2382                 0,                                              /* flags       */
2383                 NULL,                                   /* lockfunc    */
2384                 NULL,                                   /* lockarg     */
2385                 &sc->stats_tag)) {
2386                 BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA tag!\n",
2387                         __FILE__, __LINE__);
2388                 rc = ENOMEM;
2389                 goto bce_dma_alloc_exit;
2390         }
2391
2392         if (bus_dmamem_alloc(
2393                         sc->stats_tag,                          /* dmat        */
2394                 (void **)&sc->stats_block,      /* vaddr       */
2395                 BUS_DMA_NOWAIT,                         /* flags       */
2396                 &sc->stats_map)) {
2397                 BCE_PRINTF(sc, "%s(%d): Could not allocate statistics block DMA memory!\n",
2398                         __FILE__, __LINE__);
2399                 rc = ENOMEM;
2400                 goto bce_dma_alloc_exit;
2401         }
2402
2403         bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2404
2405         map_arg.sc = sc;
2406         map_arg.maxsegs = 1;
2407
2408         error = bus_dmamap_load(
2409                         sc->stats_tag,          /* dmat        */
2410                 sc->stats_map,          /* map         */
2411                 sc->stats_block,        /* buf         */
2412                 BCE_STATS_BLK_SZ,       /* buflen      */
2413                 bce_dma_map_addr,       /* callback    */
2414                 &map_arg,                       /* callbackarg */
2415                 BUS_DMA_NOWAIT);        /* flags       */
2416
2417         if(error || (map_arg.maxsegs == 0)) {
2418                 BCE_PRINTF(sc, "%s(%d): Could not map statistics block DMA memory!\n",
2419                         __FILE__, __LINE__);
2420                 rc = ENOMEM;
2421                 goto bce_dma_alloc_exit;
2422         }
2423
2424         sc->stats_block_paddr = map_arg.busaddr;
2425         /* DRC - Fix for 64 bit address. */
2426         DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n", 
2427                 (u32) sc->stats_block_paddr);
2428
2429         /*
2430          * Create a DMA tag for the TX buffer descriptor chain,
2431          * allocate and clear the memory, and fetch the
2432          * physical address of the block.
2433          */
2434         if(bus_dma_tag_create(
2435                         sc->parent_tag,           /* parent      */
2436                 BCM_PAGE_SIZE,            /* alignment   */
2437                 BCE_DMA_BOUNDARY,         /* boundary    */
2438                         sc->max_bus_addr,         /* lowaddr     */
2439                         BUS_SPACE_MAXADDR,        /* highaddr    */
2440                         NULL,                             /* filterfunc  */ 
2441                         NULL,                             /* filterarg   */
2442                         BCE_TX_CHAIN_PAGE_SZ, /* maxsize     */
2443                         1,                                        /* nsegments   */
2444                         BCE_TX_CHAIN_PAGE_SZ, /* maxsegsize  */
2445                         0,                                        /* flags       */
2446                         NULL,                             /* lockfunc    */
2447                         NULL,                             /* lockarg     */
2448                         &sc->tx_bd_chain_tag)) {
2449                 BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2450                         __FILE__, __LINE__);
2451                 rc = ENOMEM;
2452                 goto bce_dma_alloc_exit;
2453         }
2454
2455         for (i = 0; i < TX_PAGES; i++) {
2456
2457                 if(bus_dmamem_alloc(
2458                                 sc->tx_bd_chain_tag,                    /* tag   */
2459                         (void **)&sc->tx_bd_chain[i],   /* vaddr */
2460                         BUS_DMA_NOWAIT,                                 /* flags */
2461                         &sc->tx_bd_chain_map[i])) {
2462                         BCE_PRINTF(sc, "%s(%d): Could not allocate TX descriptor "
2463                                 "chain DMA memory!\n", __FILE__, __LINE__);
2464                         rc = ENOMEM;
2465                         goto bce_dma_alloc_exit;
2466                 }
2467
2468                 map_arg.maxsegs = 1;
2469                 map_arg.sc = sc;
2470
2471                 error = bus_dmamap_load(
2472                                 sc->tx_bd_chain_tag,     /* dmat        */
2473                         sc->tx_bd_chain_map[i],  /* map         */
2474                         sc->tx_bd_chain[i],              /* buf         */
2475                         BCE_TX_CHAIN_PAGE_SZ,    /* buflen      */
2476                         bce_dma_map_addr,                /* callback    */
2477                         &map_arg,                                /* callbackarg */
2478                         BUS_DMA_NOWAIT);                 /* flags       */
2479
2480                 if(error || (map_arg.maxsegs == 0)) {
2481                         BCE_PRINTF(sc, "%s(%d): Could not map TX descriptor chain DMA memory!\n",
2482                                 __FILE__, __LINE__);
2483                         rc = ENOMEM;
2484                         goto bce_dma_alloc_exit;
2485                 }
2486
2487                 sc->tx_bd_chain_paddr[i] = map_arg.busaddr;
2488                 /* DRC - Fix for 64 bit systems. */
2489                 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 
2490                         i, (u32) sc->tx_bd_chain_paddr[i]);
2491         }
2492
2493         /* Create a DMA tag for TX mbufs. */
2494         if (bus_dma_tag_create(
2495                         sc->parent_tag,                 /* parent      */
2496                 BCE_DMA_ALIGN,                  /* alignment   */
2497                 BCE_DMA_BOUNDARY,               /* boundary    */
2498                         sc->max_bus_addr,               /* lowaddr     */
2499                         BUS_SPACE_MAXADDR,              /* highaddr    */
2500                         NULL,                                   /* filterfunc  */
2501                         NULL,                                   /* filterarg   */
2502                         MCLBYTES * BCE_MAX_SEGMENTS,    /* maxsize     */
2503                         BCE_MAX_SEGMENTS,               /* nsegments   */
2504                         MCLBYTES,                               /* maxsegsize  */
2505                         0,                                              /* flags       */
2506                         NULL,                                   /* lockfunc    */
2507                         NULL,                                   /* lockarg     */
2508                 &sc->tx_mbuf_tag)) {
2509                 BCE_PRINTF(sc, "%s(%d): Could not allocate TX mbuf DMA tag!\n",
2510                         __FILE__, __LINE__);
2511                 rc = ENOMEM;
2512                 goto bce_dma_alloc_exit;
2513         }
2514
2515         /* Create DMA maps for the TX mbufs clusters. */
2516         for (i = 0; i < TOTAL_TX_BD; i++) {
2517                 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, 
2518                         &sc->tx_mbuf_map[i])) {
2519                         BCE_PRINTF(sc, "%s(%d): Unable to create TX mbuf DMA map!\n",
2520                                 __FILE__, __LINE__);
2521                         rc = ENOMEM;
2522                         goto bce_dma_alloc_exit;
2523                 }
2524         }
2525
2526         /*
2527          * Create a DMA tag for the RX buffer descriptor chain,
2528          * allocate and clear the memory, and fetch the physical
2529          * address of the blocks.
2530          */
2531         if (bus_dma_tag_create(
2532                         sc->parent_tag,                 /* parent      */
2533                 BCM_PAGE_SIZE,                  /* alignment   */
2534                 BCE_DMA_BOUNDARY,               /* boundary    */
2535                         sc->max_bus_addr,               /* lowaddr     */
2536                         BUS_SPACE_MAXADDR,              /* highaddr    */
2537                         NULL,                                   /* filterfunc  */
2538                         NULL,                                   /* filterarg   */
2539                         BCE_RX_CHAIN_PAGE_SZ,   /* maxsize     */
2540                         1,                                              /* nsegments   */
2541                         BCE_RX_CHAIN_PAGE_SZ,   /* maxsegsize  */
2542                         0,                                              /* flags       */
2543                         NULL,                                   /* lockfunc    */
2544                         NULL,                                   /* lockarg     */
2545                         &sc->rx_bd_chain_tag)) {
2546                 BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2547                         __FILE__, __LINE__);
2548                 rc = ENOMEM;
2549                 goto bce_dma_alloc_exit;
2550         }
2551
2552         for (i = 0; i < RX_PAGES; i++) {
2553
2554                 if (bus_dmamem_alloc(
2555                                 sc->rx_bd_chain_tag,                    /* tag   */
2556                         (void **)&sc->rx_bd_chain[i],   /* vaddr */
2557                         BUS_DMA_NOWAIT,                                 /* flags */
2558                         &sc->rx_bd_chain_map[i])) {
2559                         BCE_PRINTF(sc, "%s(%d): Could not allocate RX descriptor chain "
2560                                 "DMA memory!\n", __FILE__, __LINE__);
2561                         rc = ENOMEM;
2562                         goto bce_dma_alloc_exit;
2563                 }
2564
2565                 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2566
2567                 map_arg.maxsegs = 1;
2568                 map_arg.sc = sc;
2569
2570                 error = bus_dmamap_load(
2571                                 sc->rx_bd_chain_tag,    /* dmat        */
2572                         sc->rx_bd_chain_map[i], /* map         */
2573                         sc->rx_bd_chain[i],             /* buf         */
2574                         BCE_RX_CHAIN_PAGE_SZ,   /* buflen      */
2575                         bce_dma_map_addr,               /* callback    */
2576                         &map_arg,                               /* callbackarg */
2577                         BUS_DMA_NOWAIT);                /* flags       */
2578
2579                 if (error || (map_arg.maxsegs == 0)) {
2580                         BCE_PRINTF(sc, "%s(%d): Could not map RX descriptor chain DMA memory!\n",
2581                                 __FILE__, __LINE__);
2582                         rc = ENOMEM;
2583                         goto bce_dma_alloc_exit;
2584                 }
2585
2586                 sc->rx_bd_chain_paddr[i] = map_arg.busaddr;
2587                 /* DRC - FIXME: the u32 cast below truncates the address on 64-bit systems. */
2588                 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2589                         i, (u32) sc->rx_bd_chain_paddr[i]);
2590         }
2591
2592         /*
2593          * Create a DMA tag for RX mbufs.
2594          */
2595         if (bus_dma_tag_create(
2596                         sc->parent_tag,                 /* parent      */
2597                 BCE_DMA_ALIGN,                  /* alignment   */
2598                 BCE_DMA_BOUNDARY,               /* boundary    */
2599                         sc->max_bus_addr,               /* lowaddr     */
2600                         BUS_SPACE_MAXADDR,              /* highaddr    */
2601                         NULL,                                   /* filterfunc  */
2602                         NULL,                                   /* filterarg   */
2603                         MJUM9BYTES,                             /* maxsize     */
2604                         BCE_MAX_SEGMENTS,               /* nsegments   */
2605                         MJUM9BYTES,                             /* maxsegsize  */
2606                         0,                                              /* flags       */
2607                         NULL,                                   /* lockfunc    */
2608                         NULL,                                   /* lockarg     */
2609                 &sc->rx_mbuf_tag)) {
2610                 BCE_PRINTF(sc, "%s(%d): Could not allocate RX mbuf DMA tag!\n",
2611                         __FILE__, __LINE__);
2612                 rc = ENOMEM;
2613                 goto bce_dma_alloc_exit;
2614         }
2615
2616         /* Create DMA maps for the RX mbuf clusters. */
2617         for (i = 0; i < TOTAL_RX_BD; i++) {
2618                 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2619                                 &sc->rx_mbuf_map[i])) {
2620                         BCE_PRINTF(sc, "%s(%d): Unable to create RX mbuf DMA map!\n",
2621                                 __FILE__, __LINE__);
2622                         rc = ENOMEM;
2623                         goto bce_dma_alloc_exit;
2624                 }
2625         }
2626
2627 bce_dma_alloc_exit:
2628         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2629
2630         return(rc);
2631 }
2632
2633
2634 /****************************************************************************/
2635 /* Release all resources used by the driver.                                */
2636 /*                                                                          */
2637 /* Releases all resources acquired by the driver including interrupts,      */
2638 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2639 /*                                                                          */
2640 /* Returns:                                                                 */
2641 /*   Nothing.                                                               */
2642 /****************************************************************************/
2643 static void
2644 bce_release_resources(struct bce_softc *sc)
2645 {
2646         device_t dev;
2647
2648         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2649
2650         dev = sc->bce_dev;
2651
2652         bce_dma_free(sc);
2653
2654         if (sc->bce_intrhand != NULL)
2655                 bus_teardown_intr(dev, sc->bce_irq, sc->bce_intrhand);
2656
2657         if (sc->bce_irq != NULL)
2658                 bus_release_resource(dev,
2659                         SYS_RES_IRQ,
2660                         0,
2661                         sc->bce_irq);
2662
2663         if (sc->bce_res != NULL)
2664                 bus_release_resource(dev,
2665                         SYS_RES_MEMORY,
2666                     PCIR_BAR(0),
2667                     sc->bce_res);
2668
2669         if (sc->bce_ifp != NULL)
2670                 if_free(sc->bce_ifp);
2671
2673         if (mtx_initialized(&sc->bce_mtx))
2674                 BCE_LOCK_DESTROY(sc);
2675
2676         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2677
2678 }
2679
2680
2681 /****************************************************************************/
2682 /* Firmware synchronization.                                                */
2683 /*                                                                          */
2684 /* Before performing certain events such as a chip reset, synchronize with  */
2685 /* the firmware first.                                                      */
2686 /*                                                                          */
2687 /* Returns:                                                                 */
2688 /*   0 for success, positive value for failure.                             */
2689 /****************************************************************************/
2690 static int
2691 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2692 {
2693         int i, rc = 0;
2694         u32 val;
2695
2696         /* Don't waste any time if we've timed out before. */
2697         if (sc->bce_fw_timed_out) {
2698                 rc = EBUSY;
2699                 goto bce_fw_sync_exit;
2700         }
2701
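        /*
         * Handshake: each request carries the incremented sequence number
         * (the BCE_DRV_MSG_SEQ bits); the bootcode echoes that number in
         * the BCE_FW_MB ack field once the request has been processed,
         * which is what the polling loop below waits for.
         */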
2702         /* Increment the message sequence number. */
2703         sc->bce_fw_wr_seq++;
2704         msg_data |= sc->bce_fw_wr_seq;
2705
2706         DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2707
2708         /* Send the message to the bootcode driver mailbox. */
2709         REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2710
2711         /* Wait for the bootcode to acknowledge the message. */
2712         for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2713                 /* Check for a response in the bootcode firmware mailbox. */
2714                 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2715                 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2716                         break;
2717                 DELAY(1000);
2718         }
2719
2720         /* If we've timed out, tell the bootcode that we've stopped waiting. */
2721         if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2722                 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2723
2724                 BCE_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2725                         "msg_data = 0x%08X\n",
2726                         __FILE__, __LINE__, msg_data);
2727
2728                 msg_data &= ~BCE_DRV_MSG_CODE;
2729                 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2730
2731                 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2732
2733                 sc->bce_fw_timed_out = 1;
2734                 rc = EBUSY;
2735         }
2736
2737 bce_fw_sync_exit:
2738         return (rc);
2739 }
2740
2741
2742 /****************************************************************************/
2743 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2744 /*                                                                          */
2745 /* Returns:                                                                 */
2746 /*   Nothing.                                                               */
2747 /****************************************************************************/
2748 static void
2749 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code, 
2750         u32 rv2p_code_len, u32 rv2p_proc)
2751 {
2752         int i;
2753         u32 val;
2754
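        /*
         * Each RV2P instruction is 64 bits wide; it is staged as a high
         * word/low word pair and then committed to instruction slot
         * (i / 8) of the selected processor by the ADDR_CMD write below.
         */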
2755         for (i = 0; i < rv2p_code_len; i += 8) {
2756                 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2757                 rv2p_code++;
2758                 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2759                 rv2p_code++;
2760
2761                 if (rv2p_proc == RV2P_PROC1) {
2762                         val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2763                         REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2764                 }
2765                 else {
2766                         val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2767                         REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2768                 }
2769         }
2770
2771         /* Reset the processor, un-stall is done later. */
2772         if (rv2p_proc == RV2P_PROC1) {
2773                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2774         }
2775         else {
2776                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2777         }
2778 }
2779
2780
2781 /****************************************************************************/
2782 /* Load RISC processor firmware.                                            */
2783 /*                                                                          */
2784 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2785 /* associated with a particular processor.                                  */
2786 /*                                                                          */
2787 /* Returns:                                                                 */
2788 /*   Nothing.                                                               */
2789 /****************************************************************************/
2790 static void
2791 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2792         struct fw_info *fw)
2793 {
2794         u32 offset;
2795         u32 val;
2796
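        /*
         * The section addresses in the firmware image are MIPS view
         * addresses; they are translated into scratchpad offsets below by
         * subtracting mips_view_base and adding spad_base.
         */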
2797         /* Halt the CPU. */
2798         val = REG_RD_IND(sc, cpu_reg->mode);
2799         val |= cpu_reg->mode_value_halt;
2800         REG_WR_IND(sc, cpu_reg->mode, val);
2801         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2802
2803         /* Load the Text area. */
2804         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2805         if (fw->text) {
2806                 int j;
2807
2808                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2809                         REG_WR_IND(sc, offset, fw->text[j]);
2810                 }
2811         }
2812
2813         /* Load the Data area. */
2814         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2815         if (fw->data) {
2816                 int j;
2817
2818                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2819                         REG_WR_IND(sc, offset, fw->data[j]);
2820                 }
2821         }
2822
2823         /* Load the SBSS area. */
2824         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2825         if (fw->sbss) {
2826                 int j;
2827
2828                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2829                         REG_WR_IND(sc, offset, fw->sbss[j]);
2830                 }
2831         }
2832
2833         /* Load the BSS area. */
2834         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2835         if (fw->bss) {
2836                 int j;
2837
2838                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2839                         REG_WR_IND(sc, offset, fw->bss[j]);
2840                 }
2841         }
2842
2843         /* Load the Read-Only area. */
2844         offset = cpu_reg->spad_base +
2845                 (fw->rodata_addr - cpu_reg->mips_view_base);
2846         if (fw->rodata) {
2847                 int j;
2848
2849                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2850                         REG_WR_IND(sc, offset, fw->rodata[j]);
2851                 }
2852         }
2853
2854         /* Clear the pre-fetch instruction. */
2855         REG_WR_IND(sc, cpu_reg->inst, 0);
2856         REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2857
2858         /* Start the CPU. */
2859         val = REG_RD_IND(sc, cpu_reg->mode);
2860         val &= ~cpu_reg->mode_value_halt;
2861         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2862         REG_WR_IND(sc, cpu_reg->mode, val);
2863 }
2864
2865
2866 /****************************************************************************/
2867 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2868 /*                                                                          */
2869 /* Loads the firmware for each CPU and starts the CPU.                      */
2870 /*                                                                          */
2871 /* Returns:                                                                 */
2872 /*   Nothing.                                                               */
2873 /****************************************************************************/
2874 static void
2875 bce_init_cpus(struct bce_softc *sc)
2876 {
2877         struct cpu_reg cpu_reg;
2878         struct fw_info fw;
2879
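        /*
         * The same cpu_reg/fw_info structures are re-filled for each of
         * the RX, TX, TPAT, and COM processors before bce_load_cpu_fw()
         * is called for that processor.
         */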
2880         /* Initialize the RV2P processor. */
2881         bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2882         bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2883
2884         /* Initialize the RX Processor. */
2885         cpu_reg.mode = BCE_RXP_CPU_MODE;
2886         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2887         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2888         cpu_reg.state = BCE_RXP_CPU_STATE;
2889         cpu_reg.state_value_clear = 0xffffff;
2890         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2891         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2892         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2893         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2894         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2895         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2896         cpu_reg.mips_view_base = 0x8000000;
2897
2898         fw.ver_major = bce_RXP_b06FwReleaseMajor;
2899         fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2900         fw.ver_fix = bce_RXP_b06FwReleaseFix;
2901         fw.start_addr = bce_RXP_b06FwStartAddr;
2902
2903         fw.text_addr = bce_RXP_b06FwTextAddr;
2904         fw.text_len = bce_RXP_b06FwTextLen;
2905         fw.text_index = 0;
2906         fw.text = bce_RXP_b06FwText;
2907
2908         fw.data_addr = bce_RXP_b06FwDataAddr;
2909         fw.data_len = bce_RXP_b06FwDataLen;
2910         fw.data_index = 0;
2911         fw.data = bce_RXP_b06FwData;
2912
2913         fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2914         fw.sbss_len = bce_RXP_b06FwSbssLen;
2915         fw.sbss_index = 0;
2916         fw.sbss = bce_RXP_b06FwSbss;
2917
2918         fw.bss_addr = bce_RXP_b06FwBssAddr;
2919         fw.bss_len = bce_RXP_b06FwBssLen;
2920         fw.bss_index = 0;
2921         fw.bss = bce_RXP_b06FwBss;
2922
2923         fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2924         fw.rodata_len = bce_RXP_b06FwRodataLen;
2925         fw.rodata_index = 0;
2926         fw.rodata = bce_RXP_b06FwRodata;
2927
2928         DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2929         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2930
2931         /* Initialize the TX Processor. */
2932         cpu_reg.mode = BCE_TXP_CPU_MODE;
2933         cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2934         cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2935         cpu_reg.state = BCE_TXP_CPU_STATE;
2936         cpu_reg.state_value_clear = 0xffffff;
2937         cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2938         cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2939         cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2940         cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2941         cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2942         cpu_reg.spad_base = BCE_TXP_SCRATCH;
2943         cpu_reg.mips_view_base = 0x8000000;
2944
2945         fw.ver_major = bce_TXP_b06FwReleaseMajor;
2946         fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2947         fw.ver_fix = bce_TXP_b06FwReleaseFix;
2948         fw.start_addr = bce_TXP_b06FwStartAddr;
2949
2950         fw.text_addr = bce_TXP_b06FwTextAddr;
2951         fw.text_len = bce_TXP_b06FwTextLen;
2952         fw.text_index = 0;
2953         fw.text = bce_TXP_b06FwText;
2954
2955         fw.data_addr = bce_TXP_b06FwDataAddr;
2956         fw.data_len = bce_TXP_b06FwDataLen;
2957         fw.data_index = 0;
2958         fw.data = bce_TXP_b06FwData;
2959
2960         fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2961         fw.sbss_len = bce_TXP_b06FwSbssLen;
2962         fw.sbss_index = 0;
2963         fw.sbss = bce_TXP_b06FwSbss;
2964
2965         fw.bss_addr = bce_TXP_b06FwBssAddr;
2966         fw.bss_len = bce_TXP_b06FwBssLen;
2967         fw.bss_index = 0;
2968         fw.bss = bce_TXP_b06FwBss;
2969
2970         fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2971         fw.rodata_len = bce_TXP_b06FwRodataLen;
2972         fw.rodata_index = 0;
2973         fw.rodata = bce_TXP_b06FwRodata;
2974
2975         DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2976         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2977
2978         /* Initialize the TX Patch-up Processor. */
2979         cpu_reg.mode = BCE_TPAT_CPU_MODE;
2980         cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2981         cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2982         cpu_reg.state = BCE_TPAT_CPU_STATE;
2983         cpu_reg.state_value_clear = 0xffffff;
2984         cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2985         cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2986         cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2987         cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2988         cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2989         cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2990         cpu_reg.mips_view_base = 0x8000000;
2991
2992         fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2993         fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2994         fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2995         fw.start_addr = bce_TPAT_b06FwStartAddr;
2996
2997         fw.text_addr = bce_TPAT_b06FwTextAddr;
2998         fw.text_len = bce_TPAT_b06FwTextLen;
2999         fw.text_index = 0;
3000         fw.text = bce_TPAT_b06FwText;
3001
3002         fw.data_addr = bce_TPAT_b06FwDataAddr;
3003         fw.data_len = bce_TPAT_b06FwDataLen;
3004         fw.data_index = 0;
3005         fw.data = bce_TPAT_b06FwData;
3006
3007         fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3008         fw.sbss_len = bce_TPAT_b06FwSbssLen;
3009         fw.sbss_index = 0;
3010         fw.sbss = bce_TPAT_b06FwSbss;
3011
3012         fw.bss_addr = bce_TPAT_b06FwBssAddr;
3013         fw.bss_len = bce_TPAT_b06FwBssLen;
3014         fw.bss_index = 0;
3015         fw.bss = bce_TPAT_b06FwBss;
3016
3017         fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3018         fw.rodata_len = bce_TPAT_b06FwRodataLen;
3019         fw.rodata_index = 0;
3020         fw.rodata = bce_TPAT_b06FwRodata;
3021
3022         DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3023         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3024
3025         /* Initialize the Completion Processor. */
3026         cpu_reg.mode = BCE_COM_CPU_MODE;
3027         cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3028         cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3029         cpu_reg.state = BCE_COM_CPU_STATE;
3030         cpu_reg.state_value_clear = 0xffffff;
3031         cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3032         cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3033         cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3034         cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3035         cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3036         cpu_reg.spad_base = BCE_COM_SCRATCH;
3037         cpu_reg.mips_view_base = 0x8000000;
3038
3039         fw.ver_major = bce_COM_b06FwReleaseMajor;
3040         fw.ver_minor = bce_COM_b06FwReleaseMinor;
3041         fw.ver_fix = bce_COM_b06FwReleaseFix;
3042         fw.start_addr = bce_COM_b06FwStartAddr;
3043
3044         fw.text_addr = bce_COM_b06FwTextAddr;
3045         fw.text_len = bce_COM_b06FwTextLen;
3046         fw.text_index = 0;
3047         fw.text = bce_COM_b06FwText;
3048
3049         fw.data_addr = bce_COM_b06FwDataAddr;
3050         fw.data_len = bce_COM_b06FwDataLen;
3051         fw.data_index = 0;
3052         fw.data = bce_COM_b06FwData;
3053
3054         fw.sbss_addr = bce_COM_b06FwSbssAddr;
3055         fw.sbss_len = bce_COM_b06FwSbssLen;
3056         fw.sbss_index = 0;
3057         fw.sbss = bce_COM_b06FwSbss;
3058
3059         fw.bss_addr = bce_COM_b06FwBssAddr;
3060         fw.bss_len = bce_COM_b06FwBssLen;
3061         fw.bss_index = 0;
3062         fw.bss = bce_COM_b06FwBss;
3063
3064         fw.rodata_addr = bce_COM_b06FwRodataAddr;
3065         fw.rodata_len = bce_COM_b06FwRodataLen;
3066         fw.rodata_index = 0;
3067         fw.rodata = bce_COM_b06FwRodata;
3068
3069         DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3070         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3071 }
3072
3073
3074 /****************************************************************************/
3075 /* Initialize context memory.                                               */
3076 /*                                                                          */
3077 /* Clears the memory associated with each Context ID (CID).                 */
3078 /*                                                                          */
3079 /* Returns:                                                                 */
3080 /*   Nothing.                                                               */
3081 /****************************************************************************/
3082 static void
3083 bce_init_context(struct bce_softc *sc)
3084 {
3085         u32 vcid;
3086
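        /*
         * Walk all 96 context IDs, mapping each one through the context
         * page table and zeroing its PHY_CTX_SIZE bytes of context memory
         * one 32-bit word at a time.
         */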
3087         vcid = 96;
3088         while (vcid) {
3089                 u32 vcid_addr, pcid_addr, offset;
3090
3091                 vcid--;
3092
3093                 vcid_addr = GET_CID_ADDR(vcid);
3094                 pcid_addr = vcid_addr;
3095
3096                 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3097                 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3098
3099                 /* Zero out the context. */
3100                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3101                         CTX_WR(sc, 0x00, offset, 0);
3102                 }
3103
3104                 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3105                 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3106         }
3107 }
3108
3109
3110 /****************************************************************************/
3111 /* Fetch the permanent MAC address of the controller.                       */
3112 /*                                                                          */
3113 /* Returns:                                                                 */
3114 /*   Nothing.                                                               */
3115 /****************************************************************************/
3116 static void
3117 bce_get_mac_addr(struct bce_softc *sc)
3118 {
3119         u32 mac_lo = 0, mac_hi = 0;
3120
3121         /*
3122          * The NetXtreme II bootcode populates various NIC
3123          * power-on and runtime configuration items in a
3124          * shared memory area.  The factory configured MAC
3125          * address is available from both NVRAM and the
3126          * shared memory area so we'll read the value from
3127          * shared memory for speed.
3128          */
3129
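        /*
         * The address is stored as two 32-bit words: MAC_UPPER holds the
         * two most significant bytes and MAC_LOWER holds the remaining
         * four bytes.
         */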
3130         mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3131                 BCE_PORT_HW_CFG_MAC_UPPER);
3132         mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3133                 BCE_PORT_HW_CFG_MAC_LOWER);
3134
3135         if ((mac_lo == 0) && (mac_hi == 0)) {
3136                 BCE_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 
3137                         __FILE__, __LINE__);
3138         } else {
3139                 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3140                 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3141                 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3142                 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3143                 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3144                 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3145         }
3146
3147         DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3148 }
3149
3150
3151 /****************************************************************************/
3152 /* Program the MAC address.                                                 */
3153 /*                                                                          */
3154 /* Returns:                                                                 */
3155 /*   Nothing.                                                               */
3156 /****************************************************************************/
3157 static void
3158 bce_set_mac_addr(struct bce_softc *sc)
3159 {
3160         u32 val;
3161         u8 *mac_addr = sc->eaddr;
3162
3163         DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3164
3165         val = (mac_addr[0] << 8) | mac_addr[1];
3166
3167         REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3168
3169         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3170                 (mac_addr[4] << 8) | mac_addr[5];
3171
3172         REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3173 }
3174
3175
3176 /****************************************************************************/
3177 /* Stop the controller.                                                     */
3178 /*                                                                          */
3179 /* Returns:                                                                 */
3180 /*   Nothing.                                                               */
3181 /****************************************************************************/
3182 static void
3183 bce_stop(struct bce_softc *sc)
3184 {
3185         struct ifnet *ifp;
3186         struct ifmedia_entry *ifm;
3187         struct mii_data *mii = NULL;
3188         int mtmp, itmp;
3189
3190         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3191
3192         BCE_LOCK_ASSERT(sc);
3193
3194         ifp = sc->bce_ifp;
3195
3196         mii = device_get_softc(sc->bce_miibus);
3197
3198         callout_stop(&sc->bce_stat_ch);
3199
3200         /* Disable the transmit/receive blocks. */
3201         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
3202         REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3203         DELAY(20);
3204
3205         bce_disable_intr(sc);
3206
3207         /* Tell firmware that the driver is going away. */
3208         bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
3209
3210         /* Free the RX lists. */
3211         bce_free_rx_chain(sc);
3212
3213         /* Free TX buffers. */
3214         bce_free_tx_chain(sc);
3215
3216         /*
3217          * Isolate/power down the PHY, but leave the media selection
3218          * unchanged so that things will be put back to normal when
3219          * we bring the interface back up.
3220          */
3221
3222         itmp = ifp->if_flags;
3223         ifp->if_flags |= IFF_UP;
3224         /*
3225          * If we are called from bce_detach(), mii is already NULL.
3226          */
3227         if (mii != NULL) {
3228                 ifm = mii->mii_media.ifm_cur;
3229                 mtmp = ifm->ifm_media;
3230                 ifm->ifm_media = IFM_ETHER | IFM_NONE;
3231                 mii_mediachg(mii);
3232                 ifm->ifm_media = mtmp;
3233         }
3234
3235         ifp->if_flags = itmp;
3236         ifp->if_timer = 0;
3237
3238         sc->bce_link = 0;
3239
3240         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3241
3242         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3243
3244 }
3245
3246
3247 static int
3248 bce_reset(struct bce_softc *sc, u32 reset_code)
3249 {
3250         u32 val;
3251         int i, rc = 0;
3252
3253         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3254
3255         /* Wait for pending PCI transactions to complete. */
3256         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3257                BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3258                BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3259                BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3260                BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3261         val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3262         DELAY(5);
3263
3264         /* Assume bootcode is running. */
3265         sc->bce_fw_timed_out = 0;
3266
3267         /* Give the firmware a chance to prepare for the reset. */
3268         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3269         if (rc)
3270                 goto bce_reset_exit;
3271
3272         /* Set a firmware reminder that this is a soft reset. */
3273         REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3274                    BCE_DRV_RESET_SIGNATURE_MAGIC);
3275
3276         /* Dummy read to force the chip to complete all current transactions. */
3277         val = REG_RD(sc, BCE_MISC_ID);
3278
3279         /* Chip reset. */
3280         val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3281               BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3282               BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3283         REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3284
3285         /* Wait for the core reset to complete (poll up to 10 times, 10us apart). */
3286         for (i = 0; i < 10; i++) {
3287                 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3288                 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3289                             BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3290                         break;
3291                 }
3292                 DELAY(10);
3293         }
3294
3295         /* Check that reset completed successfully. */
3296         if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3297                    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3298                 BCE_PRINTF(sc, "%s(%d): Reset failed!\n", 
3299                         __FILE__, __LINE__);
3300                 rc = EBUSY;
3301                 goto bce_reset_exit;
3302         }
3303
3304         /* Make sure byte swapping is properly configured. */
3305         val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3306         if (val != 0x01020304) {
3307                 BCE_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 
3308                         __FILE__, __LINE__);
3309                 rc = ENODEV;
3310                 goto bce_reset_exit;
3311         }
3312
3313         /* Just completed a reset, assume that firmware is running again. */
3314         sc->bce_fw_timed_out = 0;
3315
3316         /* Wait for the firmware to finish its initialization. */
3317         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3318         if (rc)
3319                 BCE_PRINTF(sc, "%s(%d): Firmware did not complete initialization!\n",
3320                         __FILE__, __LINE__);
3321
3322 bce_reset_exit:
3323         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3324
3325         return (rc);
3326 }
3327
3328
3329 static int
3330 bce_chipinit(struct bce_softc *sc)
3331 {
3332         u32 val;
3333         int rc = 0;
3334
3335         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3336
3337         /* Make sure the interrupt is not active. */
3338         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3339
3340         /* Initialize DMA byte/word swapping, configure the number of DMA  */
3341         /* channels and PCI clock compensation delay.                      */
3342         val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3343               BCE_DMA_CONFIG_DATA_WORD_SWAP |
3344 #if BYTE_ORDER == BIG_ENDIAN
3345               BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3346 #endif
3347               BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3348               DMA_READ_CHANS << 12 |
3349               DMA_WRITE_CHANS << 16;
3350
3351         val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3352
3353         if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3354                 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3355
3356         /*
3357          * This setting resolves a problem observed on certain Intel PCI
3358          * chipsets that cannot handle multiple outstanding DMA operations.
3359          * See errata E9_5706A1_65.
3360          */
3361         if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3362             (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3363             !(sc->bce_flags & BCE_PCIX_FLAG))
3364                 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3365
3366         REG_WR(sc, BCE_DMA_CONFIG, val);
3367
3368         /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3369         if (sc->bce_flags & BCE_PCIX_FLAG) {
3370                 u16 val;
3371
3372                 val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3373                 pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3374         }
3375
3376         /* Enable the RX_V2P and Context state machines before access. */
3377         REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3378                BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3379                BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3380                BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3381
3382         /* Initialize context mapping and zero out the quick contexts. */
3383         bce_init_context(sc);
3384
3385         /* Initialize the on-board CPUs. */
3386         bce_init_cpus(sc);
3387
3388         /* Prepare NVRAM for access. */
3389         if (bce_init_nvram(sc)) {
3390                 rc = ENODEV;
3391                 goto bce_chipinit_exit;
3392         }
3393
3394         /* Set the kernel bypass block size */
3395         val = REG_RD(sc, BCE_MQ_CONFIG);
3396         val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3397         val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3398         REG_WR(sc, BCE_MQ_CONFIG, val);
3399
3400         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3401         REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3402         REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3403
3404         val = (BCM_PAGE_BITS - 8) << 24;
3405         REG_WR(sc, BCE_RV2P_CONFIG, val);
3406
3407         /* Configure page size. */
3408         val = REG_RD(sc, BCE_TBDR_CONFIG);
3409         val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3410         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3411         REG_WR(sc, BCE_TBDR_CONFIG, val);
3412
3413 bce_chipinit_exit:
3414         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3415
3416         return(rc);
3417 }
3418
3419
3420 /****************************************************************************/
3421 /* Initialize the controller in preparation to send/receive traffic.        */
3422 /*                                                                          */
3423 /* Returns:                                                                 */
3424 /*   0 for success, positive value for failure.                             */
3425 /****************************************************************************/
3426 static int
3427 bce_blockinit(struct bce_softc *sc)
3428 {
3429         u32 reg, val;
3430         int rc = 0;
3431
3432         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3433
3434         /* Load the hardware default MAC address. */
3435         bce_set_mac_addr(sc);
3436
3437         /* Set the Ethernet backoff seed value */
3438         val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3439               (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3440               (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3441         REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3442
3443         sc->last_status_idx = 0;
3444         sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3445
3446         /* Set up link change interrupt generation. */
3447         REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3448
3449         /* Program the physical address of the status block. */
3450         REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3451                 BCE_ADDR_LO(sc->status_block_paddr));
3452         REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3453                 BCE_ADDR_HI(sc->status_block_paddr));
3454
3455         /* Program the physical address of the statistics block. */
3456         REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3457                 BCE_ADDR_LO(sc->stats_block_paddr));
3458         REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3459                 BCE_ADDR_HI(sc->stats_block_paddr));
3460
3461         /* Program various host coalescing parameters. */
3462         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3463                 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3464         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3465                 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3466         REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3467                 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3468         REG_WR(sc, BCE_HC_TX_TICKS,
3469                 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3470         REG_WR(sc, BCE_HC_RX_TICKS,
3471                 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3472         REG_WR(sc, BCE_HC_COM_TICKS,
3473                 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3474         REG_WR(sc, BCE_HC_CMD_TICKS,
3475                 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3476         REG_WR(sc, BCE_HC_STATS_TICKS,
3477                 (sc->bce_stats_ticks & 0xffff00));
3478         REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3479                 0xbb8);  /* 3ms */
3480         REG_WR(sc, BCE_HC_CONFIG,
3481                 (BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3482                 BCE_HC_CONFIG_COLLECT_STATS));
3483
3484         /* Clear the internal statistics counters. */
3485         REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3486
3487         /* Verify that bootcode is running. */
3488         reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3489
3490         DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3491                 BCE_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3492                         __FILE__, __LINE__);
3493                 reg = 0);
3494
3495         if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3496             BCE_DEV_INFO_SIGNATURE_MAGIC) {
3497                 BCE_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3498                         "Expected: 0x%08X\n", __FILE__, __LINE__,
3499                         (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3500                         BCE_DEV_INFO_SIGNATURE_MAGIC);
3501                 rc = ENODEV;
3502                 goto bce_blockinit_exit;
3503         }
3504
3505         /* Check if any management firmware is running. */
3506         reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3507         if (reg & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
3508                 DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3509                 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3510         }
3511
3512         sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3513         DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3514
3515         /* Allow bootcode to apply any additional fixes before enabling MAC. */
3516         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3517
3518         /* Enable link state change interrupt generation. */
3519         REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3520
3521         /* Enable all remaining blocks in the MAC. */
3522         REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3523         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3524         DELAY(20);
3525
3526 bce_blockinit_exit:
3527         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3528
3529         return (rc);
3530 }
3531
3532
3533 /****************************************************************************/
3534 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3535 /*                                                                          */
3536 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3537 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3538 /* necessary.                                                               */
3539 /*                                                                          */
3540 /* Returns:                                                                 */
3541 /*   0 for success, positive value for failure.                             */
3542 /****************************************************************************/
3543 static int
3544 bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod, 
3545         u32 *prod_bseq)
3546 {
3547         bus_dmamap_t            map;
3548         bus_dma_segment_t       segs[4];
3549         struct mbuf *m_new = NULL;
3550         struct rx_bd            *rxbd;
3551         int i, nsegs, error, rc = 0;
3552 #ifdef BCE_DEBUG
3553         u16 debug_chain_prod = *chain_prod;
3554 #endif
3555
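        /*
         * Note: *prod is the ring producer index, *chain_prod is that
         * index translated into an rx_bd chain entry, and *prod_bseq
         * accumulates the byte count of every buffer posted so far.
         */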
3556         DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n", 
3557                 __FUNCTION__);
3558
3559         /* Make sure the inputs are valid. */
3560         DBRUNIF((*chain_prod > MAX_RX_BD),
3561                 BCE_PRINTF(sc, "%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
3562                 __FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
3563
3564         DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3565                 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3566
3567         if (m == NULL) {
3568
3569                 DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3570                         BCE_PRINTF(sc, "%s(%d): Simulating mbuf allocation failure.\n", 
3571                                 __FILE__, __LINE__);
3572                         sc->mbuf_alloc_failed++;
3573                         rc = ENOBUFS;
3574                         goto bce_get_buf_exit);
3575
3576                 /* This is a new mbuf allocation. */
3577                 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3578                 if (m_new == NULL) {
3579
3580                         DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n", 
3581                                 __FILE__, __LINE__);
3582
3583                         DBRUNIF(1, sc->mbuf_alloc_failed++);
3584
3585                         rc = ENOBUFS;
3586                         goto bce_get_buf_exit;
3587                 }
3588
3589                 DBRUNIF(1, sc->rx_mbuf_alloc++);
3590                 m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
3591                 if (!(m_new->m_flags & M_EXT)) {
3592
3593                         DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n", 
3594                                 __FILE__, __LINE__);
3595                         
3596                         m_freem(m_new);
3597
3598                         DBRUNIF(1, sc->rx_mbuf_alloc--);
3599                         DBRUNIF(1, sc->mbuf_alloc_failed++);
3600
3601                         rc = ENOBUFS;
3602                         goto bce_get_buf_exit;
3603                 }
3604                         
3605                 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3606         } else {
3607                 m_new = m;
3608                 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3609                 m_new->m_data = m_new->m_ext.ext_buf;
3610         }
3611
3612         /* Map the mbuf cluster into device memory. */
3613         map = sc->rx_mbuf_map[*chain_prod];
3614         error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
3615             segs, &nsegs, BUS_DMA_NOWAIT);
3616
3617         if (error) {
3618                 BCE_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3619                         __FILE__, __LINE__);
3620
3621                 m_freem(m_new);
3622
3623                 DBRUNIF(1, sc->rx_mbuf_alloc--);
3624
3625                 rc = ENOBUFS;
3626                 goto bce_get_buf_exit;
3627         }
3628
3629         /* Watch for overflow. */
3630         DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3631                 BCE_PRINTF(sc, "%s(%d): Too many free rx_bd (0x%04X > 0x%04X)!\n", 
3632                         __FILE__, __LINE__, sc->free_rx_bd, (u16) USABLE_RX_BD));
3633
3634         DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 
3635                 sc->rx_low_watermark = sc->free_rx_bd);
3636
3637         /* Setup the rx_bd for the first segment. */
3638         rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3639
3640         rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
3641         rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
3642         rxbd->rx_bd_len       = htole32(segs[0].ds_len);
3643         rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
3644         *prod_bseq += segs[0].ds_len;
3645
3646         for (i = 1; i < nsegs; i++) {
3647
3648                 *prod = NEXT_RX_BD(*prod);
3649                 *chain_prod = RX_CHAIN_IDX(*prod); 
3650
3651                 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3652
3653                 rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
3654                 rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
3655                 rxbd->rx_bd_len       = htole32(segs[i].ds_len);
3656                 rxbd->rx_bd_flags     = 0;
3657                 *prod_bseq += segs[i].ds_len;
3658         }
3659
3660         rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3661
3662         /* Save the mbuf and update our counter. */
3663         sc->rx_mbuf_ptr[*chain_prod] = m_new;
3664         sc->free_rx_bd -= nsegs;
3665
3666         DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 
3667                 nsegs));
3668
3669         DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3670                 "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3671
3672 bce_get_buf_exit:
3673         DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n", 
3674                 __FUNCTION__);
3675
3676         return(rc);
3677 }
3678
3679
3680 /****************************************************************************/
3681 /* Allocate memory and initialize the TX data structures.                   */
3682 /*                                                                          */
3683 /* Returns:                                                                 */
3684 /*   0 for success, positive value for failure.                             */
3685 /****************************************************************************/
3686 static int
3687 bce_init_tx_chain(struct bce_softc *sc)
3688 {
3689         struct tx_bd *txbd;
3690         u32 val;
3691         int i, rc = 0;
3692
3693         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3694
3695         /* Set the initial TX producer/consumer indices. */
3696         sc->tx_prod        = 0;
3697         sc->tx_cons        = 0;
3698         sc->tx_prod_bseq   = 0;
3699         sc->used_tx_bd = 0;
3700         DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3701
3702         /*
3703          * The NetXtreme II supports a linked-list structure called
3704          * a Buffer Descriptor Chain (or BD chain).  A BD chain
3705          * consists of a series of 1 or more chain pages, each of which
3706          * consists of a fixed number of BD entries.
3707          * The last BD entry on each page is a pointer to the next page
3708          * in the chain, and the last pointer in the BD chain
3709          * points back to the beginning of the chain.
3710          */
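        /*
         * For example, with TX_PAGES = 2 the last BD of page 0 points to
         * page 1 and the last BD of page 1 points back to page 0, so the
         * hardware can walk the ring without driver intervention.
         */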
3711
3712         /* Set the TX next pointer chain entries. */
3713         for (i = 0; i < TX_PAGES; i++) {
3714                 int j;
3715
3716                 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3717
3718                 /* Check if we've reached the last page. */
3719                 if (i == (TX_PAGES - 1))
3720                         j = 0;
3721                 else
3722                         j = i + 1;
3723
3724                 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3725                 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3726         }
3727
3728         /*
3729          * Initialize the context ID for an L2 TX chain.
3730          */
3731         val = BCE_L2CTX_TYPE_TYPE_L2;
3732         val |= BCE_L2CTX_TYPE_SIZE_L2;
3733         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3734
3735         val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3736         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3737
3738         /* Point the hardware to the first page in the chain. */
3739         val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3740         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3741         val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3742         CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3743
3744         DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3745
3746         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3747
3748         return(rc);
3749 }
3750
3751
3752 /****************************************************************************/
3753 /* Free memory and clear the TX data structures.                            */
3754 /*                                                                          */
3755 /* Returns:                                                                 */
3756 /*   Nothing.                                                               */
3757 /****************************************************************************/
3758 static void
3759 bce_free_tx_chain(struct bce_softc *sc)
3760 {
3761         int i;
3762
3763         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3764
3765         /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3766         for (i = 0; i < TOTAL_TX_BD; i++) {
3767                 if (sc->tx_mbuf_ptr[i] != NULL) {
3768                         if (sc->tx_mbuf_map != NULL)
3769                                 bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3770                                         BUS_DMASYNC_POSTWRITE);
3771                         m_freem(sc->tx_mbuf_ptr[i]);
3772                         sc->tx_mbuf_ptr[i] = NULL;
3773                         DBRUNIF(1, sc->tx_mbuf_alloc--);
3774                 }                       
3775         }
3776
3777         /* Clear each TX chain page. */
3778         for (i = 0; i < TX_PAGES; i++)
3779                 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3780
3781         /* Check if we lost any mbufs in the process. */
3782         DBRUNIF((sc->tx_mbuf_alloc),
3783                 BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs "
3784                         "from tx chain!\n",
3785                         __FILE__, __LINE__, sc->tx_mbuf_alloc));
3786
3787         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3788 }
3789
3790
3791 /****************************************************************************/
3792 /* Allocate memory and initialize the RX data structures.                   */
3793 /*                                                                          */
3794 /* Returns:                                                                 */
3795 /*   0 for success, positive value for failure.                             */
3796 /****************************************************************************/
3797 static int
3798 bce_init_rx_chain(struct bce_softc *sc)
3799 {
3800         struct rx_bd *rxbd;
3801         int i, rc = 0;
3802         u16 prod, chain_prod;
3803         u32 prod_bseq, val;
3804
3805         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3806
3807         /* Initialize the RX producer and consumer indices. */
3808         sc->rx_prod        = 0;
3809         sc->rx_cons        = 0;
3810         sc->rx_prod_bseq   = 0;
3811         sc->free_rx_bd     = BCE_RX_SLACK_SPACE;
3812         DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3813
3814         /* Initialize the RX next pointer chain entries. */
3815         for (i = 0; i < RX_PAGES; i++) {
3816                 int j;
3817
3818                 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3819
3820                 /* Check if we've reached the last page. */
3821                 if (i == (RX_PAGES - 1))
3822                         j = 0;
3823                 else
3824                         j = i + 1;
3825
3826                 /* Setup the chain page pointers. */
3827                 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3828                 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3829         }
3830
3831         /* Initialize the context ID for an L2 RX chain. */
3832         val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3833         val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3834         val |= 0x02 << 8;
3835         CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3836
3837         /* Point the hardware to the first page in the chain. */
3838         val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3839         CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3840         val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3841         CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3842
3843         /* Allocate mbuf clusters for the rx_bd chain. */
3844         prod = prod_bseq = 0;
3845         while (prod < BCE_RX_SLACK_SPACE) {
3846                 chain_prod = RX_CHAIN_IDX(prod);
3847                 if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3848                         BCE_PRINTF(sc, "%s(%d): Error filling RX chain: rx_bd[0x%04X]!\n",
3849                                 __FILE__, __LINE__, chain_prod);
3850                         rc = ENOBUFS;
3851                         break;
3852                 }
3853                 prod = NEXT_RX_BD(prod);
3854         }
3855
3856         /* Save the RX chain producer index. */
3857         sc->rx_prod      = prod;
3858         sc->rx_prod_bseq = prod_bseq;
3859
3860         for (i = 0; i < RX_PAGES; i++) {
3861                 bus_dmamap_sync(
3862                         sc->rx_bd_chain_tag,
3863                         sc->rx_bd_chain_map[i],
3864                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3865         }
3866
3867         /* Tell the chip about the waiting rx_bd's. */
3868         REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3869         REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3870
3871         DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3872
3873         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3874
3875         return(rc);
3876 }
3877
3878
3879 /****************************************************************************/
3880 /* Free memory and clear the RX data structures.                            */
3881 /*                                                                          */
3882 /* Returns:                                                                 */
3883 /*   Nothing.                                                               */
3884 /****************************************************************************/
3885 static void
3886 bce_free_rx_chain(struct bce_softc *sc)
3887 {
3888         int i;
3889
3890         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3891
3892         /* Free any mbufs still in the RX mbuf chain. */
3893         for (i = 0; i < TOTAL_RX_BD; i++) {
3894                 if (sc->rx_mbuf_ptr[i] != NULL) {
3895                         if (sc->rx_mbuf_map[i] != NULL)
3896                                 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3897                                         BUS_DMASYNC_POSTREAD);
3898                         m_freem(sc->rx_mbuf_ptr[i]);
3899                         sc->rx_mbuf_ptr[i] = NULL;
3900                         DBRUNIF(1, sc->rx_mbuf_alloc--);
3901                 }
3902         }
3903
3904         /* Clear each RX chain page. */
3905         for (i = 0; i < RX_PAGES; i++)
3906                 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3907
3908         /* Check if we lost any mbufs in the process. */
3909         DBRUNIF((sc->rx_mbuf_alloc),
3910                 BCE_PRINTF(sc, "%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3911                         __FILE__, __LINE__, sc->rx_mbuf_alloc));
3912
3913         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3914 }
3915
3916
3917 /****************************************************************************/
3918 /* Set media options.                                                       */
3919 /*                                                                          */
3920 /* Returns:                                                                 */
3921 /*   0 for success, positive value for failure.                             */
3922 /****************************************************************************/
3923 static int
3924 bce_ifmedia_upd(struct ifnet *ifp)
3925 {
3926         struct bce_softc *sc;
3927         struct mii_data *mii;
3928         struct ifmedia *ifm;
3929         int rc = 0;
3930
3931         sc = ifp->if_softc;
3932         ifm = &sc->bce_ifmedia;
3933
3934         /* DRC - ToDo: Add SerDes support. */
3935
3936         mii = device_get_softc(sc->bce_miibus);
3937         sc->bce_link = 0;
3938         if (mii->mii_instance) {
3939                 struct mii_softc *miisc;
3940                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3941                     miisc = LIST_NEXT(miisc, mii_list))
3942                         mii_phy_reset(miisc);
3943         }
3944         mii_mediachg(mii);
3945
3946         return(rc);
3947 }
3948
3949
3950 /****************************************************************************/
3951 /* Reports current media status.                                            */
3952 /*                                                                          */
3953 /* Returns:                                                                 */
3954 /*   Nothing.                                                               */
3955 /****************************************************************************/
3956 static void
3957 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3958 {
3959         struct bce_softc *sc;
3960         struct mii_data *mii;
3961
3962         sc = ifp->if_softc;
3963
3964         BCE_LOCK(sc);
3965
3966         mii = device_get_softc(sc->bce_miibus);
3967
3968         /* DRC - ToDo: Add SerDes support. */
3969
3970         mii_pollstat(mii);
3971         ifmr->ifm_active = mii->mii_media_active;
3972         ifmr->ifm_status = mii->mii_media_status;
3973
3974         BCE_UNLOCK(sc);
3975 }
3976
3977
3978 /****************************************************************************/
3979 /* Handles PHY generated interrupt events.                                  */
3980 /*                                                                          */
3981 /* Returns:                                                                 */
3982 /*   Nothing.                                                               */
3983 /****************************************************************************/
3984 static void
3985 bce_phy_intr(struct bce_softc *sc)
3986 {
3987         u32 new_link_state, old_link_state;
3988
3989         new_link_state = sc->status_block->status_attn_bits &
3990                 STATUS_ATTN_BITS_LINK_STATE;
3991         old_link_state = sc->status_block->status_attn_bits_ack &
3992                 STATUS_ATTN_BITS_LINK_STATE;
3993
3994         /* Handle any changes if the link state has changed. */
3995         if (new_link_state != old_link_state) {
3996
3997                 DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3998
3999                 sc->bce_link = 0;
4000                 callout_stop(&sc->bce_stat_ch);
4001                 bce_tick_locked(sc);
4002
4003                 /* Update the status_attn_bits_ack field in the status block. */
4004                 if (new_link_state) {
4005                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4006                                 STATUS_ATTN_BITS_LINK_STATE);
4007                         DBPRINT(sc, BCE_INFO, "Link is now UP.\n");
4008                 }
4009                 else {
4010                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4011                                 STATUS_ATTN_BITS_LINK_STATE);
4012                         DBPRINT(sc, BCE_INFO, "Link is now DOWN.\n");
4013                 }
4014
4015         }
4016
4017         /* Acknowledge the link change interrupt. */
4018         REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4019 }
4020
4021
4022 /****************************************************************************/
4023 /* Handles received frame interrupt events.                                 */
4024 /*                                                                          */
4025 /* Returns:                                                                 */
4026 /*   Nothing.                                                               */
4027 /****************************************************************************/
4028 static void
4029 bce_rx_intr(struct bce_softc *sc)
4030 {
4031         struct status_block *sblk = sc->status_block;
4032         struct ifnet *ifp = sc->bce_ifp;
4033         u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4034         u32 sw_prod_bseq;
4035         struct l2_fhdr *l2fhdr;
4036
4037         DBRUNIF(1, sc->rx_interrupts++);
4038
4039         /* Prepare the RX chain pages to be accessed by the host CPU. */
4040         for (int i = 0; i < RX_PAGES; i++)
4041                 bus_dmamap_sync(sc->rx_bd_chain_tag,
4042                     sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4043
4044         /* Get the hardware's view of the RX consumer index. */
4045         hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
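             /* Skip to the next entry if this is a chain page pointer. */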
4046         if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4047                 hw_cons++;
4048
4049         /* Get working copies of the driver's view of the RX indices. */
4050         sw_cons = sc->rx_cons;
4051         sw_prod = sc->rx_prod;
4052         sw_prod_bseq = sc->rx_prod_bseq;
4053
4054         DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
4055                 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
4056                 __FUNCTION__, sw_prod, sw_cons, 
4057                 sw_prod_bseq);
4058
4059         /* Prevent speculative reads from getting ahead of the status block. */
4060         bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4061                 BUS_SPACE_BARRIER_READ);
4062
4063         DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4064                 sc->rx_low_watermark = sc->free_rx_bd);
4065
4066         /* 
4067          * Scan through the receive chain as long 
4068          * as there is work to do.
4069          */
4070         while (sw_cons != hw_cons) {
4071                 struct mbuf *m;
4072                 struct rx_bd *rxbd;
4073                 unsigned int len;
4074                 u32 status;
4075
4076                 /* Convert the producer/consumer indices to an actual rx_bd index. */
4077                 sw_chain_cons = RX_CHAIN_IDX(sw_cons);
4078                 sw_chain_prod = RX_CHAIN_IDX(sw_prod);
4079
4080                 /* Get the used rx_bd. */
4081                 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
4082                 sc->free_rx_bd++;
4083         
4084                 DBRUN(BCE_VERBOSE_RECV, 
4085                         BCE_PRINTF(sc, "%s(): ", __FUNCTION__); 
4086                         bce_dump_rxbd(sc, sw_chain_cons, rxbd));
4087
4088 #ifdef DEVICE_POLLING
4089                 if (ifp->if_capenable & IFCAP_POLLING) {
4090                         if (sc->bce_rxcycles <= 0)
4091                                 break;
4092                         sc->bce_rxcycles--;
4093                 }
4094 #endif
4095
4096                 /* The mbuf is stored with the last rx_bd entry of a packet. */
4097                 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4098
4099                         /* Validate that this is the last rx_bd. */
4100                         DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
4101                                 BCE_PRINTF(sc, "%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
4102                                 __FILE__, __LINE__, sw_chain_cons);
4103                                 bce_breakpoint(sc));
4104
4105                         /* DRC - ToDo: If the received packet is small, say less */
4106                         /*             than 128 bytes, allocate a new mbuf here, */
4107                         /*             copy the data to that mbuf, and recycle   */
4108                         /*             the mapped jumbo frame.                   */
4109
4110                         /* Unmap the mbuf from DMA space. */
4111                         bus_dmamap_sync(sc->rx_mbuf_tag, 
4112                             sc->rx_mbuf_map[sw_chain_cons],
4113                             BUS_DMASYNC_POSTREAD);
4114                         bus_dmamap_unload(sc->rx_mbuf_tag,
4115                             sc->rx_mbuf_map[sw_chain_cons]);
4116
4117                         /* Remove the mbuf from the driver's chain. */
4118                         m = sc->rx_mbuf_ptr[sw_chain_cons];
4119                         sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4120
4121                         /*
4122                          * Frames received on the NetXtreme II are prepended
4123                          * with the l2_fhdr structure which provides status
4124                          * information about the received frame (including
4125                          * VLAN tags and checksum info) and are also
4126                          * automatically adjusted to align the IP header
4127                          * (i.e. two null bytes are inserted before the 
4128                          * Ethernet header).
4129                          */
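                              /*
                               * A rough sketch of the resulting buffer (not to scale):
                               *
                               *   +----------------+---------+--------------------+
                               *   | struct l2_fhdr | 2 bytes |   Ethernet frame   |
                               *   +----------------+---------+--------------------+
                               *                      ^ pad that aligns the IP header
                               *
                               * l2_fhdr_pkt_len counts the frame including the 4-byte
                               * FCS, which is trimmed from the length below.
                               */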
4130                         l2fhdr = mtod(m, struct l2_fhdr *);
4131
4132                         len    = l2fhdr->l2_fhdr_pkt_len;
4133                         status = l2fhdr->l2_fhdr_status;
4134
4135                         DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4136                                 BCE_PRINTF(sc, "Simulating l2_fhdr status error.\n");
4137                                 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4138
4139                         /* Watch for unusual sized frames. */
4140                         DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
4141                                 BCE_PRINTF(sc, "%s(%d): Unusual frame size found. "
4142                                         "Min(%d), Actual(%d), Max(%d)\n", 
4143                                         __FILE__, __LINE__, (int) BCE_MIN_MTU, 
4144                                         len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
4145                                 bce_dump_mbuf(sc, m);
4146                                 bce_breakpoint(sc));
4147
4148                         len -= ETHER_CRC_LEN;
4149
4150                         /* Check the received frame for errors. */
4151                         if (status &  (L2_FHDR_ERRORS_BAD_CRC | 
4152                                 L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | 
4153                                 L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
4154
4155                                 ifp->if_ierrors++;
4156                                 DBRUNIF(1, sc->l2fhdr_status_errors++);
4157
4158                                 /* Reuse the mbuf for a new frame. */
4159                                 if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4160
4161                                         DBRUNIF(1, bce_breakpoint(sc));
4162                                         panic("bce%d: Can't reuse RX mbuf!\n", sc->bce_unit);
4163
4164                                 }
4165                                 goto bce_rx_int_next_rx;
4166                         }
4167
4168                         /*
4169                          * Get a new mbuf for the rx_bd.  If no new
4170                          * mbufs are available then reuse the current mbuf,
4171                          * count an input error on the interface, and log
4172                          * an error message to the system log.
4173                          */
4174                         if (bce_get_buf(sc, NULL, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4175
4176                                 DBRUN(BCE_WARN, 
4177                                         BCE_PRINTF(sc, "%s(%d): Failed to allocate "
4178                                         "new mbuf, incoming frame dropped!\n", 
4179                                         __FILE__, __LINE__));
4180
4181                                 ifp->if_ierrors++;
4182
4183                                 /* Try to reuse the existing mbuf. */
4184                                 if (bce_get_buf(sc, m, &sw_prod, &sw_chain_prod, &sw_prod_bseq)) {
4185
4186                                         DBRUNIF(1, bce_breakpoint(sc));
4187                                         panic("bce%d: Double mbuf allocation failure!", sc->bce_unit);
4188
4189                                 }
4190                                 goto bce_rx_int_next_rx;
4191                         }
4192
4193                         /* Skip over the l2_fhdr when passing the data up the stack. */
4194                         m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4195
4196                         /* Adjust the packet length to match the received data. */
4197                         m->m_pkthdr.len = m->m_len = len;
4198
4199                         /* Send the packet to the appropriate interface. */
4200                         m->m_pkthdr.rcvif = ifp;
4201
4202                         DBRUN(BCE_VERBOSE_RECV,
4203                                 struct ether_header *eh;
4204                                 eh = mtod(m, struct ether_header *);
4205                                 BCE_PRINTF(sc, "%s(): to: %6D, from: %6D, type: 0x%04X\n",
4206                                         __FUNCTION__, eh->ether_dhost, ":", 
4207                                         eh->ether_shost, ":", htons(eh->ether_type)));
4208
4209                         /* Validate the checksum if offload enabled. */
4210                         if (ifp->if_capenable & IFCAP_RXCSUM) {
4211
4212                                 /* Check for an IP datagram. */
4213                                 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4214                                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4215
4216                                         /* Check if the IP checksum is valid. */
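                                              /*
                                               * The controller reports 0xffff (all ones) for a
                                               * good header checksum, hence the XOR test below.
                                               */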
4217                                         if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4218                                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4219                                         else
4220                                                 DBPRINT(sc, BCE_WARN_SEND, 
4221                                                         "%s(): Invalid IP checksum = 0x%04X!\n",
4222                                                         __FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
4223                                 }
4224
4225                                 /* Check for a valid TCP/UDP frame. */
4226                                 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4227                                         L2_FHDR_STATUS_UDP_DATAGRAM)) {
4228
4229                                         /* Check for a good TCP/UDP checksum. */
4230                                         if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4231                                                       L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4232                                                 m->m_pkthdr.csum_data =
4233                                                     l2fhdr->l2_fhdr_tcp_udp_xsum;
4234                                                 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID 
4235                                                         | CSUM_PSEUDO_HDR);
4236                                         } else
4237                                                 DBPRINT(sc, BCE_WARN_SEND, 
4238                                                         "%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
4239                                                         __FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
4240                                 }
4241                         }               
4242
4243
4244                         /*
4245                          * If we received a packet with a vlan tag,
4246                          * attach that information to the packet.
4247                          */
4248                         if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4249                                 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
4250                                         __FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
4251 #if __FreeBSD_version < 700000
4252                                 VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
4253 #else
4254                                 m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
4255                                 m->m_flags |= M_VLANTAG;
4256 #endif  
4257                         }
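                              /*
                               * On FreeBSD 7 and later the tag is carried in the mbuf
                               * packet header (ether_vtag) with M_VLANTAG set; older
                               * releases attach it with VLAN_INPUT_TAG as above.
                               */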
4258
4259                         /* Pass the mbuf off to the upper layers. */
4260                         ifp->if_ipackets++;
4261                         DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
4262                                 __FUNCTION__);
4263                         BCE_UNLOCK(sc);
4264                         (*ifp->if_input)(ifp, m);
4265                         DBRUNIF(1, sc->rx_mbuf_alloc--);
4266                         BCE_LOCK(sc);
4267
4268 bce_rx_int_next_rx:
4269                         sw_prod = NEXT_RX_BD(sw_prod);
4270                 }
4271
4272                 sw_cons = NEXT_RX_BD(sw_cons);
4273
4274                 /* Refresh hw_cons to see if there's new work */
4275                 if (sw_cons == hw_cons) {
4276                         hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
4277                         if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4278                                 hw_cons++;
4279                 }
4280
4281                 /* Prevent speculative reads from getting ahead of the status block. */
4282                 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4283                         BUS_SPACE_BARRIER_READ);
4284         }
4285
4286         for (int i = 0; i < RX_PAGES; i++)
4287                 bus_dmamap_sync(sc->rx_bd_chain_tag,
4288                     sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4289
4290         sc->rx_cons = sw_cons;
4291         sc->rx_prod = sw_prod;
4292         sc->rx_prod_bseq = sw_prod_bseq;
4293
4294         REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4295         REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4296
4297         DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4298                 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4299                 __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4300 }
4301
4302
4303 /****************************************************************************/
4304 /* Handles transmit completion interrupt events.                            */
4305 /*                                                                          */
4306 /* Returns:                                                                 */
4307 /*   Nothing.                                                               */
4308 /****************************************************************************/
4309 static void
4310 bce_tx_intr(struct bce_softc *sc)
4311 {
4312         struct status_block *sblk = sc->status_block;
4313         struct ifnet *ifp = sc->bce_ifp;
4314         u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4315
4316         BCE_LOCK_ASSERT(sc);
4317
4318         DBRUNIF(1, sc->tx_interrupts++);
4319
4320         /* Get the hardware's view of the TX consumer index. */
4321         hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4322
4323         /* Skip to the next entry if this is a chain page pointer. */
4324         if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4325                 hw_tx_cons++;
4326
4327         sw_tx_cons = sc->tx_cons;
4328
4329         /* Prevent speculative reads from getting ahead of the status block. */
4330         bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4331                 BUS_SPACE_BARRIER_READ);
4332
4333         /* Cycle through any completed TX chain page entries. */
4334         while (sw_tx_cons != hw_tx_cons) {
4335 #ifdef BCE_DEBUG
4336                 struct tx_bd *txbd = NULL;
4337 #endif
4338                 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4339
4340                 DBPRINT(sc, BCE_INFO_SEND,
4341                         "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4342                         "sw_tx_chain_cons = 0x%04X\n",
4343                         __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4344
4345                 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4346                         BCE_PRINTF(sc, "%s(%d): TX chain consumer out of range! "
4347                                 " 0x%04X > 0x%04X\n",
4348                                 __FILE__, __LINE__, sw_tx_chain_cons, 
4349                                 (int) MAX_TX_BD);
4350                         bce_breakpoint(sc));
4351
4352                 DBRUNIF(1,
4353                         txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4354                                 [TX_IDX(sw_tx_chain_cons)]);
4355                 
4356                 DBRUNIF((txbd == NULL),
4357                         BCE_PRINTF(sc, "%s(%d): Unexpected NULL tx_bd[0x%04X]!\n", 
4358                                 __FILE__, __LINE__, sw_tx_chain_cons);
4359                         bce_breakpoint(sc));
4360
4361                 DBRUN(BCE_INFO_SEND, 
4362                         BCE_PRINTF(sc, "%s(): ", __FUNCTION__);
4363                         bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4364
4365                 /*
4366                  * Free the associated mbuf. Remember
4367                  * that only the last tx_bd of a packet
4368                  * has an mbuf pointer and DMA map.
4369                  */
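                      /*
                       * For example, a frame occupying tx_bd[n] through tx_bd[n+2]
                       * has its mbuf pointer and DMA map stored only at index n+2;
                       * the earlier entries carry just buffer segments.
                       */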
4370                 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4371
4372                         /* Validate that this is the last tx_bd. */
4373                         DBRUNIF((!(txbd->tx_bd_vlan_tag_flags & TX_BD_FLAGS_END)),
4374                                 BCE_PRINTF(sc, "%s(%d): tx_bd END flag not set but "
4375                                 "txmbuf == NULL!\n", __FILE__, __LINE__);
4376                                 bce_breakpoint(sc));
4377
4378                         DBRUN(BCE_INFO_SEND, 
4379                                 BCE_PRINTF(sc, "%s(): Unloading map/freeing mbuf "
4380                                         "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
4381
4382                         /* Unmap the mbuf. */
4383                         bus_dmamap_unload(sc->tx_mbuf_tag,
4384                             sc->tx_mbuf_map[sw_tx_chain_cons]);
4385         
4386                         /* Free the mbuf. */
4387                         m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4388                         sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4389                         DBRUNIF(1, sc->tx_mbuf_alloc--);
4390
4391                         ifp->if_opackets++;
4392                 }
4393
4394                 sc->used_tx_bd--;
4395                 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4396
4397                 /* Refresh hw_cons to see if there's new work. */
4398                 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4399                 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4400                         hw_tx_cons++;
4401
4402                 /* Prevent speculative reads from getting ahead of the status block. */
4403                 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
4404                         BUS_SPACE_BARRIER_READ);
4405         }
4406
4407         /* Clear the TX timeout timer. */
4408         ifp->if_timer = 0;
4409
4410         /* Clear the tx hardware queue full flag. */
4411         if ((sc->used_tx_bd + BCE_TX_SLACK_SPACE) < USABLE_TX_BD) {
4412                 DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
4413                         BCE_PRINTF(sc, "%s(): TX chain is open for business! Used tx_bd = %d\n", 
4414                                 __FUNCTION__, sc->used_tx_bd));
4415                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4416         }
4417
4418         sc->tx_cons = sw_tx_cons;
4419 }
4420
4421
4422 /****************************************************************************/
4423 /* Disables interrupt generation.                                           */
4424 /*                                                                          */
4425 /* Returns:                                                                 */
4426 /*   Nothing.                                                               */
4427 /****************************************************************************/
4428 static void
4429 bce_disable_intr(struct bce_softc *sc)
4430 {
4431         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4432                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
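             /* Read back to flush the preceding posted write before returning. */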
4433         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4434 }
4435
4436
4437 /****************************************************************************/
4438 /* Enables interrupt generation.                                            */
4439 /*                                                                          */
4440 /* Returns:                                                                 */
4441 /*   Nothing.                                                               */
4442 /****************************************************************************/
4443 static void
4444 bce_enable_intr(struct bce_softc *sc)
4445 {
4446         u32 val;
4447
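             /*
              * Write back the last status index with interrupts still masked,
              * then again with the mask cleared to re-enable them.  The
              * COAL_NOW bit then asks the host coalescing block to generate
              * an interrupt immediately if any events arrived while
              * interrupts were disabled.
              */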
4448         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4449                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4450                BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4451
4452         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4453                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4454
4455         val = REG_RD(sc, BCE_HC_COMMAND);
4456         REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
4457 }
4458
4459
4460 /****************************************************************************/
4461 /* Handles controller initialization.                                       */
4462 /*                                                                          */
4463 /* Must be called from a locked routine.                                    */
4464 /*                                                                          */
4465 /* Returns:                                                                 */
4466 /*   Nothing.                                                               */
4467 /****************************************************************************/
4468 static void
4469 bce_init_locked(struct bce_softc *sc)
4470 {
4471         struct ifnet *ifp;
4472         u32 ether_mtu;
4473
4474         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4475
4476         BCE_LOCK_ASSERT(sc);
4477
4478         ifp = sc->bce_ifp;
4479
4480         /* Check if the driver is still running and bail out if it is. */
4481         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4482                 goto bce_init_locked_exit;
4483
4484         bce_stop(sc);
4485
4486         if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
4487                 BCE_PRINTF(sc, "%s(%d): Controller reset failed!\n", 
4488                         __FILE__, __LINE__);
4489                 goto bce_init_locked_exit;
4490         }
4491
4492         if (bce_chipinit(sc)) {
4493                 BCE_PRINTF(sc, "%s(%d): Controller initialization failed!\n", 
4494                         __FILE__, __LINE__);
4495                 goto bce_init_locked_exit;
4496         }
4497
4498         if (bce_blockinit(sc)) {
4499                 BCE_PRINTF(sc, "%s(%d): Block initialization failed!\n", 
4500                         __FILE__, __LINE__);
4501                 goto bce_init_locked_exit;
4502         }
4503
4504         /* Load our MAC address. */
4505         bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
4506         bce_set_mac_addr(sc);
4507
4508         /* Calculate and program the Ethernet MTU size. */
4509         ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + 
4510                 ETHER_CRC_LEN;
4511
4512         DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);
4513
4514         /* 
4515          * Program the mtu, enabling jumbo frame 
4516          * support if necessary.  Also set the mbuf
4517          * allocation count for RX frames.
4518          */
4519         if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
4520                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu | 
4521                         BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4522                 sc->mbuf_alloc_size = MJUM9BYTES;
4523         } else {
4524                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4525                 sc->mbuf_alloc_size = MCLBYTES;
4526         }
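             /*
              * For example, with the default MTU of 1500 bytes:
              *   ether_mtu = 14 (header) + 4 (VLAN) + 1500 + 4 (CRC) = 1522,
              * which is not greater than ETHER_MAX_LEN (1518) + 4, so jumbo
              * mode stays disabled and standard MCLBYTES clusters are used.
              */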
4527
4528         /* Calculate the RX Ethernet frame size for rx_bd's. */
4529         sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4530
4531         DBPRINT(sc, BCE_INFO, 
4532                 "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4533                 "max_frame_size = %d\n",
4534                 __FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);
4535
4536         /* Program appropriate promiscuous/multicast filtering. */
4537         bce_set_rx_mode(sc);
4538
4539         /* Init RX buffer descriptor chain. */
4540         bce_init_rx_chain(sc);
4541
4542         /* Init TX buffer descriptor chain. */
4543         bce_init_tx_chain(sc);
4544
4545 #ifdef DEVICE_POLLING
4546         /* Disable interrupts if we are polling. */
4547         if (ifp->if_capenable & IFCAP_POLLING) {
4548                 bce_disable_intr(sc);
4549
4550                 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4551                         (1 << 16) | sc->bce_rx_quick_cons_trip);
4552                 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4553                         (1 << 16) | sc->bce_tx_quick_cons_trip);
4554         } else
4555 #endif
4556         /* Enable host interrupts. */
4557         bce_enable_intr(sc);
4558
4559         bce_ifmedia_upd(ifp);
4560
4561         ifp->if_drv_flags |= IFF_DRV_RUNNING;
4562         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4563
4564         callout_reset(&sc->bce_stat_ch, hz, bce_tick, sc);
4565
4566 bce_init_locked_exit:
4567         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4568
4569         return;
4570 }
4571
4572
4573 /****************************************************************************/
4574 /* Handles controller initialization when called from an unlocked routine.  */
4575 /*                                                                          */
4576 /* Returns:                                                                 */
4577 /*   Nothing.                                                               */
4578 /****************************************************************************/
4579 static void
4580 bce_init(void *xsc)
4581 {
4582         struct bce_softc *sc = xsc;
4583
4584         BCE_LOCK(sc);
4585         bce_init_locked(sc);
4586         BCE_UNLOCK(sc);
4587 }
4588
4589
4590 /****************************************************************************/
4591 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
4592 /* the memory visible to the controller.                                    */
4593 /*                                                                          */
4594 /* Returns:                                                                 */
4595 /*   0 for success, positive value for failure.                             */
4596 /****************************************************************************/
4597 static int
4598 bce_tx_encap(struct bce_softc *sc, struct mbuf *m_head, u16 *prod,
4599         u16 *chain_prod, u32 *prod_bseq)
4600 {
4601         u32 vlan_tag_flags = 0;
4602         struct bce_dmamap_arg map_arg;
4603         bus_dmamap_t map;
4604         int i, error, rc = 0;
4605
4606         /* Transfer any checksum offload flags to the bd. */
4607         if (m_head->m_pkthdr.csum_flags) {
4608                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
4609                         vlan_tag_flags |= TX_BD_FLAGS_IP_CKSUM;
4610                 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4611                         vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4612         }
4613
4614         /* Transfer any VLAN tags to the bd. */
4615         if (m_head->m_flags & M_VLANTAG)
4616                 vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG |
4617                         (m_head->m_pkthdr.ether_vtag << 16));
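             /*
              * As the shift above shows, the 16-bit VLAN tag occupies the
              * upper half of vlan_tag_flags, while the VLAN-present and
              * checksum offload flags occupy the lower half.
              */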
4618
4619         /* Set up the arguments for the DMA map load callback. */
4620         map = sc->tx_mbuf_map[*chain_prod];
4621         map_arg.sc         = sc;
4622         map_arg.prod       = *prod;
4623         map_arg.chain_prod = *chain_prod;
4624         map_arg.prod_bseq  = *prod_bseq;
4625         map_arg.tx_flags   = vlan_tag_flags;
4626         map_arg.maxsegs    = USABLE_TX_BD - sc->used_tx_bd - 
4627                 BCE_TX_SLACK_SPACE;
4628
4629         KASSERT(map_arg.maxsegs > 0, ("Invalid TX maxsegs value!"));
4630
4631         for (i = 0; i < TX_PAGES; i++)
4632                 map_arg.tx_chain[i] = sc->tx_bd_chain[i];
4633
4634         /* Map the mbuf into our DMA address space. */
4635         error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag, map, m_head,
4636             bce_dma_map_tx_desc, &map_arg, BUS_DMA_NOWAIT);
4637
4638         if (error || map_arg.maxsegs == 0) {
4639             
4640             /* Try to defrag the mbuf if there are too many segments. */
4641             if (error == EFBIG && map_arg.maxsegs != 0) {
4642                 struct mbuf *m0;
4643
4644                 DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
4645                     __FUNCTION__, map_arg.maxsegs);
4646
4647                 m0 = m_defrag(m_head, M_DONTWAIT);
4648                 if (m0 != NULL) {
4649                     m_head = m0;
4650                     error = bus_dmamap_load_mbuf(sc->tx_mbuf_tag,
4651                         map, m_head, bce_dma_map_tx_desc, &map_arg,
4652                         BUS_DMA_NOWAIT);
4653                 }
4654             }
4655
4656             /* Still getting an error after a defrag. */
4657             if (error) {
4658                 BCE_PRINTF(sc,
4659                     "%s(%d): Error mapping mbuf into TX chain!\n",
4660                     __FILE__, __LINE__);
4661                 rc = ENOBUFS;
4662                 goto bce_tx_encap_exit;
4663             }
4664
4665         }
4666
4667         /*
4668          * Ensure that the map for this transmission
4669          * is placed at the array index of the last
4670          * descriptor in this chain.  This is done
4671          * because a single map is used for all 
4672          * segments of the mbuf and we don't want to
4673          * delete the map before all of the segments
4674          * have been freed.
4675          */
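             /*
              * In other words, the map reserved at this frame's first bd
              * (*chain_prod) ends up parked at the frame's last bd
              * (map_arg.chain_prod) next to the mbuf pointer, and the map
              * that was stored there moves to the first bd's slot.
              */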
4676         sc->tx_mbuf_map[*chain_prod] = 
4677                 sc->tx_mbuf_map[map_arg.chain_prod];
4678         sc->tx_mbuf_map[map_arg.chain_prod] = map;
4679         sc->tx_mbuf_ptr[map_arg.chain_prod] = m_head;
4680         sc->used_tx_bd += map_arg.maxsegs;
4681
4682         DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 
4683                 sc->tx_hi_watermark = sc->used_tx_bd);
4684
4685         DBRUNIF(1, sc->tx_mbuf_alloc++);
4686
4687         DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, *chain_prod, 
4688                 map_arg.maxsegs));
4689
4690         /* prod still points to the last used tx_bd at this point. */
4691         *prod       = map_arg.prod;
4692         *chain_prod = map_arg.chain_prod;
4693         *prod_bseq  = map_arg.prod_bseq;
4694
4695 bce_tx_encap_exit:
4696
4697         return(rc);
4698 }
4699
4700
4701 /****************************************************************************/
4702 /* Main transmit routine when called from another routine with a lock.      */
4703 /*                                                                          */
4704 /* Returns:                                                                 */
4705 /*   Nothing.                                                               */
4706 /****************************************************************************/
4707 static void
4708 bce_start_locked(struct ifnet *ifp)
4709 {
4710         struct bce_softc *sc = ifp->if_softc;
4711         struct mbuf *m_head = NULL;
4712         int count = 0;
4713         u16 tx_prod, tx_chain_prod;
4714         u32     tx_prod_bseq;
4715
4716         /* If there's no link or the transmit queue is empty then just exit. */
4717         if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
4718                 DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n", 
4719                         __FUNCTION__);
4720                 goto bce_start_locked_exit;
4721         }
4722
4723         /* prod points to the next free tx_bd. */
4724         tx_prod = sc->tx_prod;
4725         tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4726         tx_prod_bseq = sc->tx_prod_bseq;
4727
4728         DBPRINT(sc, BCE_INFO_SEND,
4729                 "%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
4730                 "tx_prod_bseq = 0x%08X\n",
4731                 __FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4732
4733         /* Keep adding entries while there is space in the ring. */
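             /*
              * (A non-NULL mbuf pointer at the producer index means that bd
              * still belongs to an in-flight frame, i.e. there is no room
              * left in the chain.)
              */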
4734         while(sc->tx_mbuf_ptr[tx_chain_prod] == NULL) {
4735
4736                 /* Check for any frames to send. */
4737                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4738                 if (m_head == NULL)
4739                         break;
4740
4741                 /*
4742                  * Pack the data into the transmit ring. If we
4743                  * don't have room, place the mbuf back at the
4744                  * head of the queue and set the OACTIVE flag
4745                  * to wait for the NIC to drain the chain.
4746                  */
4747                 if (bce_tx_encap(sc, m_head, &tx_prod, &tx_chain_prod, &tx_prod_bseq)) {
4748                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4749                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4750                         DBPRINT(sc, BCE_INFO_SEND,
4751                                 "TX chain is closed for business! Total tx_bd used = %d\n", 
4752                                 sc->used_tx_bd);
4753                         break;
4754                 }
4755
4756                 count++;
4757
4758                 /* Send a copy of the frame to any BPF listeners. */
4759                 BPF_MTAP(ifp, m_head);
4760
4761                 tx_prod = NEXT_TX_BD(tx_prod);
4762                 tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4763         }
4764
4765         if (count == 0) {
4766                 /* no packets were dequeued */
4767                 DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n", 
4768                         __FUNCTION__);
4769                 goto bce_start_locked_exit;
4770         }
4771
4772         /* Update the driver's counters. */
4773         sc->tx_prod      = tx_prod;
4774         sc->tx_prod_bseq = tx_prod_bseq;
4775
4776         DBPRINT(sc, BCE_INFO_SEND,
4777                 "%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
4778                 "tx_prod_bseq = 0x%08X\n",
4779                 __FUNCTION__, tx_prod, tx_chain_prod, tx_prod_bseq);
4780
4781         /* Start the transmit. */
4782         REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4783         REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4784
4785         /* Set the tx timeout. */
4786         ifp->if_timer = BCE_TX_TIMEOUT;
4787
4788 bce_start_locked_exit:
4789         return;
4790 }
4791
4792
4793 /****************************************************************************/
4794 /* Main transmit routine when called from another routine without a lock.   */
4795 /*                                                                          */
4796 /* Returns:                                                                 */
4797 /*   Nothing.                                                               */
4798 /****************************************************************************/
4799 static void
4800 bce_start(struct ifnet *ifp)
4801 {
4802         struct bce_softc *sc = ifp->if_softc;
4803
4804         BCE_LOCK(sc);
4805         bce_start_locked(ifp);
4806         BCE_UNLOCK(sc);
4807 }
4808
4809
4810 /****************************************************************************/
4811 /* Handles any IOCTL calls from the operating system.                       */
4812 /*                                                                          */
4813 /* Returns:                                                                 */
4814 /*   0 for success, positive value for failure.                             */
4815 /****************************************************************************/
4816 static int
4817 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4818 {
4819         struct bce_softc *sc = ifp->if_softc;
4820         struct ifreq *ifr = (struct ifreq *) data;
4821         struct mii_data *mii;
4822         int mask, error = 0;
4823
4824         DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4825
4826         switch(command) {
4827
4828                 /* Set the MTU. */
4829                 case SIOCSIFMTU:
4830                         /* Check that the MTU setting is supported. */
4831                         if ((ifr->ifr_mtu < BCE_MIN_MTU) || 
4832                                 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
4833                                 error = EINVAL;
4834                                 break;
4835                         }
4836
4837                         DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu);
4838
4839                         BCE_LOCK(sc);
4840                         ifp->if_mtu = ifr->ifr_mtu;
4841                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4842                         bce_init_locked(sc);
4843                         BCE_UNLOCK(sc);
4844                         break;
4845
4846                 /* Set interface flags. */
4847                 case SIOCSIFFLAGS:
4848                         DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFFLAGS\n");
4849
4850                         BCE_LOCK(sc);
4851
4852                         /* Check if the interface is up. */
4853                         if (ifp->if_flags & IFF_UP) {
4854                                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4855                                         /* Change the promiscuous/multicast flags as necessary. */
4856                                         bce_set_rx_mode(sc);
4857                                 } else {
4858                                         /* Start the HW */
4859                                         bce_init_locked(sc);
4860                                 }
4861                         } else {
4862                                 /* The interface is down.  Check if the driver is running. */
4863                                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4864                                         bce_stop(sc);
4865                                 }
4866                         }
4867
4868                         BCE_UNLOCK(sc);
4869                         error = 0;
4870
4871                         break;
4872
4873                 /* Add/Delete multicast address */
4874                 case SIOCADDMULTI:
4875                 case SIOCDELMULTI:
4876                         DBPRINT(sc, BCE_VERBOSE, "Received SIOCADDMULTI/SIOCDELMULTI\n");
4877
4878                         BCE_LOCK(sc);
4879                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4880                                 bce_set_rx_mode(sc);
4881                                 error = 0;
4882                         }
4883                         BCE_UNLOCK(sc);
4884
4885                         break;
4886
4887                 /* Set/Get Interface media */
4888                 case SIOCSIFMEDIA:
4889                 case SIOCGIFMEDIA:
4890                         DBPRINT(sc, BCE_VERBOSE, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
4891
4892                         DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n",
4893                                 sc->bce_phy_flags);
4894
4895                         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
4896                                 DBPRINT(sc, BCE_VERBOSE, "SerDes media set/get\n");
4897
4898                                 error = ifmedia_ioctl(ifp, ifr,
4899                                     &sc->bce_ifmedia, command);
4900                         } else {
4901                                 DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n");
4902                                 mii = device_get_softc(sc->bce_miibus);
4903                                 error = ifmedia_ioctl(ifp, ifr,
4904                                     &mii->mii_media, command);
4905                         }
4906                         break;
4907
4908                 /* Set interface capability */
4909                 case SIOCSIFCAP:
4910                         mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4911                         DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
4912
4913 #ifdef DEVICE_POLLING
4914                         if (mask & IFCAP_POLLING) {
4915                                 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4916
4917                                         /* Setup the poll routine to call. */
4918                                         error = ether_poll_register(bce_poll, ifp);
4919                                         if (error) {
4920                                                 BCE_PRINTF(sc, "%s(%d): Error registering poll function!\n",
4921                                                         __FILE__, __LINE__);
4922                                                 goto bce_ioctl_exit;
4923                                         }
4924
4925                                         /* Clear the interrupt. */
4926                                         BCE_LOCK(sc);
4927                                         bce_disable_intr(sc);
4928
4929                                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4930                                                 (1 << 16) | sc->bce_rx_quick_cons_trip);
4931                                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4932                                                 (1 << 16) | sc->bce_tx_quick_cons_trip);
4933
4934                                         ifp->if_capenable |= IFCAP_POLLING;
4935                                         BCE_UNLOCK(sc);
4936                                 } else {
4937                                         /* Clear the poll routine. */
4938                                         error = ether_poll_deregister(ifp);
4939
4940                                         /* Enable interrupt even in error case */
4941                                         BCE_LOCK(sc);
4942                                         bce_enable_intr(sc);
4943
4944                                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4945                                                 (sc->bce_tx_quick_cons_trip_int << 16) |
4946                                                 sc->bce_tx_quick_cons_trip);
4947                                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4948                                                 (sc->bce_rx_quick_cons_trip_int << 16) |
4949                                                 sc->bce_rx_quick_cons_trip);
4950
4951                                         ifp->if_capenable &= ~IFCAP_POLLING;
4952                                         BCE_UNLOCK(sc);
4953                                 }
4954                         }
4955 #endif /*DEVICE_POLLING */
4956
4957                         /* Toggle the TX checksum capabilities enable flag. */
4958                         if (mask & IFCAP_TXCSUM) {
4959                                 ifp->if_capenable ^= IFCAP_TXCSUM;
4960                                 if (IFCAP_TXCSUM & ifp->if_capenable)
4961                                         ifp->if_hwassist = BCE_IF_HWASSIST;
4962                                 else
4963                                         ifp->if_hwassist = 0;
4964                         }
4965
4966                         /* Toggle the RX checksum capabilities enable flag. */
4967                         if (mask & IFCAP_RXCSUM) {
4968                                 ifp->if_capenable ^= IFCAP_RXCSUM;
4969                                 if (IFCAP_RXCSUM & ifp->if_capenable)
4970                                         ifp->if_hwassist = BCE_IF_HWASSIST;
4971                                 else
4972                                         ifp->if_hwassist = 0;
4973                         }
4974
4975                         /* Toggle VLAN_MTU capabilities enable flag. */
4976                         if (mask & IFCAP_VLAN_MTU) {
4977                                 BCE_PRINTF(sc, "%s(%d): Changing VLAN_MTU not supported.\n",
4978                                         __FILE__, __LINE__);
4979                         }
4980
4981                         /* Toggle the VLAN_HWTAGGING capabilities enable flag. */
4982                         if (mask & IFCAP_VLAN_HWTAGGING) {
4983                                 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
4984                                         BCE_PRINTF(sc, "%s(%d): Cannot change VLAN_HWTAGGING while "
4985                                                 "management firmware (ASF/IPMI/UMP) is running!\n",
4986                                                 __FILE__, __LINE__);
4987                                 else
4988                                         BCE_PRINTF(sc, "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
4989                                                 __FILE__, __LINE__);
4990                         }
4991
4992                         break;
4993                 default:
4994                         DBPRINT(sc, BCE_INFO, "Received unsupported IOCTL: 0x%08X\n",
4995                                 (u32) command);
4996
4997                         /* We don't know how to handle the IOCTL, pass it on. */
4998                         error = ether_ioctl(ifp, command, data);
4999                         break;
5000         }
5001
5002 #ifdef DEVICE_POLLING
5003 bce_ioctl_exit:
5004 #endif
5005
5006         DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5007
5008         return(error);
5009 }
5010
5011
5012 /****************************************************************************/
5013 /* Transmit timeout handler.                                                */
5014 /*                                                                          */
5015 /* Returns:                                                                 */
5016 /*   Nothing.                                                               */
5017 /****************************************************************************/
5018 static void
5019 bce_watchdog(struct ifnet *ifp)
5020 {
5021         struct bce_softc *sc = ifp->if_softc;
5022
5023         DBRUN(BCE_WARN_SEND, 
5024                 bce_dump_driver_state(sc);
5025                 bce_dump_status_block(sc));
5026
5027         BCE_PRINTF(sc, "%s(%d): Watchdog timeout occurred, resetting!\n", 
5028                 __FILE__, __LINE__);
5029
5030         /* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5031
5032         BCE_LOCK(sc);
5033         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
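        /*
         * Clearing IFF_DRV_RUNNING before calling bce_init_locked() makes
         * the reinitialization below behave like a full restart (assumed
         * behaviour of the init path, which sets the flag again on success).
         */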
5034
5035         bce_init_locked(sc);
5036         ifp->if_oerrors++;
5037         BCE_UNLOCK(sc);
5038
5039 }
5040
5041
5042 #ifdef DEVICE_POLLING
5043 static void
5044 bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
5045 {
5046         struct bce_softc *sc = ifp->if_softc;
5047
5048         BCE_LOCK_ASSERT(sc);
5049
5050         sc->bce_rxcycles = count;
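        /*
         * Record the poll budget; bce_rx_intr() presumably uses
         * sc->bce_rxcycles to bound how many RX frames it processes in a
         * single poll pass.  The cmd argument is not examined in this
         * locked helper.
         */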
5051
5052         bus_dmamap_sync(sc->status_tag, sc->status_map,
5053             BUS_DMASYNC_POSTWRITE);
5054
5055         /* Check for any completed RX frames. */
5056         if (sc->status_block->status_rx_quick_consumer_index0 != 
5057                 sc->hw_rx_cons)
5058                 bce_rx_intr(sc);
5059
5060         /* Check for any completed TX frames. */
5061         if (sc->status_block->status_tx_quick_consumer_index0 != 
5062                 sc->hw_tx_cons)
5063                 bce_tx_intr(sc);
5064
5065         /* Check for new frames to transmit. */
5066         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5067                 bce_start_locked(ifp);
5068
5069 }
5070
5071
5072 static void
5073 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5074 {
5075         struct bce_softc *sc = ifp->if_softc;
5076
5077         BCE_LOCK(sc);
5078         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5079                 bce_poll_locked(ifp, cmd, count);
5080         BCE_UNLOCK(sc);
5081 }
5082 #endif /* DEVICE_POLLING */
5083
5084
5085 #if 0
5086 static inline int
5087 bce_has_work(struct bce_softc *sc)
5088 {
5089         struct status_block *stat = sc->status_block;
5090
5091         if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
5092             (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
5093                 return 1;
5094
5095         if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
5096             sc->bce_link)
5097                 return 1;
5098
5099         return 0;
5100 }
5101 #endif
5102
5103
5107 /****************************************************************************/
5108 /* Main interrupt entry point.  Verifies that the controller generated the  */
5109 /* interrupt and then calls a separate routine to handle the various        */
5110 /* interrupt causes (PHY, TX, RX).                                          */
5111 /*                                                                          */
5112 /* Returns:                                                                 */
5113 /*   Nothing.                                                               */
5114 /****************************************************************************/
5115 static void
5116 bce_intr(void *xsc)
5117 {
5118         struct bce_softc *sc;
5119         struct ifnet *ifp;
5120         u32 status_attn_bits;
5121
5122         sc = xsc;
5123         ifp = sc->bce_ifp;
5124
5125         BCE_LOCK(sc);
5126
5127         DBRUNIF(1, sc->interrupts_generated++);
5128
5129 #ifdef DEVICE_POLLING
5130         if (ifp->if_capenable & IFCAP_POLLING) {
5131                 DBPRINT(sc, BCE_INFO, "Polling enabled!\n");
5132                 goto bce_intr_exit;
5133         }
5134 #endif
5135
5136         bus_dmamap_sync(sc->status_tag, sc->status_map,
5137             BUS_DMASYNC_POSTWRITE);
5138
5139         /*
5140          * If the hardware status block index
5141          * matches the last value read by the
5142          * driver and we haven't asserted our
5143          * interrupt then there's nothing to do.
5144          */
5145         if ((sc->status_block->status_idx == sc->last_status_idx) && 
5146                 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5147                 goto bce_intr_exit;
5148
5149         /* Ack the interrupt and stop others from occurring. */
5150         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5151                 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5152                 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5153
5154         /* Keep processing data as long as there is work to do. */
5155         for (;;) {
5156
5157                 status_attn_bits = sc->status_block->status_attn_bits;
5158
5159                 DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
5160                         BCE_PRINTF(sc, "Simulating unexpected status attention bit set.\n");
5161                         status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
5162
5163                 /* Was it a link change interrupt? */
5164                 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5165                         (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5166                         bce_phy_intr(sc);
5167
5168                 /* If any other attention is asserted then the chip is toast. */
5169                 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5170                         (sc->status_block->status_attn_bits_ack & 
5171                         ~STATUS_ATTN_BITS_LINK_STATE))) {
5172
5173                         DBRUN(1, sc->unexpected_attentions++);
5174
5175                         BCE_PRINTF(sc, "%s(%d): Fatal attention detected: 0x%08X\n", 
5176                                 __FILE__, __LINE__, sc->status_block->status_attn_bits);
5177
5178                         DBRUN(BCE_FATAL, 
5179                                 if (bce_debug_unexpected_attention == 0)
5180                                         bce_breakpoint(sc));
5181
5182                         bce_init_locked(sc);
5183                         goto bce_intr_exit;
5184                 }
5185
5186                 /* Check for any completed RX frames. */
5187                 if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
5188                         bce_rx_intr(sc);
5189
5190                 /* Check for any completed TX frames. */
5191                 if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
5192                         bce_tx_intr(sc);
5193
5194                 /* Save the status block index value for use during the next interrupt. */
5195                 sc->last_status_idx = sc->status_block->status_idx;
5196
5197                 /* Prevent speculative reads from getting ahead of the status block. */
5198                 bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, 
5199                         BUS_SPACE_BARRIER_READ);
5200
5201                 /* If there's no work left then exit the interrupt service routine. */
5202                 if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
5203                         (sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
5204                         break;
5205         
5206         }
5207
5208         bus_dmamap_sync(sc->status_tag, sc->status_map,
5209             BUS_DMASYNC_PREWRITE);
5210
5211         /* Re-enable interrupts. */
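        /*
         * The first write acknowledges the latest status block index with
         * interrupts still masked; the second repeats the index without
         * the mask bit, re-arming the interrupt line.
         */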
5212         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5213                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5214                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5215         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5216                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5217
5218         /* Handle any frames that arrived while handling the interrupt. */
5219         if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5220                 bce_start_locked(ifp);
5221
5222 bce_intr_exit:
5223         BCE_UNLOCK(sc);
5224 }
5225
5226
5227 /****************************************************************************/
5228 /* Programs the various packet receive modes (broadcast and multicast).     */
5229 /*                                                                          */
5230 /* Returns:                                                                 */
5231 /*   Nothing.                                                               */
5232 /****************************************************************************/
5233 static void
5234 bce_set_rx_mode(struct bce_softc *sc)
5235 {
5236         struct ifnet *ifp;
5237         struct ifmultiaddr *ifma;
5238         u32 hashes[4] = { 0, 0, 0, 0 };
5239         u32 rx_mode, sort_mode;
5240         int h, i;
5241
5242         BCE_LOCK_ASSERT(sc);
5243
5244         ifp = sc->bce_ifp;
5245
5246         /* Initialize receive mode default settings. */
5247         rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5248                             BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5249         sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5250
5251         /*
5252          * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5253          * be enabled.
5254          */
5255         if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5256                 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5257                 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5258
5259         /*
5260          * Check for promiscuous, all multicast, or selected
5261          * multicast address filtering.
5262          */
5263         if (ifp->if_flags & IFF_PROMISC) {
5264                 DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
5265
5266                 /* Enable promiscuous mode. */
5267                 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5268                 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5269         } else if (ifp->if_flags & IFF_ALLMULTI) {
5270                 DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
5271
5272                 /* Enable all multicast addresses. */
5273                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5274                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5275                 }
5276                 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5277         } else {
5278                 /* Accept one or more multicast(s). */
5279                 DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
5280
5281                 IF_ADDR_LOCK(ifp);
5282                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5283                         if (ifma->ifma_addr->sa_family != AF_LINK)
5284                                 continue;
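                        /*
                         * Hash the link-level address with CRC32 and keep the
                         * low 7 bits: bits 6:5 select one of the four hash
                         * registers, bits 4:0 select the bit within it.
                         */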
5285                         h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5286                             ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
5287                         hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
5288                 }
5289                 IF_ADDR_UNLOCK(ifp);
5290
5291                 for (i = 0; i < 4; i++)
5292                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5293
5294                 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5295         }
5296
5297         /* Only make changes if the receive mode has actually changed. */
5298         if (rx_mode != sc->rx_mode) {
5299                 DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n", 
5300                         rx_mode);
5301
5302                 sc->rx_mode = rx_mode;
5303                 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5304         }
5305
5306         /* Disable and clear the existing sort before enabling a new sort. */
5307         REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5308         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5309         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5310 }
5311
5312
5313 /****************************************************************************/
5314 /* Called periodically to update statistics from the controller's           */
5315 /* statistics block.                                                        */
5316 /*                                                                          */
5317 /* Returns:                                                                 */
5318 /*   Nothing.                                                               */
5319 /****************************************************************************/
5320 static void
5321 bce_stats_update(struct bce_softc *sc)
5322 {
5323         struct ifnet *ifp;
5324         struct statistics_block *stats;
5325
5326         DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5327
5328         ifp = sc->bce_ifp;
5329
5330         stats = (struct statistics_block *) sc->stats_block;
5331
5332         /* 
5333          * Update the interface statistics from the
5334          * hardware statistics.
5335          */
5336         ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
5337
5338         ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
5339                                       (u_long) stats->stat_EtherStatsOverrsizePkts +
5340                                           (u_long) stats->stat_IfInMBUFDiscards +
5341                                           (u_long) stats->stat_Dot3StatsAlignmentErrors +
5342                                           (u_long) stats->stat_Dot3StatsFCSErrors;
5343
5344         ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5345                                           (u_long) stats->stat_Dot3StatsExcessiveCollisions +
5346                                           (u_long) stats->stat_Dot3StatsLateCollisions;
5347
5348         /* 
5349          * Certain controllers don't report 
5350          * carrier sense errors correctly.
5351          * See errata E11_5708CA0_1165. 
5352          */
5353         if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5354             !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
5355                 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5356
5357         /*
5358          * Update the sysctl statistics from the
5359          * hardware statistics.
5360          */
5361         sc->stat_IfHCInOctets = 
5362                 ((u64) stats->stat_IfHCInOctets_hi << 32) + 
5363                  (u64) stats->stat_IfHCInOctets_lo;
5364
5365         sc->stat_IfHCInBadOctets =
5366                 ((u64) stats->stat_IfHCInBadOctets_hi << 32) + 
5367                  (u64) stats->stat_IfHCInBadOctets_lo;
5368
5369         sc->stat_IfHCOutOctets =
5370                 ((u64) stats->stat_IfHCOutOctets_hi << 32) +
5371                  (u64) stats->stat_IfHCOutOctets_lo;
5372
5373         sc->stat_IfHCOutBadOctets =
5374                 ((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
5375                  (u64) stats->stat_IfHCOutBadOctets_lo;
5376
5377         sc->stat_IfHCInUcastPkts =
5378                 ((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
5379                  (u64) stats->stat_IfHCInUcastPkts_lo;
5380
5381         sc->stat_IfHCInMulticastPkts =
5382                 ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
5383                  (u64) stats->stat_IfHCInMulticastPkts_lo;
5384
5385         sc->stat_IfHCInBroadcastPkts =
5386                 ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
5387                  (u64) stats->stat_IfHCInBroadcastPkts_lo;
5388
5389         sc->stat_IfHCOutUcastPkts =
5390                 ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
5391                  (u64) stats->stat_IfHCOutUcastPkts_lo;
5392
5393         sc->stat_IfHCOutMulticastPkts =
5394                 ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5395                  (u64) stats->stat_IfHCOutMulticastPkts_lo;
5396
5397         sc->stat_IfHCOutBroadcastPkts =
5398                 ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5399                  (u64) stats->stat_IfHCOutBroadcastPkts_lo;
5400
5401         sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5402                 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5403
5404         sc->stat_Dot3StatsCarrierSenseErrors =
5405                 stats->stat_Dot3StatsCarrierSenseErrors;
5406
5407         sc->stat_Dot3StatsFCSErrors = 
5408                 stats->stat_Dot3StatsFCSErrors;
5409
5410         sc->stat_Dot3StatsAlignmentErrors =
5411                 stats->stat_Dot3StatsAlignmentErrors;
5412
5413         sc->stat_Dot3StatsSingleCollisionFrames =
5414                 stats->stat_Dot3StatsSingleCollisionFrames;
5415
5416         sc->stat_Dot3StatsMultipleCollisionFrames =
5417                 stats->stat_Dot3StatsMultipleCollisionFrames;
5418
5419         sc->stat_Dot3StatsDeferredTransmissions =
5420                 stats->stat_Dot3StatsDeferredTransmissions;
5421
5422         sc->stat_Dot3StatsExcessiveCollisions =
5423                 stats->stat_Dot3StatsExcessiveCollisions;
5424
5425         sc->stat_Dot3StatsLateCollisions =
5426                 stats->stat_Dot3StatsLateCollisions;
5427
5428         sc->stat_EtherStatsCollisions =
5429                 stats->stat_EtherStatsCollisions;
5430
5431         sc->stat_EtherStatsFragments =
5432                 stats->stat_EtherStatsFragments;
5433
5434         sc->stat_EtherStatsJabbers =
5435                 stats->stat_EtherStatsJabbers;
5436
5437         sc->stat_EtherStatsUndersizePkts =
5438                 stats->stat_EtherStatsUndersizePkts;
5439
5440         sc->stat_EtherStatsOverrsizePkts =
5441                 stats->stat_EtherStatsOverrsizePkts;
5442
5443         sc->stat_EtherStatsPktsRx64Octets =
5444                 stats->stat_EtherStatsPktsRx64Octets;
5445
5446         sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5447                 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5448
5449         sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5450                 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5451
5452         sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5453                 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5454
5455         sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5456                 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5457
5458         sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5459                 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5460
5461         sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5462                 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5463
5464         sc->stat_EtherStatsPktsTx64Octets =
5465                 stats->stat_EtherStatsPktsTx64Octets;
5466
5467         sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5468                 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5469
5470         sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5471                 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5472
5473         sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5474                 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5475
5476         sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5477                 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5478
5479         sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5480                 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5481
5482         sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5483                 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5484
5485         sc->stat_XonPauseFramesReceived =
5486                 stats->stat_XonPauseFramesReceived;
5487
5488         sc->stat_XoffPauseFramesReceived =
5489                 stats->stat_XoffPauseFramesReceived;
5490
5491         sc->stat_OutXonSent =
5492                 stats->stat_OutXonSent;
5493
5494         sc->stat_OutXoffSent =
5495                 stats->stat_OutXoffSent;
5496
5497         sc->stat_FlowControlDone =
5498                 stats->stat_FlowControlDone;
5499
5500         sc->stat_MacControlFramesReceived =
5501                 stats->stat_MacControlFramesReceived;
5502
5503         sc->stat_XoffStateEntered =
5504                 stats->stat_XoffStateEntered;
5505
5506         sc->stat_IfInFramesL2FilterDiscards =
5507                 stats->stat_IfInFramesL2FilterDiscards;
5508
5509         sc->stat_IfInRuleCheckerDiscards =
5510                 stats->stat_IfInRuleCheckerDiscards;
5511
5512         sc->stat_IfInFTQDiscards =
5513                 stats->stat_IfInFTQDiscards;
5514
5515         sc->stat_IfInMBUFDiscards =
5516                 stats->stat_IfInMBUFDiscards;
5517
5518         sc->stat_IfInRuleCheckerP4Hit =
5519                 stats->stat_IfInRuleCheckerP4Hit;
5520
5521         sc->stat_CatchupInRuleCheckerDiscards =
5522                 stats->stat_CatchupInRuleCheckerDiscards;
5523
5524         sc->stat_CatchupInFTQDiscards =
5525                 stats->stat_CatchupInFTQDiscards;
5526
5527         sc->stat_CatchupInMBUFDiscards =
5528                 stats->stat_CatchupInMBUFDiscards;
5529
5530         sc->stat_CatchupInRuleCheckerP4Hit =
5531                 stats->stat_CatchupInRuleCheckerP4Hit;
5532
5533         DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
5534 }
5535
5536
5537 static void
5538 bce_tick_locked(struct bce_softc *sc)
5539 {
5540         struct mii_data *mii = NULL;
5541         struct ifnet *ifp;
5542         u32 msg;
5543
5544         ifp = sc->bce_ifp;
5545
5546         BCE_LOCK_ASSERT(sc);
5547
5548         /* Tell the firmware that the driver is still running. */
5549 #ifdef BCE_DEBUG
5550         msg = (u32) BCE_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5551 #else
5552         msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5553 #endif
5554         REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5555
5556         /* Update the statistics from the hardware statistics block. */
5557         bce_stats_update(sc);
5558
5559         /* Schedule the next tick. */
5560         callout_reset(
5561                 &sc->bce_stat_ch,               /* callout */
5562                 hz,                                     /* ticks */
5563                 bce_tick,                               /* function */
5564                 sc);                                    /* function argument */
5565
5566         /* If the link is already up then we're done. */
5567         if (sc->bce_link)
5568                 goto bce_tick_locked_exit;
5569
5570         /* DRC - ToDo: Add SerDes support and check SerDes link here. */
5571
5572         mii = device_get_softc(sc->bce_miibus);
5573         mii_tick(mii);
5574
5575         /* Check if the link has come up. */
5576         if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5577             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5578                 sc->bce_link++;
5579                 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5580                     IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5581                     bootverbose)
5582                         BCE_PRINTF(sc, "Gigabit link up\n");
5583                 /* Now that link is up, handle any outstanding TX traffic. */
5584                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5585                         bce_start_locked(ifp);
5586         }
5587
5588 bce_tick_locked_exit:
5589         return;
5590 }
5591
5592
5593 static void
5594 bce_tick(void *xsc)
5595 {
5596         struct bce_softc *sc;
5597
5598         sc = xsc;
5599
5600         BCE_LOCK(sc);
5601         bce_tick_locked(sc);
5602         BCE_UNLOCK(sc);
5603 }
5604
5605
5606 #ifdef BCE_DEBUG
5607 /****************************************************************************/
5608 /* Allows the driver state to be dumped through the sysctl interface.       */
5609 /*                                                                          */
5610 /* Returns:                                                                 */
5611 /*   0 for success, positive value for failure.                             */
5612 /****************************************************************************/
5613 static int
5614 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5615 {
5616         int error;
5617         int result;
5618         struct bce_softc *sc;
5619
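        /* Writing a value of 1 to this sysctl node triggers the dump; other values are ignored. */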
5620         result = -1;
5621         error = sysctl_handle_int(oidp, &result, 0, req);
5622
5623         if (error || !req->newptr)
5624                 return (error);
5625
5626         if (result == 1) {
5627                 sc = (struct bce_softc *)arg1;
5628                 bce_dump_driver_state(sc);
5629         }
5630
5631         return error;
5632 }
5633
5634
5635 /****************************************************************************/
5636 /* Allows the hardware state to be dumped through the sysctl interface.     */
5637 /*                                                                          */
5638 /* Returns:                                                                 */
5639 /*   0 for success, positive value for failure.                             */
5640 /****************************************************************************/
5641 static int
5642 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5643 {
5644         int error;
5645         int result;
5646         struct bce_softc *sc;
5647
5648         result = -1;
5649         error = sysctl_handle_int(oidp, &result, 0, req);
5650
5651         if (error || !req->newptr)
5652                 return (error);
5653
5654         if (result == 1) {
5655                 sc = (struct bce_softc *)arg1;
5656                 bce_dump_hw_state(sc);
5657         }
5658
5659         return error;
5660 }
5661
5662
5663 /****************************************************************************/
5664 /* Allows the RX chain to be dumped through the sysctl interface.           */
5665 /*                                                                          */
5666 /* Returns:                                                                 */
5667 /*   0 for success, positive value for failure.                             */
5668 /****************************************************************************/
5669 static int
5670 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5671 {
5672         int error;
5673         int result;
5674         struct bce_softc *sc;
5675
5676         result = -1;
5677         error = sysctl_handle_int(oidp, &result, 0, req);
5678
5679         if (error || !req->newptr)
5680                 return (error);
5681
5682         if (result == 1) {
5683                 sc = (struct bce_softc *)arg1;
5684                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
5685         }
5686
5687         return error;
5688 }
5689
5690
5691 /****************************************************************************/
5692 /* Allows a driver breakpoint to be triggered through the sysctl interface. */
5693 /*                                                                          */
5694 /* Returns:                                                                 */
5695 /*   0 for success, positive value for failure.                             */
5696 /****************************************************************************/
5697 static int
5698 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
5699 {
5700         int error;
5701         int result;
5702         struct bce_softc *sc;
5703
5704         result = -1;
5705         error = sysctl_handle_int(oidp, &result, 0, req);
5706
5707         if (error || !req->newptr)
5708                 return (error);
5709
5710         if (result == 1) {
5711                 sc = (struct bce_softc *)arg1;
5712                 bce_breakpoint(sc);
5713         }
5714
5715         return error;
5716 }
5717 #endif
5718
5719
5720 /****************************************************************************/
5721 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5722 /*                                                                          */
5723 /* Returns:                                                                 */
5724 /*   Nothing.                                                               */
5725 /****************************************************************************/
5726 static void
5727 bce_add_sysctls(struct bce_softc *sc)
5728 {
5729         struct sysctl_ctx_list *ctx;
5730         struct sysctl_oid_list *children;
5731
5732         ctx = device_get_sysctl_ctx(sc->bce_dev);
5733         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
5734
5735         SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
5736                 "driver_version",
5737                 CTLFLAG_RD, &bce_driver_version,
5738                 0, "bce driver version");
5739
5740 #ifdef BCE_DEBUG
5741         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5742                 "rx_low_watermark",
5743                 CTLFLAG_RD, &sc->rx_low_watermark,
5744                 0, "Lowest level of free rx_bd's");
5745
5746         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5747                 "tx_hi_watermark",
5748                 CTLFLAG_RD, &sc->tx_hi_watermark,
5749                 0, "Highest level of used tx_bd's");
5750
5751         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5752                 "l2fhdr_status_errors",
5753                 CTLFLAG_RD, &sc->l2fhdr_status_errors,
5754                 0, "l2_fhdr status errors");
5755
5756         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5757                 "unexpected_attentions",
5758                 CTLFLAG_RD, &sc->unexpected_attentions,
5759                 0, "unexpected attentions");
5760
5761         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5762                 "lost_status_block_updates",
5763                 CTLFLAG_RD, &sc->lost_status_block_updates,
5764                 0, "lost status block updates");
5765
5766         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
5767                 "mbuf_alloc_failed",
5768                 CTLFLAG_RD, &sc->mbuf_alloc_failed,
5769                 0, "mbuf cluster allocation failures");
5770 #endif 
5771
5772         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5773                 "stat_IfHcInOctets",
5774                 CTLFLAG_RD, &sc->stat_IfHCInOctets,
5775                 "Bytes received");
5776
5777         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5778                 "stat_IfHCInBadOctets",
5779                 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5780                 "Bad bytes received");
5781
5782         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5783                 "stat_IfHCOutOctets",
5784                 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5785                 "Bytes sent");
5786
5787         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5788                 "stat_IfHCOutBadOctets",
5789                 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5790                 "Bad bytes sent");
5791
5792         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5793                 "stat_IfHCInUcastPkts",
5794                 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5795                 "Unicast packets received");
5796
5797         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5798                 "stat_IfHCInMulticastPkts",
5799                 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5800                 "Multicast packets received");
5801
5802         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5803                 "stat_IfHCInBroadcastPkts",
5804                 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5805                 "Broadcast packets received");
5806
5807         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5808                 "stat_IfHCOutUcastPkts",
5809                 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5810                 "Unicast packets sent");
5811
5812         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5813                 "stat_IfHCOutMulticastPkts",
5814                 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5815                 "Multicast packets sent");
5816
5817         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5818                 "stat_IfHCOutBroadcastPkts",
5819                 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5820                 "Broadcast packets sent");
5821
5822         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5823                 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5824                 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5825                 0, "Internal MAC transmit errors");
5826
5827         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5828                 "stat_Dot3StatsCarrierSenseErrors",
5829                 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5830                 0, "Carrier sense errors");
5831
5832         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5833                 "stat_Dot3StatsFCSErrors",
5834                 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5835                 0, "Frame check sequence errors");
5836
5837         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5838                 "stat_Dot3StatsAlignmentErrors",
5839                 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5840                 0, "Alignment errors");
5841
5842         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5843                 "stat_Dot3StatsSingleCollisionFrames",
5844                 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5845                 0, "Single Collision Frames");
5846
5847         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5848                 "stat_Dot3StatsMultipleCollisionFrames",
5849                 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5850                 0, "Multiple Collision Frames");
5851
5852         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5853                 "stat_Dot3StatsDeferredTransmissions",
5854                 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5855                 0, "Deferred Transmissions");
5856
5857         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5858                 "stat_Dot3StatsExcessiveCollisions",
5859                 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5860                 0, "Excessive Collisions");
5861
5862         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5863                 "stat_Dot3StatsLateCollisions",
5864                 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5865                 0, "Late Collisions");
5866
5867         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5868                 "stat_EtherStatsCollisions",
5869                 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5870                 0, "Collisions");
5871
5872         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5873                 "stat_EtherStatsFragments",
5874                 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5875                 0, "Fragments");
5876
5877         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5878                 "stat_EtherStatsJabbers",
5879                 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5880                 0, "Jabbers");
5881
5882         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5883                 "stat_EtherStatsUndersizePkts",
5884                 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5885                 0, "Undersize packets");
5886
5887         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5888                 "stat_EtherStatsOverrsizePkts",
5889                 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5890                 0, "Oversize packets");
5891
5892         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5893                 "stat_EtherStatsPktsRx64Octets",
5894                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5895                 0, "Bytes received in 64 byte packets");
5896
5897         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5898                 "stat_EtherStatsPktsRx65Octetsto127Octets",
5899                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5900                 0, "Bytes received in 65 to 127 byte packets");
5901
5902         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5903                 "stat_EtherStatsPktsRx128Octetsto255Octets",
5904                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5905                 0, "Bytes received in 128 to 255 byte packets");
5906
5907         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5908                 "stat_EtherStatsPktsRx256Octetsto511Octets",
5909                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5910                 0, "Bytes received in 256 to 511 byte packets");
5911
5912         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5913                 "stat_EtherStatsPktsRx512Octetsto1023Octets",
5914                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5915                 0, "Bytes received in 512 to 1023 byte packets");
5916
5917         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5918                 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
5919                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5920                 0, "Bytes received in 1024 to 1522 byte packets");
5921
5922         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5923                 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
5924                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5925                 0, "Bytes received in 1523 to 9022 byte packets");
5926
5927         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5928                 "stat_EtherStatsPktsTx64Octets",
5929                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5930                 0, "Bytes sent in 64 byte packets");
5931
5932         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5933                 "stat_EtherStatsPktsTx65Octetsto127Octets",
5934                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5935                 0, "Bytes sent in 65 to 127 byte packets");
5936
5937         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5938                 "stat_EtherStatsPktsTx128Octetsto255Octets",
5939                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5940                 0, "Bytes sent in 128 to 255 byte packets");
5941
5942         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5943                 "stat_EtherStatsPktsTx256Octetsto511Octets",
5944                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5945                 0, "Bytes sent in 256 to 511 byte packets");
5946
5947         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5948                 "stat_EtherStatsPktsTx512Octetsto1023Octets",
5949                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5950                 0, "Bytes sent in 512 to 1023 byte packets");
5951
5952         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5953                 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
5954                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5955                 0, "Bytes sent in 1024 to 1522 byte packets");
5956
5957         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5958                 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
5959                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5960                 0, "Bytes sent in 1523 to 9022 byte packets");
5961
5962         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5963                 "stat_XonPauseFramesReceived",
5964                 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5965                 0, "XON pause frames received");
5966
5967         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5968                 "stat_XoffPauseFramesReceived",
5969                 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5970                 0, "XOFF pause frames received");
5971
5972         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5973                 "stat_OutXonSent",
5974                 CTLFLAG_RD, &sc->stat_OutXonSent,
5975                 0, "XON pause frames sent");
5976
5977         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5978                 "stat_OutXoffSent",
5979                 CTLFLAG_RD, &sc->stat_OutXoffSent,
5980                 0, "XOFF pause frames sent");
5981
5982         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5983                 "stat_FlowControlDone",
5984                 CTLFLAG_RD, &sc->stat_FlowControlDone,
5985                 0, "Flow control done");
5986
5987         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5988                 "stat_MacControlFramesReceived",
5989                 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
5990                 0, "MAC control frames received");
5991
5992         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5993                 "stat_XoffStateEntered",
5994                 CTLFLAG_RD, &sc->stat_XoffStateEntered,
5995                 0, "XOFF state entered");
5996
5997         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5998                 "stat_IfInFramesL2FilterDiscards",
5999                 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6000                 0, "Received L2 packets discarded");
6001
6002         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6003                 "stat_IfInRuleCheckerDiscards",
6004                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6005                 0, "Received packets discarded by rule");
6006
6007         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6008                 "stat_IfInFTQDiscards",
6009                 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6010                 0, "Received packet FTQ discards");
6011
6012         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6013                 "stat_IfInMBUFDiscards",
6014                 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6015                 0, "Received packets discarded due to lack of controller buffer memory");
6016
6017         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6018                 "stat_IfInRuleCheckerP4Hit",
6019                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6020                 0, "Received packets rule checker hits");
6021
6022         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6023                 "stat_CatchupInRuleCheckerDiscards",
6024                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6025                 0, "Received packets discarded in Catchup path");
6026
6027         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6028                 "stat_CatchupInFTQDiscards",
6029                 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6030                 0, "Received packets discarded in FTQ in Catchup path");
6031
6032         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6033                 "stat_CatchupInMBUFDiscards",
6034                 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6035                 0, "Received packets discarded in controller buffer memory in Catchup path");
6036
6037         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6038                 "stat_CatchupInRuleCheckerP4Hit",
6039                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6040                 0, "Received packets rule checker hits in Catchup path");
6041
6042 #ifdef BCE_DEBUG
6043         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6044                 "driver_state", CTLTYPE_INT | CTLFLAG_RW,
6045                 (void *)sc, 0,
6046                 bce_sysctl_driver_state, "I", "Driver state information");
6047
6048         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6049                 "hw_state", CTLTYPE_INT | CTLFLAG_RW,
6050                 (void *)sc, 0,
6051                 bce_sysctl_hw_state, "I", "Hardware state information");
6052
6053         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6054                 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6055                 (void *)sc, 0,
6056                 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6057
6058         SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6059                 "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6060                 (void *)sc, 0,
6061                 bce_sysctl_breakpoint, "I", "Driver breakpoint");
6062 #endif
6063
6064 }
6065
6066
6067 /****************************************************************************/
6068 /* BCE Debug Routines                                                       */
6069 /****************************************************************************/
6070 #ifdef BCE_DEBUG
6071
6072 /****************************************************************************/
6073 /* Prints out information about an mbuf.                                    */
6074 /*                                                                          */
6075 /* Returns:                                                                 */
6076 /*   Nothing.                                                               */
6077 /****************************************************************************/
6078 static void
6079 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6080 {
6081         u32 val_hi, val_lo;
6082         struct mbuf *mp = m;
6083
6084         if (m == NULL) {
6085                 /* NULL mbuf pointer passed in. */
6086                 printf("mbuf ptr is null!\n");
6087                 return;
6088         }
6089
6090         while (mp) {
6091                 val_hi = BCE_ADDR_HI(mp);
6092                 val_lo = BCE_ADDR_LO(mp);
6093                 BCE_PRINTF(sc, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ", 
6094                            val_hi, val_lo, mp->m_len);
6095
6096                 if (mp->m_flags & M_EXT)
6097                         printf("M_EXT ");
6098                 if (mp->m_flags & M_PKTHDR)
6099                         printf("M_PKTHDR ");
6100                 printf("\n");
6101
6102                 if (mp->m_flags & M_EXT) {
6103                         val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6104                         val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6105                         BCE_PRINTF(sc, "- m_ext: vaddr = 0x%08X:%08X, ext_size = 0x%04X\n", 
6106                                 val_hi, val_lo, mp->m_ext.ext_size);
6107                 }
6108
6109                 mp = mp->m_next;
6110         }
6111
6112
6113 }
6114
6115
6116 /****************************************************************************/
6117 /* Prints out the mbufs in the TX mbuf chain.                               */
6118 /*                                                                          */
6119 /* Returns:                                                                 */
6120 /*   Nothing.                                                               */
6121 /****************************************************************************/
6122 static void
6123 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6124 {
6125         struct mbuf *m;
6126
6127         BCE_PRINTF(sc,
6128                 "----------------------------"
6129                 "  tx mbuf data  "
6130                 "----------------------------\n");
6131
6132         for (int i = 0; i < count; i++) {
6133                 m = sc->tx_mbuf_ptr[chain_prod];
6134                 BCE_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
6135                 bce_dump_mbuf(sc, m);
6136                 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6137         }
6138
6139         BCE_PRINTF(sc,
6140                 "----------------------------"
6141                 "----------------"
6142                 "----------------------------\n");
6143 }
6144
6145
6146 /*
6147  * This routine prints the RX mbuf chain.
6148  */
6149 static void
6150 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6151 {
6152         struct mbuf *m;
6153
6154         BCE_PRINTF(sc,
6155                 "----------------------------"
6156                 "  rx mbuf data  "
6157                 "----------------------------\n");
6158
6159         for (int i = 0; i < count; i++) {
6160                 m = sc->rx_mbuf_ptr[chain_prod];
6161                 BCE_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
6162                 bce_dump_mbuf(sc, m);
6163                 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6164         }
6165
6166
6167         BCE_PRINTF(sc,
6168                 "----------------------------"
6169                 "----------------"
6170                 "----------------------------\n");
6171 }
6172
6173
6174 static void
6175 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6176 {
6177         if (idx > MAX_TX_BD)
6178                 /* Index out of range. */
6179                 BCE_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6180         else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6181                 /* TX Chain page pointer. */
6182                 BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 
6183                         idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6184         else
6185                 /* Normal tx_bd entry. */
6186                 BCE_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6187                         "flags = 0x%08X\n", idx, 
6188                         txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6189                         txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag_flags);
6190 }
6191
6192
6193 static void
6194 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
6195 {
6196         if (idx > MAX_RX_BD)
6197                 /* Index out of range. */
6198                 BCE_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
6199         else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
6200                 /* RX Chain page pointer. */
6201                 BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", 
6202                         idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
6203         else
6204                 /* Normal rx_bd entry. */
6205                 BCE_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6206                         "flags = 0x%08X\n", idx, 
6207                         rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
6208                         rxbd->rx_bd_len, rxbd->rx_bd_flags);
6209 }
6210
6211
6212 static void
6213 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
6214 {
6215         BCE_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
6216                 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
6217                 "tcp_udp_xsum = 0x%04X\n", idx,
6218                 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
6219                 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
6220                 l2fhdr->l2_fhdr_tcp_udp_xsum);
6221 }
6222
6223
6224 /*
6225  * This routine prints the TX chain.
6226  */
6227 static void
6228 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6229 {
6230         struct tx_bd *txbd;
6231
6232         /* First some info about the tx_bd chain structure. */
6233         BCE_PRINTF(sc,
6234                 "----------------------------"
6235                 "  tx_bd  chain  "
6236                 "----------------------------\n");
6237
6238         BCE_PRINTF(sc, "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
6239                 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6240
6241         BCE_PRINTF(sc, "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6242                 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6243
6244         BCE_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
6245
6246         BCE_PRINTF(sc, ""
6247                 "-----------------------------"
6248                 "   tx_bd data   "
6249                 "-----------------------------\n");
6250
6251         /* Now print out the tx_bd's themselves. */
6252         for (int i = 0; i < count; i++) {
6253                 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6254                 bce_dump_txbd(sc, tx_prod, txbd);
6255                 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6256         }
6257
6258         BCE_PRINTF(sc,
6259                 "-----------------------------"
6260                 "--------------"
6261                 "-----------------------------\n");
6262 }
6263
6264
6265 /*
6266  * This routine prints the RX chain.
6267  */
6268 static void
6269 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6270 {
6271         struct rx_bd *rxbd;
6272
6273         /* First some info about the rx_bd chain structure. */
6274         BCE_PRINTF(sc,
6275                 "----------------------------"
6276                 "  rx_bd  chain  "
6277                 "----------------------------\n");
6278
6279         BCE_PRINTF(sc, "----- RX_BD Chain -----\n");
6280
6281         BCE_PRINTF(sc, "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
6282                 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6283
6284         BCE_PRINTF(sc, "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6285                 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6286
6287         BCE_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
6288
6289         BCE_PRINTF(sc,
6290                 "----------------------------"
6291                 "   rx_bd data   "
6292                 "----------------------------\n");
6293
6294         /* Now print out the rx_bd's themselves. */
6295         for (int i = 0; i < count; i++) {
6296                 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6297                 bce_dump_rxbd(sc, rx_prod, rxbd);
6298                 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6299         }
6300
6301         BCE_PRINTF(sc,
6302                 "----------------------------"
6303                 "--------------"
6304                 "----------------------------\n");
6305 }
6306
6307
6308 /*
6309  * This routine prints the status block.
6310  */
6311 static void
6312 bce_dump_status_block(struct bce_softc *sc)
6313 {
6314         struct status_block *sblk;
6315
6316         sblk = sc->status_block;
6317
6318         BCE_PRINTF(sc, "----------------------------- Status Block "
6319                 "-----------------------------\n");
6320
6321         BCE_PRINTF(sc, "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
6322                 sblk->status_attn_bits, sblk->status_attn_bits_ack,
6323                 sblk->status_idx);
6324
6325         BCE_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
6326                 sblk->status_rx_quick_consumer_index0,
6327                 sblk->status_tx_quick_consumer_index0);
6328
6329         BCE_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
6330
6331         /* These indices are not used by normal L2 drivers. */
6332         if (sblk->status_rx_quick_consumer_index1 || 
6333                 sblk->status_tx_quick_consumer_index1)
6334                 BCE_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
6335                         sblk->status_rx_quick_consumer_index1,
6336                         sblk->status_tx_quick_consumer_index1);
6337
6338         if (sblk->status_rx_quick_consumer_index2 || 
6339                 sblk->status_tx_quick_consumer_index2)
6340                 BCE_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
6341                         sblk->status_rx_quick_consumer_index2,
6342                         sblk->status_tx_quick_consumer_index2);
6343
6344         if (sblk->status_rx_quick_consumer_index3 || 
6345                 sblk->status_tx_quick_consumer_index3)
6346                 BCE_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
6347                         sblk->status_rx_quick_consumer_index3,
6348                         sblk->status_tx_quick_consumer_index3);
6349
6350         if (sblk->status_rx_quick_consumer_index4 || 
6351                 sblk->status_rx_quick_consumer_index5)
6352                 BCE_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
6353                         sblk->status_rx_quick_consumer_index4,
6354                         sblk->status_rx_quick_consumer_index5);
6355
6356         if (sblk->status_rx_quick_consumer_index6 || 
6357                 sblk->status_rx_quick_consumer_index7)
6358                 BCE_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
6359                         sblk->status_rx_quick_consumer_index6,
6360                         sblk->status_rx_quick_consumer_index7);
6361
6362         if (sblk->status_rx_quick_consumer_index8 || 
6363                 sblk->status_rx_quick_consumer_index9)
6364                 BCE_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
6365                         sblk->status_rx_quick_consumer_index8,
6366                         sblk->status_rx_quick_consumer_index9);
6367
6368         if (sblk->status_rx_quick_consumer_index10 || 
6369                 sblk->status_rx_quick_consumer_index11)
6370                 BCE_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
6371                         sblk->status_rx_quick_consumer_index10,
6372                         sblk->status_rx_quick_consumer_index11);
6373
6374         if (sblk->status_rx_quick_consumer_index12 || 
6375                 sblk->status_rx_quick_consumer_index13)
6376                 BCE_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
6377                         sblk->status_rx_quick_consumer_index12,
6378                         sblk->status_rx_quick_consumer_index13);
6379
6380         if (sblk->status_rx_quick_consumer_index14 || 
6381                 sblk->status_rx_quick_consumer_index15)
6382                 BCE_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
6383                         sblk->status_rx_quick_consumer_index14,
6384                         sblk->status_rx_quick_consumer_index15);
6385
6386         if (sblk->status_completion_producer_index || 
6387                 sblk->status_cmd_consumer_index)
6388                 BCE_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
6389                         sblk->status_completion_producer_index,
6390                         sblk->status_cmd_consumer_index);
6391
6392         BCE_PRINTF(sc, "-------------------------------------------"
6393                 "-----------------------------\n");
6394 }
6395
6396
6397 /*
6398  * This routine prints the statistics block.
6399  */
6400 static void
6401 bce_dump_stats_block(struct bce_softc *sc)
6402 {
6403         struct statistics_block *sblk;
6404
6405         sblk = sc->stats_block;
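             /* Only counters with non-zero values are printed to keep the dump readable. */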
6406
6407         BCE_PRINTF(sc,
6408                 "-----------------------------"
6409                 " Stats  Block "
6410                 "-----------------------------\n");
6411
6412         BCE_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
6413                 "IfHcInBadOctets      = 0x%08X:%08X\n",
6414                 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
6415                 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
6416
6417         BCE_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
6418                 "IfHcOutBadOctets     = 0x%08X:%08X\n",
6419                 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
6420                 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
6421
6422         BCE_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
6423                 "IfHcInMulticastPkts  = 0x%08X:%08X\n",
6424                 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
6425                 sblk->stat_IfHCInMulticastPkts_hi, sblk->stat_IfHCInMulticastPkts_lo);
6426
6427         BCE_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
6428                 "IfHcOutUcastPkts     = 0x%08X:%08X\n",
6429                 sblk->stat_IfHCInBroadcastPkts_hi, sblk->stat_IfHCInBroadcastPkts_lo,
6430                 sblk->stat_IfHCOutUcastPkts_hi, sblk->stat_IfHCOutUcastPkts_lo);
6431
6432         BCE_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, IfHcOutBroadcastPkts = 0x%08X:%08X\n",
6433                 sblk->stat_IfHCOutMulticastPkts_hi, sblk->stat_IfHCOutMulticastPkts_lo,
6434                 sblk->stat_IfHCOutBroadcastPkts_hi, sblk->stat_IfHCOutBroadcastPkts_lo);
6435
6436         if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
6437                 BCE_PRINTF(sc, "0x%08X : "
6438                         "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
6439                         sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
6440
6441         if (sblk->stat_Dot3StatsCarrierSenseErrors)
6442                 BCE_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
6443                         sblk->stat_Dot3StatsCarrierSenseErrors);
6444
6445         if (sblk->stat_Dot3StatsFCSErrors)
6446                 BCE_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
6447                         sblk->stat_Dot3StatsFCSErrors);
6448
6449         if (sblk->stat_Dot3StatsAlignmentErrors)
6450                 BCE_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6451                         sblk->stat_Dot3StatsAlignmentErrors);
6452
6453         if (sblk->stat_Dot3StatsSingleCollisionFrames)
6454                 BCE_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6455                         sblk->stat_Dot3StatsSingleCollisionFrames);
6456
6457         if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6458                 BCE_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6459                         sblk->stat_Dot3StatsMultipleCollisionFrames);
6460         
6461         if (sblk->stat_Dot3StatsDeferredTransmissions)
6462                 BCE_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6463                         sblk->stat_Dot3StatsDeferredTransmissions);
6464
6465         if (sblk->stat_Dot3StatsExcessiveCollisions)
6466                 BCE_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6467                         sblk->stat_Dot3StatsExcessiveCollisions);
6468
6469         if (sblk->stat_Dot3StatsLateCollisions)
6470                 BCE_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6471                         sblk->stat_Dot3StatsLateCollisions);
6472
6473         if (sblk->stat_EtherStatsCollisions)
6474                 BCE_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6475                         sblk->stat_EtherStatsCollisions);
6476
6477         if (sblk->stat_EtherStatsFragments) 
6478                 BCE_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6479                         sblk->stat_EtherStatsFragments);
6480
6481         if (sblk->stat_EtherStatsJabbers)
6482                 BCE_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6483                         sblk->stat_EtherStatsJabbers);
6484
6485         if (sblk->stat_EtherStatsUndersizePkts)
6486                 BCE_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6487                         sblk->stat_EtherStatsUndersizePkts);
6488
6489         if (sblk->stat_EtherStatsOverrsizePkts)
6490                 BCE_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6491                         sblk->stat_EtherStatsOverrsizePkts);
6492
6493         if (sblk->stat_EtherStatsPktsRx64Octets)
6494                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6495                         sblk->stat_EtherStatsPktsRx64Octets);
6496
6497         if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6498                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6499                         sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6500
6501         if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6502                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
6503                         sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6504
6505         if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6506                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
6507                         sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6508
6509         if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6510                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
6511                         sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6512
6513         if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6514                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
6515                         sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6516
6517         if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6518                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
6519                         sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6520
6521         if (sblk->stat_EtherStatsPktsTx64Octets)
6522                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6523                         sblk->stat_EtherStatsPktsTx64Octets);
6524
6525         if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6526                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6527                         sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6528
6529         if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6530                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
6531                         sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6532
6533         if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6534                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
6535                         sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6536
6537         if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6538                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
6539                         sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6540
6541         if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6542                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
6543                         sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6544
6545         if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6546                 BCE_PRINTF(sc, "0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
6547                         sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6548
6549         if (sblk->stat_XonPauseFramesReceived)
6550                 BCE_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6551                         sblk->stat_XonPauseFramesReceived);
6552
6553         if (sblk->stat_XoffPauseFramesReceived)
6554                 BCE_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6555                         sblk->stat_XoffPauseFramesReceived);
6556
6557         if (sblk->stat_OutXonSent)
6558                 BCE_PRINTF(sc, "0x%08X : OutXonSent\n",
6559                         sblk->stat_OutXonSent);
6560
6561         if (sblk->stat_OutXoffSent)
6562                 BCE_PRINTF(sc, "0x%08X : OutXoffSent\n",
6563                         sblk->stat_OutXoffSent);
6564
6565         if (sblk->stat_FlowControlDone)
6566                 BCE_PRINTF(sc, "0x%08X : FlowControlDone\n",
6567                         sblk->stat_FlowControlDone);
6568
6569         if (sblk->stat_MacControlFramesReceived)
6570                 BCE_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6571                         sblk->stat_MacControlFramesReceived);
6572
6573         if (sblk->stat_XoffStateEntered)
6574                 BCE_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6575                         sblk->stat_XoffStateEntered);
6576
6577         if (sblk->stat_IfInFramesL2FilterDiscards)
6578                 BCE_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6579                         sblk->stat_IfInFramesL2FilterDiscards);
6580
6581         if (sblk->stat_IfInRuleCheckerDiscards)
6582                 BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6583                         sblk->stat_IfInRuleCheckerDiscards);
6584
6585         if (sblk->stat_IfInFTQDiscards)
6586                 BCE_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6587                         sblk->stat_IfInFTQDiscards);
6588
6589         if (sblk->stat_IfInMBUFDiscards)
6590                 BCE_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6591                         sblk->stat_IfInMBUFDiscards);
6592
6593         if (sblk->stat_IfInRuleCheckerP4Hit)
6594                 BCE_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6595                         sblk->stat_IfInRuleCheckerP4Hit);
6596
6597         if (sblk->stat_CatchupInRuleCheckerDiscards)
6598                 BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6599                         sblk->stat_CatchupInRuleCheckerDiscards);
6600
6601         if (sblk->stat_CatchupInFTQDiscards)
6602                 BCE_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6603                         sblk->stat_CatchupInFTQDiscards);
6604
6605         if (sblk->stat_CatchupInMBUFDiscards)
6606                 BCE_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6607                         sblk->stat_CatchupInMBUFDiscards);
6608
6609         if (sblk->stat_CatchupInRuleCheckerP4Hit)
6610                 BCE_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6611                         sblk->stat_CatchupInRuleCheckerP4Hit);
6612
6613         BCE_PRINTF(sc,
6614                 "-----------------------------"
6615                 "--------------"
6616                 "-----------------------------\n");
6617 }
6618
6619
6620 static void
6621 bce_dump_driver_state(struct bce_softc *sc)
6622 {
6623         u32 val_hi, val_lo;
6624
6625         BCE_PRINTF(sc,
6626                 "-----------------------------"
6627                 " Driver State "
6628                 "-----------------------------\n");
6629
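             /*
              * BCE_ADDR_HI()/BCE_ADDR_LO() split a pointer into its upper and
              * lower 32 bits so addresses can be printed as hi:lo on both
              * 32-bit and 64-bit hosts.
              */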
6630         val_hi = BCE_ADDR_HI(sc);
6631         val_lo = BCE_ADDR_LO(sc);
6632         BCE_PRINTF(sc, "0x%08X:%08X - (sc) driver softc structure virtual address\n",
6633                 val_hi, val_lo);
6634
6635         val_hi = BCE_ADDR_HI(sc->bce_vhandle);
6636         val_lo = BCE_ADDR_LO(sc->bce_vhandle);
6637         BCE_PRINTF(sc, "0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
6638                 val_hi, val_lo);
6639
6640         val_hi = BCE_ADDR_HI(sc->status_block);
6641         val_lo = BCE_ADDR_LO(sc->status_block);
6642         BCE_PRINTF(sc, "0x%08X:%08X - (sc->status_block) status block virtual address\n",
6643                 val_hi, val_lo);
6644
6645         val_hi = BCE_ADDR_HI(sc->stats_block);
6646         val_lo = BCE_ADDR_LO(sc->stats_block);
6647         BCE_PRINTF(sc, "0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
6648                 val_hi, val_lo);
6649
6650         val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
6651         val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
6652         BCE_PRINTF(sc,
6653                 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual address\n",
6654                 val_hi, val_lo);
6655
6656         val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
6657         val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
6658         BCE_PRINTF(sc,
6659                 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6660                 val_hi, val_lo);
6661
6662         val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
6663         val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
6664         BCE_PRINTF(sc,
6665                 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6666                 val_hi, val_lo);
6667
6668         val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
6669         val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
6670         BCE_PRINTF(sc, 
6671                 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6672                 val_hi, val_lo);
6673
6674         BCE_PRINTF(sc, "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6675                 sc->interrupts_generated);
6676         
6677         BCE_PRINTF(sc, "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6678                 sc->rx_interrupts);
6679
6680         BCE_PRINTF(sc, "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6681                 sc->tx_interrupts);
6682
6683         BCE_PRINTF(sc, "         0x%08X - (sc->last_status_idx) status block index\n",
6684                 sc->last_status_idx);
6685
6686         BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6687                 sc->tx_prod);
6688
6689         BCE_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6690                 sc->tx_cons);
6691
6692         BCE_PRINTF(sc, "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6693                 sc->tx_prod_bseq);
6694
6695         BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6696                 sc->rx_prod);
6697
6698         BCE_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6699                 sc->rx_cons);
6700
6701         BCE_PRINTF(sc, "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6702                 sc->rx_prod_bseq);
6703
6704         BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6705                 sc->rx_mbuf_alloc);
6706
6707         BCE_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
6708                 sc->free_rx_bd);
6709
6710         BCE_PRINTF(sc, "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6711                 sc->rx_low_watermark, (u32) USABLE_RX_BD);
6712
6713         BCE_PRINTF(sc, "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6714                 sc->tx_mbuf_alloc);
6715
6716         BCE_PRINTF(sc, "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6717                 sc->rx_mbuf_alloc);
6718
6719         BCE_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6720                 sc->used_tx_bd);
6721
6722         BCE_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6723                 sc->tx_hi_watermark, (u32) USABLE_TX_BD);
6724
6725         BCE_PRINTF(sc, "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
6726                 sc->mbuf_alloc_failed);
6727
6728         BCE_PRINTF(sc,
6729                 "-----------------------------"
6730                 "--------------"
6731                 "-----------------------------\n");
6732 }
6733
6734
6735 static void
6736 bce_dump_hw_state(struct bce_softc *sc)
6737 {
6738         u32 val1;
6739
6740         BCE_PRINTF(sc,
6741                 "----------------------------"
6742                 " Hardware State "
6743                 "----------------------------\n");
6744
6745         BCE_PRINTF(sc, "0x%08X : bootcode version\n", sc->bce_fw_ver);
6746
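             /* Read and display the status register of each major controller block. */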
6747         val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
6748         BCE_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6749                 val1, BCE_MISC_ENABLE_STATUS_BITS);
6750
6751         val1 = REG_RD(sc, BCE_DMA_STATUS);
6752         BCE_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);
6753
6754         val1 = REG_RD(sc, BCE_CTX_STATUS);
6755         BCE_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);
6756
6757         val1 = REG_RD(sc, BCE_EMAC_STATUS);
6758         BCE_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, BCE_EMAC_STATUS);
6759
6760         val1 = REG_RD(sc, BCE_RPM_STATUS);
6761         BCE_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);
6762
6763         val1 = REG_RD(sc, BCE_TBDR_STATUS);
6764         BCE_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, BCE_TBDR_STATUS);
6765
6766         val1 = REG_RD(sc, BCE_TDMA_STATUS);
6767         BCE_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, BCE_TDMA_STATUS);
6768
6769         val1 = REG_RD(sc, BCE_HC_STATUS);
6770         BCE_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BCE_HC_STATUS);
6771
6772         BCE_PRINTF(sc, 
6773                 "----------------------------"
6774                 "----------------"
6775                 "----------------------------\n");
6776
6777         BCE_PRINTF(sc, 
6778                 "----------------------------"
6779                 " Register  Dump "
6780                 "----------------------------\n");
6781
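             /* Dump the register space from 0x400 through 0x7FFC, four 32-bit registers per line. */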
6782         for (int i = 0x400; i < 0x8000; i += 0x10)
6783                 BCE_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6784                         i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6785                         REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6786
6787         BCE_PRINTF(sc, 
6788                 "----------------------------"
6789                 "----------------"
6790                 "----------------------------\n");
6791 }
6792
6793
6794 static void
6795 bce_breakpoint(struct bce_softc *sc)
6796 {
6797
6798         /* Unreachable code to shut the compiler up about unused functions. */
6799         if (0) {
6800                 bce_dump_txbd(sc, 0, NULL);
6801                 bce_dump_rxbd(sc, 0, NULL);
6802                 bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6803                 bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
6804                 bce_dump_l2fhdr(sc, 0, NULL);
6805                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6806                 bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
6807                 bce_dump_status_block(sc);
6808                 bce_dump_stats_block(sc);
6809                 bce_dump_driver_state(sc);
6810                 bce_dump_hw_state(sc);
6811         }
6812
6813         bce_dump_driver_state(sc);
6814         /* Print the important status block fields. */
6815         bce_dump_status_block(sc);
6816
6817         /* Call the debugger. */
6818         breakpoint();
6819
6820         return;
6821 }
6822 #endif