2 * Copyright (c) 2007-2011 Broadcom Corporation. All rights reserved.
4 * Gary Zambrano <zambrano@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written consent.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * The following controllers are supported by this driver:
42 * The following controllers are not supported by this driver:
43 * BCM57710 A0 (pre-production)
45 * External PHY References:
46 * ------------------------
47 * BCM8073 - Dual Port 10GBase-KR Ethernet PHY
48 * BCM8705 - 10Gb Ethernet Serial Transceiver
49 * BCM8706 - 10Gb Ethernet LRM PHY
50 * BCM8726 - Dual Port 10Gb Ethernet LRM PHY
51 * BCM8727 - Dual Port 10Gb Ethernet LRM PHY
52 * BCM8481 - Single Port 10GBase-T Ethernet PHY
53 * BCM84823 - Dual Port 10GBase-T Ethernet PHY
54 * SFX7101 - Solarflare 10GBase-T Ethernet PHY
59 #include "bxe_include.h"
63 #include "hw_dump_reg_st.h"
67 #include "bxe_self_test.h"
69 /* BXE Debug Options */
/* Global debug-message level mask; BXE_WARN enables warning output only. */
71 uint32_t bxe_debug = BXE_WARN;
/*
 * Fault-injection rates for the bxe_debug_* knobs below (0 disables).
 * NOTE(review): the table reads as thresholds against a 31-bit random
 * draw (e.g. 1 => 1 failure in 2^31, 1073741824 => 1 in 2) -- confirm
 * against the random-number comparison in the debug macros.
 */
74 /* 1 = 1 in 2,147,483,648 */
75 /* 256 = 1 in 8,388,608 */
76 /* 2048 = 1 in 1,048,576 */
77 /* 65536 = 1 in 32,768 */
78 /* 1048576 = 1 in 2,048 */
79 /* 268435456 = 1 in 8 */
80 /* 536870912 = 1 in 4 */
81 /* 1073741824 = 1 in 2 */
83 /* Controls how often to simulate an mbuf allocation failure. */
84 int bxe_debug_mbuf_allocation_failure = 0;
86 /* Controls how often to simulate a DMA mapping failure. */
87 int bxe_debug_dma_map_addr_failure = 0;
89 /* Controls how often to simulate a bootcode failure. */
90 int bxe_debug_bootcode_running_failure = 0;
/* Clause-22 indirect address register used to select an MDIO register bank. */
93 #define MDIO_INDIRECT_REG_ADDR 0x1f
/* Select an MDIO register bank by writing it through the indirect register. */
94 #define MDIO_SET_REG_BANK(sc, reg_bank) \
95 bxe_mdio22_write(sc, MDIO_INDIRECT_REG_ADDR, reg_bank)
/* NOTE(review): presumably an iteration bound for MDIO completion polling -- confirm at use sites. */
97 #define MDIO_ACCESS_TIMEOUT 1000
98 #define BMAC_CONTROL_RX_ENABLE 2
100 /* BXE Build Time Options */
/* Left disabled by default; see BXE_NVRAM_WRITE_SUPPORT-guarded code below. */
101 /* #define BXE_NVRAM_WRITE 1 */
102 #define BXE_USE_DMAE 1
105 * PCI Device ID Table
106 * Used by bxe_probe() to identify the devices supported by this driver.
108 #define BXE_DEVDESC_MAX 64
110 static struct bxe_type bxe_devs[] = {
111 /* BCM57710 Controllers and OEM boards. */
112 { BRCM_VENDORID, BRCM_DEVICEID_BCM57710, PCI_ANY_ID, PCI_ANY_ID,
113 "Broadcom NetXtreme II BCM57710 10GbE" },
114 /* BCM57711 Controllers and OEM boards. */
115 { BRCM_VENDORID, BRCM_DEVICEID_BCM57711, PCI_ANY_ID, PCI_ANY_ID,
116 "Broadcom NetXtreme II BCM57711 10GbE" },
117 /* BCM57711E Controllers and OEM boards. */
118 { BRCM_VENDORID, BRCM_DEVICEID_BCM57711E, PCI_ANY_ID, PCI_ANY_ID,
119 "Broadcom NetXtreme II BCM57711E 10GbE" },
124 * FreeBSD device entry points.
126 static int bxe_probe(device_t);
127 static int bxe_attach(device_t);
128 static int bxe_detach(device_t);
129 static int bxe_shutdown(device_t);
132 * Driver local functions.
134 static void bxe_tunables_set(struct bxe_softc *);
135 static void bxe_print_adapter_info(struct bxe_softc *);
136 static void bxe_probe_pci_caps(struct bxe_softc *);
137 static void bxe_link_settings_supported(struct bxe_softc *, uint32_t);
138 static void bxe_link_settings_requested(struct bxe_softc *);
139 static int bxe_hwinfo_function_get(struct bxe_softc *);
140 static int bxe_hwinfo_port_get(struct bxe_softc *);
141 static int bxe_hwinfo_common_get(struct bxe_softc *);
142 static void bxe_undi_unload(struct bxe_softc *);
143 static int bxe_setup_leading(struct bxe_softc *);
144 static int bxe_stop_leading(struct bxe_softc *);
145 static int bxe_setup_multi(struct bxe_softc *, int);
146 static int bxe_stop_multi(struct bxe_softc *, int);
147 static int bxe_stop_locked(struct bxe_softc *, int);
148 static int bxe_alloc_buf_rings(struct bxe_softc *);
149 static void bxe_free_buf_rings(struct bxe_softc *);
150 static void bxe_init_locked(struct bxe_softc *, int);
151 static int bxe_wait_ramrod(struct bxe_softc *, int, int, int *, int);
152 static void bxe_init_str_wr(struct bxe_softc *, uint32_t, const uint32_t *,
154 static void bxe_init_ind_wr(struct bxe_softc *, uint32_t, const uint32_t *,
156 static void bxe_init_wr_64(struct bxe_softc *, uint32_t, const uint32_t *,
158 static void bxe_write_big_buf(struct bxe_softc *, uint32_t, uint32_t);
159 static void bxe_init_fill(struct bxe_softc *, uint32_t, int, uint32_t);
160 static void bxe_init_block(struct bxe_softc *, uint32_t, uint32_t);
161 static void bxe_init(void *);
162 static void bxe_release_resources(struct bxe_softc *);
163 static void bxe_reg_wr_ind(struct bxe_softc *, uint32_t, uint32_t);
164 static uint32_t bxe_reg_rd_ind(struct bxe_softc *, uint32_t);
165 static void bxe_post_dmae(struct bxe_softc *, struct dmae_command *, int);
166 static void bxe_wb_wr(struct bxe_softc *, int, uint32_t, uint32_t);
167 static __inline uint32_t bxe_reg_poll(struct bxe_softc *, uint32_t,
169 static int bxe_mc_assert(struct bxe_softc *);
170 static void bxe_panic_dump(struct bxe_softc *);
171 static void bxe_int_enable(struct bxe_softc *);
172 static void bxe_int_disable(struct bxe_softc *);
174 static int bxe_nvram_acquire_lock(struct bxe_softc *);
175 static int bxe_nvram_release_lock(struct bxe_softc *);
176 static void bxe_nvram_enable_access(struct bxe_softc *);
177 static void bxe_nvram_disable_access(struct bxe_softc *);
178 static int bxe_nvram_read_dword (struct bxe_softc *, uint32_t, uint32_t *,
180 static int bxe_nvram_read(struct bxe_softc *, uint32_t, uint8_t *, int);
182 #ifdef BXE_NVRAM_WRITE_SUPPORT
183 static int bxe_nvram_write_dword(struct bxe_softc *, uint32_t, uint32_t,
185 static int bxe_nvram_write1(struct bxe_softc *, uint32_t, uint8_t *, int);
186 static int bxe_nvram_write(struct bxe_softc *, uint32_t, uint8_t *, int);
189 static int bxe_nvram_test(struct bxe_softc *);
191 static __inline void bxe_ack_sb(struct bxe_softc *, uint8_t, uint8_t, uint16_t,
193 static __inline uint16_t bxe_update_fpsb_idx(struct bxe_fastpath *);
194 static uint16_t bxe_ack_int(struct bxe_softc *);
195 static void bxe_sp_event(struct bxe_fastpath *, union eth_rx_cqe *);
196 static int bxe_acquire_hw_lock(struct bxe_softc *, uint32_t);
197 static int bxe_release_hw_lock(struct bxe_softc *, uint32_t);
198 static void bxe_acquire_phy_lock(struct bxe_softc *);
199 static void bxe_release_phy_lock(struct bxe_softc *);
200 static void bxe_pmf_update(struct bxe_softc *);
201 static void bxe_init_port_minmax(struct bxe_softc *);
202 static void bxe_link_attn(struct bxe_softc *);
204 static int bxe_sp_post(struct bxe_softc *, int, int, uint32_t, uint32_t, int);
205 static int bxe_acquire_alr(struct bxe_softc *);
206 static void bxe_release_alr(struct bxe_softc *);
207 static uint16_t bxe_update_dsb_idx(struct bxe_softc *);
208 static void bxe_attn_int_asserted(struct bxe_softc *, uint32_t);
209 static __inline void bxe_attn_int_deasserted0(struct bxe_softc *, uint32_t);
210 static __inline void bxe_attn_int_deasserted1(struct bxe_softc *, uint32_t);
211 static __inline void bxe_attn_int_deasserted2(struct bxe_softc *, uint32_t);
212 static __inline void bxe_attn_int_deasserted3(struct bxe_softc *, uint32_t);
213 static void bxe_attn_int_deasserted(struct bxe_softc *, uint32_t);
214 static void bxe_attn_int(struct bxe_softc *);
216 static void bxe_stats_storm_post(struct bxe_softc *);
217 static void bxe_stats_init(struct bxe_softc *);
218 static void bxe_stats_hw_post(struct bxe_softc *);
219 static int bxe_stats_comp(struct bxe_softc *);
220 static void bxe_stats_pmf_update(struct bxe_softc *);
221 static void bxe_stats_port_base_init(struct bxe_softc *);
222 static void bxe_stats_port_init(struct bxe_softc *);
223 static void bxe_stats_func_base_init(struct bxe_softc *);
224 static void bxe_stats_func_init(struct bxe_softc *);
225 static void bxe_stats_start(struct bxe_softc *);
226 static void bxe_stats_pmf_start(struct bxe_softc *);
227 static void bxe_stats_restart(struct bxe_softc *);
228 static void bxe_stats_bmac_update(struct bxe_softc *);
229 static void bxe_stats_emac_update(struct bxe_softc *);
230 static int bxe_stats_hw_update(struct bxe_softc *);
231 static int bxe_stats_storm_update(struct bxe_softc *);
232 static void bxe_stats_func_base_update(struct bxe_softc *);
233 static void bxe_stats_update(struct bxe_softc *);
234 static void bxe_stats_port_stop(struct bxe_softc *);
235 static void bxe_stats_stop(struct bxe_softc *);
236 static void bxe_stats_do_nothing(struct bxe_softc *);
237 static void bxe_stats_handle(struct bxe_softc *, enum bxe_stats_event);
239 static int bxe_tx_encap(struct bxe_fastpath *, struct mbuf **);
240 static void bxe_tx_start(struct ifnet *);
241 static void bxe_tx_start_locked(struct ifnet *, struct bxe_fastpath *);
242 static int bxe_tx_mq_start(struct ifnet *, struct mbuf *);
243 static int bxe_tx_mq_start_locked(struct ifnet *,
244 struct bxe_fastpath *, struct mbuf *);
245 static void bxe_mq_flush(struct ifnet *ifp);
246 static int bxe_ioctl(struct ifnet *, u_long, caddr_t);
247 static __inline int bxe_has_rx_work(struct bxe_fastpath *);
248 static __inline int bxe_has_tx_work(struct bxe_fastpath *);
250 static void bxe_intr_legacy(void *);
251 static void bxe_task_sp(void *, int);
252 static void bxe_intr_sp(void *);
253 static void bxe_task_fp(void *, int);
254 static void bxe_intr_fp(void *);
255 static void bxe_zero_sb(struct bxe_softc *, int);
256 static void bxe_init_sb(struct bxe_softc *,
257 struct host_status_block *, bus_addr_t, int);
258 static void bxe_zero_def_sb(struct bxe_softc *);
259 static void bxe_init_def_sb(struct bxe_softc *,
260 struct host_def_status_block *, bus_addr_t, int);
261 static void bxe_update_coalesce(struct bxe_softc *);
262 static __inline void bxe_update_rx_prod(struct bxe_softc *,
263 struct bxe_fastpath *, uint16_t, uint16_t, uint16_t);
264 static void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *);
265 static __inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *);
266 static int bxe_alloc_tpa_mbuf(struct bxe_fastpath *, int);
267 static int bxe_fill_tpa_pool(struct bxe_fastpath *);
268 static void bxe_free_tpa_pool(struct bxe_fastpath *);
270 static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *, uint16_t);
271 static int bxe_fill_sg_chain(struct bxe_fastpath *);
272 static void bxe_free_sg_chain(struct bxe_fastpath *);
274 static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *, uint16_t);
275 static int bxe_fill_rx_bd_chain(struct bxe_fastpath *);
276 static void bxe_free_rx_bd_chain(struct bxe_fastpath *);
278 static void bxe_mutexes_alloc(struct bxe_softc *);
279 static void bxe_mutexes_free(struct bxe_softc *);
280 static void bxe_clear_rx_chains(struct bxe_softc *);
281 static int bxe_init_rx_chains(struct bxe_softc *);
282 static void bxe_clear_tx_chains(struct bxe_softc *);
283 static void bxe_init_tx_chains(struct bxe_softc *);
284 static void bxe_init_sp_ring(struct bxe_softc *);
285 static void bxe_init_context(struct bxe_softc *);
286 static void bxe_init_ind_table(struct bxe_softc *);
287 static void bxe_set_client_config(struct bxe_softc *);
288 static void bxe_set_storm_rx_mode(struct bxe_softc *);
289 static void bxe_init_internal_common(struct bxe_softc *);
290 static void bxe_init_internal_port(struct bxe_softc *);
292 static void bxe_init_internal_func(struct bxe_softc *);
293 static void bxe_init_internal(struct bxe_softc *, uint32_t);
294 static int bxe_init_nic(struct bxe_softc *, uint32_t);
295 static void bxe_lb_pckt(struct bxe_softc *);
296 static int bxe_int_mem_test(struct bxe_softc *);
297 static void bxe_enable_blocks_attention (struct bxe_softc *);
299 static void bxe_init_pxp(struct bxe_softc *);
300 static int bxe_init_common(struct bxe_softc *);
301 static int bxe_init_port(struct bxe_softc *);
302 static void bxe_ilt_wr(struct bxe_softc *, uint32_t, bus_addr_t);
303 static int bxe_init_func(struct bxe_softc *);
304 static int bxe_init_hw(struct bxe_softc *, uint32_t);
305 static int bxe_fw_command(struct bxe_softc *, uint32_t);
306 static void bxe_host_structures_free(struct bxe_softc *);
307 static void bxe_dma_map_addr(void *, bus_dma_segment_t *, int, int);
308 static int bxe_host_structures_alloc(device_t);
309 static void bxe_set_mac_addr_e1(struct bxe_softc *, int);
310 static void bxe_set_mac_addr_e1h(struct bxe_softc *, int);
311 static void bxe_set_rx_mode(struct bxe_softc *);
312 static void bxe_reset_func(struct bxe_softc *);
313 static void bxe_reset_port(struct bxe_softc *);
314 static void bxe_reset_common(struct bxe_softc *);
315 static void bxe_reset_chip(struct bxe_softc *, uint32_t);
316 static int bxe_ifmedia_upd(struct ifnet *);
317 static void bxe_ifmedia_status(struct ifnet *, struct ifmediareq *);
318 static __inline void bxe_update_last_max_sge(struct bxe_fastpath *, uint16_t);
319 static void bxe_update_sge_prod(struct bxe_fastpath *,
320 struct eth_fast_path_rx_cqe *);
321 static void bxe_tpa_start(struct bxe_fastpath *, uint16_t, uint16_t, uint16_t);
322 static int bxe_fill_frag_mbuf(struct bxe_softc *, struct bxe_fastpath *,
323 struct mbuf *, struct eth_fast_path_rx_cqe *, uint16_t);
324 static void bxe_tpa_stop(struct bxe_softc *, struct bxe_fastpath *, uint16_t,
325 int, int, union eth_rx_cqe *, uint16_t);
326 static void bxe_rxeof(struct bxe_fastpath *);
327 static void bxe_txeof(struct bxe_fastpath *);
328 static int bxe_watchdog(struct bxe_fastpath *fp);
329 static void bxe_tick(void *);
330 static void bxe_add_sysctls(struct bxe_softc *);
332 static void bxe_write_dmae_phys_len(struct bxe_softc *,
333 bus_addr_t, uint32_t, uint32_t);
335 void bxe_write_dmae(struct bxe_softc *, bus_addr_t, uint32_t, uint32_t);
336 void bxe_read_dmae(struct bxe_softc *, uint32_t, uint32_t);
337 int bxe_set_gpio(struct bxe_softc *, int, uint32_t, uint8_t);
338 int bxe_get_gpio(struct bxe_softc *, int, uint8_t);
339 int bxe_set_spio(struct bxe_softc *, int, uint32_t);
340 int bxe_set_gpio_int(struct bxe_softc *, int, uint32_t, uint8_t);
343 * BXE Debug Data Structure Dump Routines
347 static int bxe_sysctl_driver_state(SYSCTL_HANDLER_ARGS);
348 static int bxe_sysctl_hw_state(SYSCTL_HANDLER_ARGS);
349 static int bxe_sysctl_dump_fw(SYSCTL_HANDLER_ARGS);
350 static int bxe_sysctl_dump_rx_cq_chain(SYSCTL_HANDLER_ARGS);
351 static int bxe_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS);
352 static int bxe_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS);
353 static int bxe_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
354 static int bxe_sysctl_breakpoint(SYSCTL_HANDLER_ARGS);
355 static __noinline void bxe_validate_rx_packet(struct bxe_fastpath *,
356 uint16_t, union eth_rx_cqe *, struct mbuf *);
357 static void bxe_grcdump(struct bxe_softc *, int);
358 static __noinline void bxe_dump_enet(struct bxe_softc *,struct mbuf *);
359 static __noinline void bxe_dump_mbuf (struct bxe_softc *, struct mbuf *);
360 static __noinline void bxe_dump_tx_mbuf_chain(struct bxe_softc *, int, int);
361 static __noinline void bxe_dump_rx_mbuf_chain(struct bxe_softc *, int, int);
362 static __noinline void bxe_dump_tx_parsing_bd(struct bxe_fastpath *,int,
363 struct eth_tx_parse_bd *);
364 static __noinline void bxe_dump_txbd(struct bxe_fastpath *, int,
365 union eth_tx_bd_types *);
366 static __noinline void bxe_dump_rxbd(struct bxe_fastpath *, int,
368 static __noinline void bxe_dump_cqe(struct bxe_fastpath *,
369 int, union eth_rx_cqe *);
370 static __noinline void bxe_dump_tx_chain(struct bxe_fastpath *, int, int);
371 static __noinline void bxe_dump_rx_cq_chain(struct bxe_fastpath *, int, int);
372 static __noinline void bxe_dump_rx_bd_chain(struct bxe_fastpath *, int, int);
373 static __noinline void bxe_dump_status_block(struct bxe_softc *);
374 static __noinline void bxe_dump_stats_block(struct bxe_softc *);
375 static __noinline void bxe_dump_fp_state(struct bxe_fastpath *);
376 static __noinline void bxe_dump_port_state_locked(struct bxe_softc *);
377 static __noinline void bxe_dump_link_vars_state_locked(struct bxe_softc *);
378 static __noinline void bxe_dump_link_params_state_locked(struct bxe_softc *);
379 static __noinline void bxe_dump_driver_state(struct bxe_softc *);
380 static __noinline void bxe_dump_hw_state(struct bxe_softc *);
381 static __noinline void bxe_dump_fw(struct bxe_softc *);
382 static void bxe_decode_mb_msgs(struct bxe_softc *, uint32_t, uint32_t);
383 static void bxe_decode_ramrod_cmd(struct bxe_softc *, int);
384 static void bxe_breakpoint(struct bxe_softc *);
388 #define BXE_DRIVER_VERSION "1.5.52"
390 static void bxe_init_e1_firmware(struct bxe_softc *sc);
391 static void bxe_init_e1h_firmware(struct bxe_softc *sc);
394 * FreeBSD device dispatch table.
396 static device_method_t bxe_methods[] = {
397 /* Device interface (device_if.h) */
398 DEVMETHOD(device_probe, bxe_probe),
399 DEVMETHOD(device_attach, bxe_attach),
400 DEVMETHOD(device_detach, bxe_detach),
401 DEVMETHOD(device_shutdown, bxe_shutdown),
403 /* Bus interface (bus_if.h) */
404 DEVMETHOD(bus_print_child, bus_generic_print_child),
405 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
411 static driver_t bxe_driver = {
414 sizeof(struct bxe_softc)
417 static devclass_t bxe_devclass;
419 MODULE_DEPEND(bxe, pci, 1, 1, 1);
420 MODULE_DEPEND(bxe, ether, 1, 1, 1);
421 DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
424 * Tunable device values
426 SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
/*
 * Loader tunables (hw.bxe.*): read at boot via TUNABLE_INT and exposed
 * read-only at runtime through sysctl (CTLFLAG_RDTUN).
 * NOTE(review): the backing variables are plain int but are exported with
 * SYSCTL_UINT -- harmless for these value ranges, but worth confirming.
 */
427 /* Allowable values are TRUE (1) or FALSE (0). */
429 static int bxe_dcc_enable = FALSE;
430 TUNABLE_INT("hw.bxe.dcc_enable", &bxe_dcc_enable);
431 SYSCTL_UINT(_hw_bxe, OID_AUTO, dcc_enable, CTLFLAG_RDTUN, &bxe_dcc_enable,
432 0, "dcc Enable/Disable");
434 /* Allowable values are TRUE (1) or FALSE (0). */
435 static int bxe_tso_enable = TRUE;
436 TUNABLE_INT("hw.bxe.tso_enable", &bxe_tso_enable);
437 SYSCTL_UINT(_hw_bxe, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bxe_tso_enable,
438 0, "TSO Enable/Disable");
440 /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ). */
441 static int bxe_int_mode = 2;
442 TUNABLE_INT("hw.bxe.int_mode", &bxe_int_mode);
443 SYSCTL_UINT(_hw_bxe, OID_AUTO, int_mode, CTLFLAG_RDTUN, &bxe_int_mode,
444 0, "Interrupt (MSI-X|MSI|INTx) mode");
447 * Specifies the number of queues that will be used when a multi-queue
448 * RSS mode is selected using bxe_multi_mode below.
450 * Allowable values are 0 (Auto) or 1 to MAX_CONTEXT (fixed queue number).
452 static int bxe_queue_count = 0;
453 TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
454 SYSCTL_UINT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &bxe_queue_count,
455 0, "Multi-Queue queue count");
458 * ETH_RSS_MODE_DISABLED (0)
459 * Disables all multi-queue/packet sorting algorithms. All
460 * received frames are routed to a single receive queue.
462 * ETH_RSS_MODE_REGULAR (1)
463 * The default mode which assigns incoming frames to receive
464 * queues according to RSS (i.e a 2-tuple match on the source/
465 * destination IP address or a 4-tuple match on the source/
466 * destination IP address and the source/destination TCP port).
469 static int bxe_multi_mode = ETH_RSS_MODE_REGULAR;
470 TUNABLE_INT("hw.bxe.multi_mode", &bxe_multi_mode);
471 SYSCTL_UINT(_hw_bxe, OID_AUTO, multi_mode, CTLFLAG_RDTUN, &bxe_multi_mode,
472 0, "Multi-Queue Mode");
475 * Host interrupt coalescing is controlled by these values.
476 * The first frame always causes an interrupt but subsequent
477 * frames are coalesced until the RX/TX ticks timer value
478 * expires and another interrupt occurs. (Ticks are measured
481 static uint32_t bxe_rx_ticks = 25;
482 TUNABLE_INT("hw.bxe.rx_ticks", &bxe_rx_ticks);
483 SYSCTL_UINT(_hw_bxe, OID_AUTO, rx_ticks, CTLFLAG_RDTUN, &bxe_rx_ticks,
486 static uint32_t bxe_tx_ticks = 50;
487 TUNABLE_INT("hw.bxe.tx_ticks", &bxe_tx_ticks);
488 SYSCTL_UINT(_hw_bxe, OID_AUTO, tx_ticks, CTLFLAG_RDTUN, &bxe_tx_ticks,
489 0, "Transmit ticks");
492 * Allows the PCIe maximum read request size value to be manually
493 * set during initialization rather than automatically determined
496 * Allowable values are:
497 * -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB)
499 static int bxe_mrrs = -1;
500 TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
501 SYSCTL_UINT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, &bxe_mrrs,
502 0, "PCIe maximum read request size.");
506 * Allows setting the maximum number of received frames to process
507 * during an interrupt.
509 * Allowable values are:
510 * -1 (Unlimited), 0 (None), otherwise specifies the number of RX frames.
512 static int bxe_rx_limit = -1;
513 TUNABLE_INT("hw.bxe.rx_limit", &bxe_rx_limit);
514 SYSCTL_UINT(_hw_bxe, OID_AUTO, rx_limit, CTLFLAG_RDTUN, &bxe_rx_limit,
515 0, "Maximum received frames processed during an interrupt.");
518 * Allows setting the maximum number of transmit frames to process
519 * during an interrupt.
521 * Allowable values are:
522 * -1 (Unlimited), 0 (None), otherwise specifies the number of TX frames.
524 static int bxe_tx_limit = -1;
525 TUNABLE_INT("hw.bxe.tx_limit", &bxe_tx_limit);
526 SYSCTL_UINT(_hw_bxe, OID_AUTO, tx_limit, CTLFLAG_RDTUN, &bxe_tx_limit,
527 0, "Maximum transmit frames processed during an interrupt.");
534 /* 0 is common, 1 is port 0, 2 is port 1. */
535 static int load_count[3];
537 /* Tracks whether MCP firmware is running. */
542 * A debug version of the 32 bit OS register write function to
543 * capture/display values written to the controller.
549 bxe_reg_write32(struct bxe_softc *sc, bus_size_t offset, uint32_t val)
552 if ((offset % 4) != 0) {
553 DBPRINT(sc, BXE_WARN,
554 "%s(): Warning! Unaligned write to 0x%jX!\n", __FUNCTION__,
558 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
559 __FUNCTION__, (uintmax_t)offset, val);
561 bus_space_write_4(sc->bxe_btag, sc->bxe_bhandle, offset, val);
565 * A debug version of the 16 bit OS register write function to
566 * capture/display values written to the controller.
572 bxe_reg_write16(struct bxe_softc *sc, bus_size_t offset, uint16_t val)
575 if ((offset % 2) != 0) {
576 DBPRINT(sc, BXE_WARN,
577 "%s(): Warning! Unaligned write to 0x%jX!\n", __FUNCTION__,
581 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%04X\n",
582 __FUNCTION__, (uintmax_t)offset, val);
584 bus_space_write_2(sc->bxe_btag, sc->bxe_bhandle, offset, val);
588 * A debug version of the 8 bit OS register write function to
589 * capture/display values written to the controller.
595 bxe_reg_write8(struct bxe_softc *sc, bus_size_t offset, uint8_t val)
598 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%02X\n",
599 __FUNCTION__, (uintmax_t)offset, val);
601 bus_space_write_1(sc->bxe_btag, sc->bxe_bhandle, offset, val);
605 * A debug version of the 32 bit OS register read function to
606 * capture/display values read from the controller.
612 bxe_reg_read32(struct bxe_softc *sc, bus_size_t offset)
616 if ((offset % 4) != 0) {
617 DBPRINT(sc, BXE_WARN,
618 "%s(): Warning! Unaligned read from 0x%jX!\n",
619 __FUNCTION__, (uintmax_t)offset);
622 val = bus_space_read_4(sc->bxe_btag, sc->bxe_bhandle, offset);
624 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
625 __FUNCTION__, (uintmax_t)offset, val);
631 * A debug version of the 16 bit OS register read function to
632 * capture/display values read from the controller.
638 bxe_reg_read16(struct bxe_softc *sc, bus_size_t offset)
642 if ((offset % 2) != 0) {
643 DBPRINT(sc, BXE_WARN,
644 "%s(): Warning! Unaligned read from 0x%jX!\n",
645 __FUNCTION__, (uintmax_t)offset);
648 val = bus_space_read_2(sc->bxe_btag, sc->bxe_bhandle, offset);
650 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
651 __FUNCTION__, (uintmax_t)offset, val);
658 * A debug version of the 8 bit OS register read function to
659 * capture/display values read from the controller.
665 bxe_reg_read8(struct bxe_softc *sc, bus_size_t offset)
667 uint8_t val = bus_space_read_1(sc->bxe_btag, sc->bxe_bhandle, offset);
669 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%02X\n",
670 __FUNCTION__, (uintmax_t)offset, val);
677 bxe_read_mf_cfg(struct bxe_softc *sc)
681 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
682 func = 2 * vn + BP_PORT(sc);
684 SHMEM_RD(sc,mf_cfg.func_mf_config[func].config);
690 bxe_e1h_disable(struct bxe_softc *sc)
695 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0);
696 sc->bxe_ifp->if_drv_flags = 0;
700 bxe_e1h_enable(struct bxe_softc *sc)
705 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
706 sc->bxe_ifp->if_drv_flags = IFF_DRV_RUNNING;
710 * Calculates the sum of vn_min_rates.
711 * It's needed for further normalizing of the min_rates.
713 * sum of vn_min_rates.
715 * 0 - if all the min_rates are 0. In the latter case fairness
716 * algorithm should be deactivated. If not all min_rates are
717 * zero then those that are zeroes will be set to 1.
720 bxe_calc_vn_wsum(struct bxe_softc *sc)
722 uint32_t vn_cfg, vn_min_rate;
725 DBENTER(BXE_VERBOSE_LOAD);
729 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
730 vn_cfg = sc->mf_config[vn];
731 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
732 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
733 /* Skip hidden vns */
734 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
736 /* If min rate is zero - set it to 1. */
738 vn_min_rate = DEF_MIN_RATE;
742 sc->vn_wsum += vn_min_rate;
745 /* ... only if all min rates are zeros - disable fairness */
747 sc->cmng.flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
749 sc->cmng.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
751 DBEXIT(BXE_VERBOSE_LOAD);
760 bxe_init_vn_minmax(struct bxe_softc *sc, int vn)
762 struct rate_shaping_vars_per_vn m_rs_vn;
763 struct fairness_vars_per_vn m_fair_vn;
765 uint16_t vn_min_rate, vn_max_rate;
768 vn_cfg = sc->mf_config[vn];
769 func = 2 * vn + BP_PORT(sc);
771 DBENTER(BXE_VERBOSE_LOAD);
773 /* If function is hidden - set min and max to zeroes. */
774 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
778 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
779 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
781 * If fairness is enabled (i.e. not all min rates are zero),
782 * and if the current min rate is zero, set it to 1.
783 * This is a requirement of the algorithm.
785 if (sc->vn_wsum && (vn_min_rate == 0))
786 vn_min_rate = DEF_MIN_RATE;
788 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
789 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
791 if (vn_max_rate == 0)
794 DBPRINT(sc, BXE_INFO_LOAD,
795 "%s(): func %d: vn_min_rate = %d, vn_max_rate = %d, wsum = %d.\n",
796 __FUNCTION__, func, vn_min_rate, vn_max_rate, sc->vn_wsum);
798 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
799 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
801 /* Global VNIC counter - maximal Mbps for this VNIC. */
802 m_rs_vn.vn_counter.rate = vn_max_rate;
804 /* Quota - number of bytes transmitted in this period. */
805 m_rs_vn.vn_counter.quota =
806 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
810 * Credit for each period of the fairness algorithm. The
811 * number of bytes in T_FAIR (the VNIC shares the port rate).
812 * vn_wsum should not be larger than 10000, thus
813 * T_FAIR_COEF / (8 * vn_wsum) will always be greater than zero.
815 m_fair_vn.vn_credit_delta =
816 max((uint32_t)(vn_min_rate * (T_FAIR_COEF /
818 (uint32_t)(sc->cmng.fair_vars.fair_threshold * 2));
823 /* Store it to internal memory */
824 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn) / 4; i++)
825 REG_WR(sc, BAR_XSTORM_INTMEM +
826 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + (i * 4),
827 ((uint32_t *)(&m_rs_vn))[i]);
829 for (i = 0; i < sizeof(struct fairness_vars_per_vn) / 4; i++)
830 REG_WR(sc, BAR_XSTORM_INTMEM +
831 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + (i * 4),
832 ((uint32_t *)(&m_fair_vn))[i]);
834 DBEXIT(BXE_VERBOSE_LOAD);
838 bxe_congestionmgmt(struct bxe_softc *sc, uint8_t readshm)
842 DBENTER(BXE_VERBOSE_LOAD);
844 /* Read mf conf from shmem. */
848 /* Init rate shaping and fairness contexts */
849 bxe_init_port_minmax(sc);
851 /* vn_weight_sum and enable fairness if not 0 */
852 bxe_calc_vn_wsum(sc);
854 /* calculate and set min-max rate for each vn */
855 for (vn = 0; vn < E1HVN_MAX; vn++)
856 bxe_init_vn_minmax(sc, vn);
858 /* Always enable rate shaping and fairness. */
859 sc->cmng.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
861 DBPRINT(sc, BXE_VERBOSE_LOAD,
862 "%s(): Rate shaping set\n", __FUNCTION__);
865 DBPRINT(sc, BXE_INFO_LOAD, "%s(): All MIN values "
866 "are zeroes, fairness is disabled\n", __FUNCTION__);
868 DBEXIT(BXE_VERBOSE_LOAD);
872 bxe_dcc_event(struct bxe_softc *sc, uint32_t dcc_event)
876 DBENTER(BXE_VERBOSE_LOAD);
878 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
879 if (sc->mf_config[BP_E1HVN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
880 DBPRINT(sc, BXE_INFO_LOAD, "%s(): mf_cfg function "
881 "disabled\n", __FUNCTION__);
882 sc->state = BXE_STATE_DISABLED;
885 DBPRINT(sc, BXE_INFO_LOAD, "%s(): mf_cfg function "
886 "enabled\n", __FUNCTION__);
887 sc->state = BXE_STATE_OPEN;
890 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
892 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
894 bxe_congestionmgmt(sc, TRUE);
895 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
896 REG_WR(sc, BAR_XSTORM_INTMEM +
897 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
898 ((uint32_t *)(&sc->cmng))[i]);
899 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
902 /* Report results to MCP */
904 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE);
906 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK);
908 DBEXIT(BXE_VERBOSE_LOAD);
912 * Device probe function.
914 * Compares the device to the driver's list of supported devices and
915 * reports back to the OS whether this is the right driver for the device.
918 * BUS_PROBE_DEFAULT on success, positive value on failure.
/*
 * Device probe: compare this device's PCI vendor/device/subsystem IDs
 * against the driver's supported-device table (t).  On a match, build a
 * human-readable description (name, chip rev letter/metal, driver version)
 * and report BUS_PROBE_DEFAULT to the newbus framework.
 * NOTE(review): `t' and `descbuf' are declared on lines elided from this
 * extract — presumably `t' walks the bxe device table; confirm upstream.
 */
921 bxe_probe(device_t dev)
923 struct bxe_softc *sc;
926 uint16_t did, sdid, svid, vid;
928 sc = device_get_softc(dev);
932 /* Get the data for the device to be probed. */
933 vid = pci_get_vendor(dev);
934 did = pci_get_device(dev);
935 svid = pci_get_subvendor(dev);
936 sdid = pci_get_subdevice(dev);
938 DBPRINT(sc, BXE_VERBOSE_LOAD,
939 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
940 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
942 /* Look through the list of known devices for a match. */
943 while (t->bxe_name != NULL) {
/* PCI_ANY_ID entries act as wildcards for the subsystem IDs. */
944 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
945 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
946 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
/* M_NOWAIT allocation; failure path is elided here. */
947 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
951 /* Print out the device identity. */
952 snprintf(descbuf, BXE_DEVDESC_MAX,
953 "%s (%c%d) BXE v:%s\n", t->bxe_name,
/* Revision ID: high nibble is the rev letter, low nibble the metal. */
954 (((pci_read_config(dev, PCIR_REVID, 4) &
956 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
/* device_set_desc_copy() duplicates the string, so free ours. */
959 device_set_desc_copy(dev, descbuf);
960 free(descbuf, M_TEMP);
961 return (BUS_PROBE_DEFAULT);
970 * Prints useful adapter info.
975 /* ToDo: Create a sysctl for this info. */
/*
 * Print a one-time summary of the adapter to the console: chip ID and
 * revision, PCIe link width/speed, enabled interrupt/offload flags,
 * queue configuration, ring sizes, and firmware/bootcode versions.
 */
977 bxe_print_adapter_info(struct bxe_softc *sc)
981 DBENTER(BXE_EXTREME_LOAD);
983 /* Hardware chip info. */
984 BXE_PRINTF("ASIC (0x%08X); ", sc->common.chip_id);
/* Rev letter is encoded in bits 15:12, metal in bits 7:4 of the chip id. */
985 printf("Rev (%c%d); ", (CHIP_REV(sc) >> 12) + 'A',
986 (CHIP_METAL(sc) >> 4));
989 printf("Bus (PCIe x%d, ", sc->pcie_link_width);
990 switch (sc->pcie_link_speed) {
998 printf("Unknown link speed");
1001 /* Device features. */
1002 printf("); Flags (");
1004 /* Miscellaneous flags. */
1005 if (sc->msi_count > 0)
/* `i' counts flags already printed so "|" separators are placed correctly. */
1008 if (sc->msix_count > 0) {
1009 if (i > 0) printf("|");
1010 printf("MSI-X"); i++;
1013 if (TPA_ENABLED(sc)) {
1014 if (i > 0) printf("|");
1018 printf("); Queues (");
1019 switch (sc->multi_mode) {
1020 case ETH_RSS_MODE_DISABLED:
1023 case ETH_RSS_MODE_REGULAR:
1024 printf("RSS:%d", sc->num_queues);
1031 printf("); BD's (RX:%d,TX:%d",
1032 (int) USABLE_RX_BD, (int) USABLE_TX_BD);
1034 /* Firmware versions and device features. */
/* Bootcode version is packed as three bytes in sc->common.bc_ver. */
1035 printf("); Firmware (%d.%d.%d); Bootcode (%d.%d.%d)\n",
1036 BCM_5710_FW_MAJOR_VERSION,
1037 BCM_5710_FW_MINOR_VERSION,
1038 BCM_5710_FW_REVISION_VERSION,
1039 (int)((sc->common.bc_ver & 0xff0000) >> 16),
1040 (int)((sc->common.bc_ver & 0x00ff00) >> 8),
1041 (int)((sc->common.bc_ver & 0x0000ff)));
1043 DBEXIT(BXE_EXTREME_LOAD);
1047 * Release any interrupts allocated by the driver.
/*
 * Release all IRQ resources allocated by bxe_interrupt_alloc().
 * Exactly one of three modes was in use: MSI-X, MSI, or legacy INTx;
 * the corresponding SYS_RES_IRQ resources are released, and for
 * MSI/MSI-X the message vectors are returned via pci_release_msi().
 */
1053 bxe_interrupt_free(struct bxe_softc *sc)
1058 DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
1062 if (sc->msix_count > 0) {
1063 /* Free MSI-X resources. */
1065 for (i = 0; i < sc->msix_count; i++) {
1066 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
1067 BXE_VERBOSE_INTR), "%s(): Releasing MSI-X[%d] "
1068 "vector.\n", __FUNCTION__, i);
/* Only release resources that were actually allocated. */
1069 if (sc->bxe_msix_res[i] && sc->bxe_msix_rid[i])
1070 bus_release_resource(dev, SYS_RES_IRQ,
1071 sc->bxe_msix_rid[i], sc->bxe_msix_res[i]);
1074 pci_release_msi(dev);
1076 } else if (sc->msi_count > 0) {
1077 /* Free MSI resources. */
1079 for (i = 0; i < sc->msi_count; i++) {
1080 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
1081 BXE_VERBOSE_INTR), "%s(): Releasing MSI[%d] "
1082 "vector.\n", __FUNCTION__, i);
1083 if (sc->bxe_msi_res[i] && sc->bxe_msi_rid[i])
1084 bus_release_resource(dev, SYS_RES_IRQ,
1085 sc->bxe_msi_rid[i], sc->bxe_msi_res[i]);
1088 pci_release_msi(dev);
1091 /* Free legacy interrupt resources. */
1093 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
1094 BXE_VERBOSE_INTR), "%s(): Releasing legacy interrupt.\n",
1096 if (sc->bxe_irq_res != NULL)
1097 bus_release_resource(dev, SYS_RES_IRQ,
1098 sc->bxe_irq_rid, sc->bxe_irq_res);
1101 DBEXIT(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
1105 * This function determines and allocates the appropriate
1106 * interrupt based on system capabilities and user request.
1108 * The user may force a particular interrupt mode, specify
1109 * the number of receive queues, specify the method for
1110 * distributing received frames to receive queues, or use
1111 * the default settings which will automatically select the
1112 * best supported combination. In addition, the OS may or
1113 * may not support certain combinations of these settings.
1114 * This routine attempts to reconcile the settings requested
1115 * by the user with the capabilities available from the system
1116 * to select the optimal combination of features.
1119 * 0 = Success, !0 = Failure.
/*
 * Allocate the best available interrupt resources, stepping down
 * MSI-X -> MSI -> INTx as capabilities or OS grants fall short.
 * With multiple vectors the first is reserved for the slowpath and
 * one additional vector is used per fastpath receive queue.
 * Returns 0 on success, nonzero (rc) on failure.
 */
1122 bxe_interrupt_alloc(struct bxe_softc *sc)
1125 int error, i, rid, rc;
1126 int msi_count, msi_required, msi_allocated;
1127 int msix_count, msix_required, msix_allocated;
1129 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1133 msi_count = msi_required = msi_allocated = 0;
1134 msix_count = msix_required = msix_allocated = 0;
1136 /* Get the number of available MSI/MSI-X interrupts from the OS. */
1137 if (sc->int_mode > 0) {
1138 if (sc->bxe_cap_flags & BXE_MSIX_CAPABLE_FLAG)
1139 msix_count = pci_msix_count(dev);
1141 if (sc->bxe_cap_flags & BXE_MSI_CAPABLE_FLAG)
1142 msi_count = pci_msi_count(dev);
1144 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1145 "%s(): %d MSI and %d MSI-X vectors available.\n",
1146 __FUNCTION__, msi_count, msix_count);
1149 /* Try allocating MSI-X interrupt resources. */
/* int_mode > 1 means the user allows MSI-X; also need enough vectors. */
1150 if ((sc->bxe_cap_flags & BXE_MSIX_CAPABLE_FLAG) &&
1151 (sc->int_mode > 1) && (msix_count > 0) &&
1152 (msix_count >= sc->num_queues)) {
1153 /* Ask for the necessary number of MSI-X vectors. */
/* One slowpath vector plus one per queue (minimum of 2). */
1154 if (sc->num_queues == 1)
1155 msix_allocated = msix_required = 2;
1157 msix_allocated = msix_required = sc->num_queues + 1;
1159 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1160 "%s(): Requesting %d MSI-X vectors.\n",
1161 __FUNCTION__, msix_required);
1163 /* BSD resource identifier */
/* pci_alloc_msix() may grant fewer vectors than requested. */
1165 error = pci_alloc_msix(dev, &msix_allocated);
1167 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1168 "%s(): Required/Allocated (%d/%d) MSI-X vector(s).\n",
1169 __FUNCTION__, msix_required, msix_allocated);
1171 /* Make sure we got all the interrupts we asked for. */
1172 if (msix_allocated >= msix_required) {
1173 sc->msix_count = msix_required;
1176 /* Allocate the MSI-X vectors. */
1177 for (i = 0; i < msix_required; i++) {
1178 sc->bxe_msix_rid[i] = rid + i +
1180 sc->bxe_msix_res[i] =
1181 bus_alloc_resource_any(dev,
1182 SYS_RES_IRQ, &sc->bxe_msix_rid[i],
1184 /* Report any IRQ allocation errors. */
1185 if (sc->bxe_msix_res[i] == NULL) {
1187 "%s(%d): Failed to map MSI-X[%d] vector!\n",
1188 __FILE__, __LINE__, (3));
1190 goto bxe_interrupt_alloc_exit;
1195 DBPRINT(sc, BXE_WARN,
1196 "%s(): MSI-X allocation failed!\n",
1199 /* Release any resources acquired. */
1200 pci_release_msi(dev);
1201 sc->msix_count = msix_count = 0;
1203 /* We'll try MSI next. */
1209 /* Try allocating MSI vector resources. */
1210 if ((sc->bxe_cap_flags & BXE_MSI_CAPABLE_FLAG) &&
1211 (sc->int_mode > 0) && (msi_count > 0) &&
1212 (msi_count >= sc->num_queues)) {
1213 /* Ask for the necessary number of MSI vectors. */
1214 if (sc->num_queues == 1)
1215 msi_required = msi_allocated = 1;
1217 msi_required = msi_allocated = BXE_MSI_VECTOR_COUNT;
1219 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1220 "%s(): Requesting %d MSI vectors.\n", __FUNCTION__,
1224 error = pci_alloc_msi(dev, &msi_allocated);
1226 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1227 "%s(): Required/Allocated (%d/%d) MSI vector(s).\n",
1228 __FUNCTION__, msi_required, msi_allocated);
1231 * Make sure we got all the vectors we asked for.
1233 * FreeBSD always gives 8 even if we ask for less.
1235 if (msi_required >= msi_allocated) {
1236 sc->msi_count = msi_required;
1237 /* Allocate the MSI vectors. */
1238 for (i = 0; i < msi_required; i++) {
1239 sc->bxe_msi_rid[i] = i + rid;
1240 sc->bxe_msi_res[i] =
1241 bus_alloc_resource_any(dev,
1242 SYS_RES_IRQ, &sc->bxe_msi_rid[i],
1244 /* Report any IRQ allocation errors. */
1245 if (sc->bxe_msi_res[i] == NULL) {
1247 "%s(%d): Failed to map MSI vector (%d)!\n",
1248 __FILE__, __LINE__, (i));
1250 goto bxe_interrupt_alloc_exit;
1256 DBPRINT(sc, BXE_WARN, "%s(): MSI allocation failed!\n",
1259 /* Release any resources acquired. */
1260 pci_release_msi(dev);
1261 sc->msi_count = msi_count = 0;
1263 /* We'll try INTx next. */
1268 /* Try allocating INTx resources. */
1269 if (sc->int_mode == 0) {
/* Legacy INTx cannot support multiple receive queues. */
1271 sc->multi_mode = ETH_RSS_MODE_DISABLED;
1273 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1274 "%s(): Requesting legacy INTx interrupt.\n",
1278 sc->bxe_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1279 RF_SHAREABLE | RF_ACTIVE);
1280 /* Report any IRQ allocation errors. */
1281 if (sc->bxe_irq_res == NULL) {
1282 BXE_PRINTF("%s(%d): PCI map interrupt failed!\n",
1283 __FILE__, __LINE__);
1285 goto bxe_interrupt_alloc_exit;
1287 sc->bxe_irq_rid = rid;
1290 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1291 "%s(): Actual: int_mode = %d, multi_mode = %d, num_queues = %d\n",
1292 __FUNCTION__, sc->int_mode, sc->multi_mode, sc->num_queues);
1294 bxe_interrupt_alloc_exit:
1295 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1300 * This function releases taskqueues.
/*
 * Tear down interrupt handlers and deferred-work taskqueues created by
 * bxe_interrupt_attach(): drain and free the per-queue fastpath
 * taskqueues and the slowpath taskqueue, then detach the ISRs for
 * whichever mode (MSI-X, MSI, or legacy INTx) was in use.
 * IRQ *resources* themselves are released by bxe_interrupt_free().
 */
1306 bxe_interrupt_detach(struct bxe_softc *sc)
1309 struct bxe_fastpath *fp;
1314 DBENTER(BXE_VERBOSE_UNLOAD);
1319 /* Free the OS taskqueue resources. */
1320 for (i = 0; i < sc->num_queues; i++) {
/* Drain first so no task is running when the queue is freed. */
1323 if (fp->tq != NULL) {
1324 taskqueue_drain(fp->tq, &fp->task);
1325 taskqueue_free(fp->tq);
1329 if (sc->tq != NULL) {
1330 taskqueue_drain(sc->tq, &sc->task);
1331 taskqueue_free(sc->tq);
1335 /* Release interrupt resources. */
1336 if (sc->msix_count > 0) {
1337 for (i = 0; i < sc->msix_count; i++) {
1338 if (sc->bxe_msix_tag[i] && sc->bxe_msix_res[i])
1339 bus_teardown_intr(dev, sc->bxe_msix_res[i],
1340 sc->bxe_msix_tag[i]);
1342 } else if (sc->msi_count > 0) {
1343 for (i = 0; i < sc->msi_count; i++) {
1344 if (sc->bxe_msi_tag[i] && sc->bxe_msi_res[i])
1345 bus_teardown_intr(dev, sc->bxe_msi_res[i],
1346 sc->bxe_msi_tag[i]);
/* Legacy INTx case. */
1349 if (sc->bxe_irq_tag != NULL)
1350 bus_teardown_intr(dev, sc->bxe_irq_res,
1354 DBEXIT(BXE_VERBOSE_UNLOAD);
1358 * This function enables interrupts and attaches to the ISR.
1360 * When using multiple MSI/MSI-X vectors the first vector
1361 * is used for slowpath operations while all remaining
1362 * vectors are used for fastpath operations. If only a
1363 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
1364 * ISR must look for both slowpath and fastpath completions.
1367 * 0 = Success, !0 = Failure.
/*
 * Hook up ISRs and create deferred-work taskqueues for the interrupt
 * mode selected by bxe_interrupt_alloc().  Vector 0 (MSI/MSI-X) handles
 * the slowpath; vectors 1..num_queues handle fastpath completions.  With
 * legacy INTx a single handler (bxe_intr_legacy) covers both paths.
 * Returns 0 on success, nonzero (rc) on failure.
 */
1370 bxe_interrupt_attach(struct bxe_softc *sc)
1372 struct bxe_fastpath *fp;
1375 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1380 /* Setup the slowpath deferred task queue. */
1381 TASK_INIT(&sc->task, 0, bxe_task_sp, sc);
1382 sc->tq = taskqueue_create_fast("bxe_spq", M_NOWAIT,
1383 taskqueue_thread_enqueue, &sc->tq);
1384 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s spq",
1385 device_get_nameunit(sc->dev));
1388 /* Setup interrupt handlers. */
1389 if (sc->msix_count > 0) {
1390 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1391 "%s(): Enabling slowpath MSI-X[0] vector.\n",__FUNCTION__);
1393 * Setup the interrupt handler. Note that we pass the
1394 * driver instance to the interrupt handler for the
1397 rc = bus_setup_intr(sc->dev, sc->bxe_msix_res[0],
1398 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_sp,
1399 sc, &sc->bxe_msix_tag[0]);
1403 "%s(%d): Failed to allocate MSI-X[0] vector!\n",
1404 __FILE__, __LINE__);
1405 goto bxe_interrupt_attach_exit;
/* bus_describe_intr() only exists on newer FreeBSD releases. */
1408 #if __FreeBSD_version >= 800504
1409 bus_describe_intr(sc->dev, sc->bxe_msix_res[0],
1410 sc->bxe_msix_tag[0], "sp");
1413 /* Now initialize the fastpath vectors. */
1414 for (i = 0; i < (sc->num_queues); i++) {
1416 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1417 "%s(): Enabling MSI-X[%d] vector.\n",
1418 __FUNCTION__, i + 1);
1420 * Setup the interrupt handler. Note that we pass the
1421 * fastpath context to the interrupt handler in this
1422 * case. Also the first msix_res was used by the sp.
1424 rc = bus_setup_intr(sc->dev, sc->bxe_msix_res[i + 1],
1425 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_fp,
1426 fp, &sc->bxe_msix_tag[i + 1]);
1430 "%s(%d): Failed to allocate MSI-X[%d] vector!\n",
1431 __FILE__, __LINE__, (i + 1));
1432 goto bxe_interrupt_attach_exit;
1435 #if __FreeBSD_version >= 800504
1436 bus_describe_intr(sc->dev, sc->bxe_msix_res[i + 1],
1437 sc->bxe_msix_tag[i + 1], "fp[%02d]", i);
1440 /* Bind the fastpath instance to a CPU. */
1441 if (sc->num_queues > 1) {
1442 bus_bind_intr(sc->dev,
1443 sc->bxe_msix_res[i + 1], i);
/* Per-queue deferred-work taskqueue for this fastpath. */
1447 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
1448 fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
1449 taskqueue_thread_enqueue, &fp->tq);
1450 taskqueue_start_threads(&fp->tq, 1, PI_NET, "%s fpq",
1451 device_get_nameunit(sc->dev));
1453 fp->state = BXE_FP_STATE_IRQ;
1455 } else if (sc->msi_count > 0) {
1456 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1457 "%s(): Enabling slowpath MSI[0] vector.\n",
1460 * Setup the interrupt handler. Note that we pass the driver
1461 * instance to the interrupt handler for the slowpath.
1463 rc = bus_setup_intr(sc->dev,sc->bxe_msi_res[0],
1464 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_sp,
1465 sc, &sc->bxe_msi_tag[0]);
1469 "%s(%d): Failed to allocate MSI[0] vector!\n",
1470 __FILE__, __LINE__);
1471 goto bxe_interrupt_attach_exit;
1474 #if __FreeBSD_version >= 800504
1475 bus_describe_intr(sc->dev, sc->bxe_msi_res[0],
1476 sc->bxe_msi_tag[0], "sp");
1479 /* Now initialize the fastpath vectors. */
1480 for (i = 0; i < (sc->num_queues); i++) {
1483 (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1484 "%s(): Enabling MSI[%d] vector.\n",
1485 __FUNCTION__, i + 1);
1487 * Setup the interrupt handler. Note that we pass the
1488 * fastpath context to the interrupt handler in this
1491 rc = bus_setup_intr(sc->dev, sc->bxe_msi_res[i + 1],
1492 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_fp,
1493 fp, &sc->bxe_msi_tag[i + 1]);
1497 "%s(%d): Failed to allocate MSI[%d] vector!\n",
1498 __FILE__, __LINE__, (i + 1));
1499 goto bxe_interrupt_attach_exit;
1502 #if __FreeBSD_version >= 800504
1503 bus_describe_intr(sc->dev, sc->bxe_msi_res[i + 1],
1504 sc->bxe_msi_tag[i + 1], "fp[%02d]", i);
1508 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
1509 fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
1510 taskqueue_thread_enqueue, &fp->tq);
1511 taskqueue_start_threads(&fp->tq, 1, PI_NET, "%s fpq",
1512 device_get_nameunit(sc->dev));
/* Legacy INTx: one shared handler covers slowpath and fastpath. */
1520 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1521 "%s(): Enabling INTx interrupts.\n", __FUNCTION__);
1524 * Setup the interrupt handler. Note that we pass the
1525 * driver instance to the interrupt handler which
1526 * will handle both the slowpath and fastpath.
1528 rc = bus_setup_intr(sc->dev,sc->bxe_irq_res, INTR_TYPE_NET |
1529 INTR_MPSAFE, NULL, bxe_intr_legacy, sc, &sc->bxe_irq_tag);
1532 BXE_PRINTF("%s(%d): Failed to allocate interrupt!\n",
1533 __FILE__, __LINE__);
1534 goto bxe_interrupt_attach_exit;
1537 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
1538 fp->tq = taskqueue_create_fast("bxe_fpq",
1539 M_NOWAIT, taskqueue_thread_enqueue, &fp->tq);
1540 taskqueue_start_threads(&fp->tq, 1,
1541 PI_NET, "%s fpq", device_get_nameunit(sc->dev));
1545 bxe_interrupt_attach_exit:
1546 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1552 * PCI Capabilities Probe Function.
1554 * Walks the PCI capabilities list for the device to find what features are
1555 * supported. These capabilities may be enabled/disabled by firmware so it's
1556 * best to walk the list rather than hard code any values.
/*
 * Walk the device's PCI capability list and record what it supports
 * (PM, PCIe, MSI, MSI-X) in sc->bxe_cap_flags; also derive the PCIe
 * link speed/width, with workarounds for two 57710 (E1) errata.
 *
 * Fixed: the `&reg' argument of every pci_find_cap() call had been
 * mangled into the mojibake character `®' (HTML-entity corruption),
 * which could not compile.  Restored `&reg'.
 */
1562 bxe_probe_pci_caps(struct bxe_softc *sc)
1566 uint16_t link_status;
1569 DBENTER(BXE_EXTREME_LOAD);
1571 /* Check if PCI Power Management capability is enabled. */
1572 if (pci_find_cap(dev, PCIY_PMG, &reg) == 0) {
1574 DBPRINT(sc, BXE_EXTREME_LOAD,
1575 "%s(): Found PM capability at 0x%04X\n",
1581 /* Check if PCIe capability is enabled. */
1582 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
/* Link Status register lives at offset 0x12 in the PCIe capability. */
1584 link_status = pci_read_config(dev, reg + 0x12, 2);
1586 DBPRINT(sc, BXE_EXTREME_LOAD,
1587 "%s(): Found PCIe capability at 0x%04X\n",
1590 /* Handle PCIe 2.0 workarounds for the 57710. */
1591 if (CHIP_IS_E1(sc)) {
1592 /* Workaround for 57710 errata E4_57710_27462. */
1593 sc->pcie_link_speed =
1594 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
1596 /* Workaround for 57710 errata E4_57710_27488. */
1597 sc->pcie_link_width = (link_status >> 4) & 0x3f;
1598 if (sc->pcie_link_speed > 1)
1599 sc->pcie_link_width =
1600 ((link_status >> 4) & 0x3f) >> 1;
/* Non-E1 chips: take speed/width straight from Link Status. */
1604 sc->pcie_link_speed = link_status & 0xf;
1605 sc->pcie_link_width = (link_status >> 4) & 0x3f;
1609 sc->bxe_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
1615 /* Check if MSI capability is enabled. */
1616 if (pci_find_cap(dev, PCIY_MSI, &reg) == 0) {
1618 DBPRINT(sc, BXE_EXTREME_LOAD,
1619 "%s(): Found MSI capability at 0x%04X\n",
1621 sc->bxe_cap_flags |= BXE_MSI_CAPABLE_FLAG;
1625 /* Check if MSI-X capability is enabled. */
1626 if (pci_find_cap(dev, PCIY_MSIX, &reg) == 0) {
1628 DBPRINT(sc, BXE_EXTREME_LOAD,
1629 "%s(): Found MSI-X capability at 0x%04X\n",
1631 sc->bxe_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
1635 DBEXIT(BXE_EXTREME_LOAD);
1639 * Setup firmware pointers for BCM57710.
/*
 * Point the softc's firmware accessors at the statically linked
 * BCM57710 (E1) firmware arrays: init ops/data/offsets plus the
 * interrupt-table and PRAM images for the T/U/X/C storm processors.
 */
1645 bxe_init_e1_firmware(struct bxe_softc *sc)
1647 INIT_OPS(sc) = (struct raw_op *)init_ops_e1;
1648 INIT_DATA(sc) = (const uint32_t *)init_data_e1;
1649 INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1;
1650 INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1;
1651 INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1;
1652 INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1;
1653 INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1;
1654 INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1;
1655 INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1;
1656 INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1;
1657 INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1;
1661 * Setup firmware pointers for BCM57711.
/*
 * Point the softc's firmware accessors at the statically linked
 * BCM57711 (E1H) firmware arrays; mirrors bxe_init_e1_firmware().
 */
1667 bxe_init_e1h_firmware(struct bxe_softc *sc)
1669 INIT_OPS(sc) = (struct raw_op *)init_ops_e1h;
1670 INIT_DATA(sc) = (const uint32_t *)init_data_e1h;
1671 INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1h;
1672 INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1h;
1673 INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1h;
1674 INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1h;
1675 INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1h;
1676 INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1h;
1677 INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1h;
1678 INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1h;
1679 INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1h;
1683 * Sets up pointers for loading controller firmware.
1686 * 0 = Success, !0 = Failure
/*
 * Select the firmware image set matching the controller revision
 * (E1 = 57710, E1H = 57711); fail loudly for unknown silicon.
 * Returns 0 on success, nonzero when no firmware matches the chip.
 */
1689 bxe_init_firmware(struct bxe_softc *sc)
1696 bxe_init_e1_firmware(sc);
1697 else if (CHIP_IS_E1H(sc))
1698 bxe_init_e1h_firmware(sc);
1700 BXE_PRINTF("%s(%d): No firmware to support chip revision!\n",
1701 __FILE__, __LINE__);
/*
 * Copy the loader-tunable knobs (interrupt mode, RSS multi-queue mode,
 * TSO, coalescing ticks, MRRS, DCC) into the softc, sanity-checking
 * each value and falling back to safe defaults for out-of-range input.
 * Called once at attach, before any interrupt allocation.
 */
1709 bxe_tunables_set(struct bxe_softc *sc)
1712 * Get our starting point for interrupt mode/number of queues.
1713 * We will progressively step down from MSI-X to MSI to INTx
1714 * and reduce the number of receive queues as necessary to
1715 * match the system capabilities.
1717 sc->multi_mode = bxe_multi_mode;
1718 sc->int_mode = bxe_int_mode;
1719 sc->tso_enable = bxe_tso_enable;
1722 * Verify the Priority -> Receive Queue mappings.
1724 if (sc->int_mode > 0) {
1725 /* Multi-queue modes require MSI/MSI-X. */
1726 switch (sc->multi_mode) {
1727 case ETH_RSS_MODE_DISABLED:
1728 /* No multi-queue mode requested. */
1731 case ETH_RSS_MODE_REGULAR:
1732 if (sc->int_mode > 1) {
1734 * Assume we can use MSI-X
1735 * (max of 16 receive queues).
1737 sc->num_queues = min((bxe_queue_count ?
1738 bxe_queue_count : mp_ncpus), MAX_CONTEXT);
1741 * Assume we can use MSI
1742 * (max of 7 receive queues).
1744 sc->num_queues = min((bxe_queue_count ?
1745 bxe_queue_count : mp_ncpus),
1746 BXE_MSI_VECTOR_COUNT - 1);
/* Unknown multi_mode value: warn and disable multi-queue. */
1751 "%s(%d): Unsupported multi_mode parameter (%d), "
1752 "disabling multi-queue support!\n", __FILE__,
1753 __LINE__, sc->multi_mode);
1754 sc->multi_mode = ETH_RSS_MODE_DISABLED;
1759 /* User has forced INTx mode. */
1760 sc->multi_mode = ETH_RSS_MODE_DISABLED;
1764 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1765 "%s(): Requested: int_mode = %d, multi_mode = %d num_queues = %d\n",
1766 __FUNCTION__, sc->int_mode, sc->multi_mode, sc->num_queues);
1768 sc->stats_enable = TRUE;
1770 /* Select the host coalescing tick count values (limit values). */
1771 if (bxe_tx_ticks > 100) {
1772 BXE_PRINTF("%s(%d): bxe_tx_ticks too large "
1773 "(%d), setting default value of 50.\n",
1774 __FILE__, __LINE__, bxe_tx_ticks);
1777 sc->tx_ticks = bxe_tx_ticks;
1779 if (bxe_rx_ticks > 100) {
1780 BXE_PRINTF("%s(%d): bxe_rx_ticks too large "
1781 "(%d), setting default value of 25.\n",
1782 __FILE__, __LINE__, bxe_rx_ticks);
1785 sc->rx_ticks = bxe_rx_ticks;
1787 /* Select the PCIe maximum read request size (MRRS). */
1791 sc->mrrs = bxe_mrrs;
1793 /* Check for DCC support. */
1794 if (bxe_dcc_enable == FALSE)
1795 sc->dcc_enable = FALSE;
1797 sc->dcc_enable = TRUE;
1802 * Allocates PCI resources from OS.
1805 * 0 = Success, !0 = Failure
/*
 * Map the device's PCI BARs: BAR0 (device registers / internal memory)
 * and BAR2 (doorbell memory), caching the bus tag/handle and the KVA
 * for each.  Returns 0 on success, nonzero on allocation failure;
 * bxe_pci_resources_free() is the matching teardown.
 */
1808 bxe_pci_resources_alloc(struct bxe_softc *sc)
1812 DBENTER(BXE_VERBOSE_LOAD);
1815 * Allocate PCI memory resources for BAR0.
1816 * This includes device registers and internal
1820 sc->bxe_res = bus_alloc_resource_any(sc->dev,
1821 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1822 if (sc->bxe_res == NULL) {
1823 BXE_PRINTF("%s(%d):PCI BAR0 memory allocation failed\n",
1824 __FILE__, __LINE__);
1826 goto bxe_pci_resources_alloc_exit;
1829 /* Get OS resource handles for BAR0 memory. */
1830 sc->bxe_btag = rman_get_bustag(sc->bxe_res);
1831 sc->bxe_bhandle = rman_get_bushandle(sc->bxe_res);
1832 sc->bxe_vhandle = (vm_offset_t) rman_get_virtual(sc->bxe_res);
1835 * Allocate PCI memory resources for BAR2.
1836 * Doorbell (DB) memory.
1839 sc->bxe_db_res = bus_alloc_resource_any(sc->dev,
1840 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1841 if (sc->bxe_db_res == NULL) {
1842 BXE_PRINTF("%s(%d): PCI BAR2 memory allocation failed\n",
1843 __FILE__, __LINE__);
1845 goto bxe_pci_resources_alloc_exit;
1848 /* Get OS resource handles for BAR2 memory. */
1849 sc->bxe_db_btag = rman_get_bustag(sc->bxe_db_res);
1850 sc->bxe_db_bhandle = rman_get_bushandle(sc->bxe_db_res);
1851 sc->bxe_db_vhandle = (vm_offset_t) rman_get_virtual(sc->bxe_db_res);
1853 bxe_pci_resources_alloc_exit:
1854 DBEXIT(BXE_VERBOSE_LOAD);
1860 * Frees PCI resources allocated in bxe_pci_resources_alloc().
/*
 * Release the PCI BAR mappings created by bxe_pci_resources_alloc():
 * BAR0 (registers/internal memory) and BAR2 (doorbells).
 *
 * Fixed: the trailing trace call was DBENTER(BXE_VERBOSE_UNLOAD),
 * duplicating the entry marker and never logging the exit; it must be
 * DBEXIT so the enter/exit debug trace pairs up.
 */
1866 bxe_pci_resources_free(struct bxe_softc *sc)
1868 DBENTER(BXE_VERBOSE_UNLOAD);
1870 /* Release the PCIe BAR0 mapped memory. */
1871 if (sc->bxe_res != NULL) {
1872 bus_release_resource(sc->dev, SYS_RES_MEMORY,
1873 PCIR_BAR(0), sc->bxe_res);
1876 /* Release the PCIe BAR2 (doorbell) mapped memory. */
1877 if (sc->bxe_db_res != NULL) {
1878 bus_release_resource(sc->dev, SYS_RES_MEMORY,
1879 PCIR_BAR(2), sc->bxe_db_res);
1882 DBEXIT(BXE_VERBOSE_UNLOAD);
1887 * Determines the media reported to the OS by examining
1888 * the installed PHY type.
1891 * 0 = Success, !0 = Failure
/*
 * Determine the media type to report to the OS (sc->media) from the
 * external PHY type encoded in the NVRAM link configuration.
 * Returns 0 on success, nonzero for failed/unconnected PHY types
 * (default/failure arms are elided from this extract).
 */
1894 bxe_media_detect(struct bxe_softc *sc)
1900 /* Identify supported media based on the PHY type. */
1901 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
1902 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1903 DBPRINT(sc, BXE_INFO_LOAD,
1904 "%s(): Found 10GBase-CX4 media.\n", __FUNCTION__);
1905 sc->media = IFM_10G_CX4;
1907 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
1908 /* Technically 10GBase-KR but report as 10GBase-SR*/
1909 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
1910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
1911 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
1912 DBPRINT(sc, BXE_INFO_LOAD,
1913 "%s(): Found 10GBase-SR media.\n", __FUNCTION__);
1914 sc->media = IFM_10G_SR;
1916 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1917 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
1918 DBPRINT(sc, BXE_INFO_LOAD,
1919 "%s(): Found 10Gb twinax media.\n", __FUNCTION__);
1920 sc->media = IFM_10G_TWINAX;
1922 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
1923 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1924 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
1925 DBPRINT(sc, BXE_INFO_LOAD,
1926 "%s(): Found 10GBase-T media.\n", __FUNCTION__);
1927 sc->media = IFM_10G_T;
/* Failure / not-connected PHY types fall through to the (elided) error path. */
1929 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
1930 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
1941 * Device attach function.
1943 * Allocates device resources, performs secondary chip identification,
1944 * resets and initializes the hardware, and initializes driver instance
1948 * 0 = Success, Positive value on failure.
/*
 * Device attach: allocate PCI/BAR resources, read and validate hardware
 * info from shared memory, detect media, set up firmware pointers,
 * allocate interrupts and DMA memory, create and attach the ifnet, then
 * hook up ISRs and sysctls.  Returns 0 on success, positive on failure;
 * the bxe_attach_fail unwind path is elided from this extract.
 */
1951 bxe_attach(device_t dev)
1953 struct bxe_softc *sc;
1957 sc = device_get_softc(dev);
1958 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET);
1961 sc->bxe_unit = device_get_unit(dev);
1962 sc->bxe_func = pci_get_function(dev);
1964 sc->state = BXE_STATE_CLOSED;
1967 DBPRINT(sc, BXE_FATAL, "%s(): ************************\n",
1969 DBPRINT(sc, BXE_FATAL, "%s(): ** Debug mode enabled **\n",
1971 DBPRINT(sc, BXE_FATAL, "%s(): ************************\n",
1973 DBPRINT(sc, BXE_FATAL, "%s(): sc vaddr = 0x%08X:%08X\n",
1974 __FUNCTION__, (uint32_t) U64_HI(sc), (uint32_t) U64_LO(sc));
1976 /* Get the user configurable values for driver load. */
1977 bxe_tunables_set(sc);
1979 bxe_mutexes_alloc(sc);
1981 /* Prepare tick routine. */
1982 callout_init_mtx(&sc->bxe_tick_callout, &sc->bxe_core_mtx, 0);
1984 /* Enable bus master capability */
1985 pci_enable_busmaster(dev);
1987 /* Enable PCI BAR mapped memory for register access. */
1988 rc = bxe_pci_resources_alloc(sc);
1990 BXE_PRINTF("%s(%d): Error allocating PCI resources!\n",
1991 __FILE__, __LINE__);
1992 goto bxe_attach_fail;
1995 /* Put indirect address registers into a sane state. */
1996 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
1997 PCICFG_VENDOR_ID_OFFSET, 4);
1998 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(sc) * 16, 0);
1999 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(sc) * 16, 0);
2000 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(sc) * 16, 0);
2001 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(sc) * 16, 0);
2003 /* Get hardware info from shared memory and validate data. */
2004 rc = bxe_hwinfo_function_get(sc);
2006 DBPRINT(sc, BXE_WARN,
2007 "%s(): Failed to get hardware info!\n", __FUNCTION__);
2008 goto bxe_attach_fail;
2011 /* Setup supported media options. */
2012 rc = bxe_media_detect(sc);
2014 BXE_PRINTF("%s(%d): Unknown media (PHY) type!\n",
2015 __FILE__, __LINE__);
2016 goto bxe_attach_fail;
2019 /* Interface entrypoint for media type/status reporting. */
2020 ifmedia_init(&sc->bxe_ifmedia,
2021 IFM_IMASK, bxe_ifmedia_upd, bxe_ifmedia_status);
2023 /* Default interface values. */
2024 ifmedia_add(&sc->bxe_ifmedia,
2025 IFM_ETHER | sc->media | IFM_FDX, 0, NULL);
2026 ifmedia_add(&sc->bxe_ifmedia,
2027 IFM_ETHER | IFM_AUTO, 0, NULL);
2028 ifmedia_set(&sc->bxe_ifmedia,
2029 IFM_ETHER | IFM_AUTO);
2030 sc->bxe_ifmedia.ifm_media =
2031 sc->bxe_ifmedia.ifm_cur->ifm_media;
2033 /* Setup firmware arrays (firmware load comes later). */
2034 rc = bxe_init_firmware(sc);
2036 BXE_PRINTF("%s(%d): Error preparing firmware load!\n",
2037 __FILE__, __LINE__);
2038 goto bxe_attach_fail;
2042 /* Allocate a memory buffer for grcdump output.*/
2043 sc->grcdump_buffer = malloc(BXE_GRCDUMP_BUF_SIZE, M_TEMP, M_NOWAIT);
2044 if (sc->grcdump_buffer == NULL) {
2045 BXE_PRINTF("%s(%d): Failed to allocate grcdump memory "
2046 "buffer!\n", __FILE__, __LINE__);
2051 /* Check that NVRAM contents are valid.*/
2052 rc = bxe_nvram_test(sc);
2054 BXE_PRINTF("%s(%d): Failed NVRAM test!\n",
2055 __FILE__, __LINE__);
2056 goto bxe_attach_fail;
2059 /* Allocate the appropriate interrupts.*/
2060 rc = bxe_interrupt_alloc(sc);
2062 BXE_PRINTF("%s(%d): Interrupt allocation failed!\n",
2063 __FILE__, __LINE__);
2064 goto bxe_attach_fail;
2067 /* Useful for accessing unconfigured devices (i.e. factory diags).*/
2069 sc->bxe_flags |= BXE_NO_MCP_FLAG;
2071 /* If bootcode is not running only initialize port 0. */
2072 if (nomcp && BP_PORT(sc)) {
2074 "%s(%d): Second device disabled (no bootcode), "
2075 "exiting...\n", __FILE__, __LINE__);
2077 goto bxe_attach_fail;
2080 /* Check if PXE/UNDI is still active and unload it. */
2082 bxe_undi_unload(sc);
2085 * Select the RX and TX ring sizes. The actual
2086 * ring size for TX is complicated by the fact
2087 * that a single TX frame may be broken up into
2088 * many buffer descriptors (tx_start_bd,
2089 * tx_parse_bd, tx_data_bd). In the best case,
2090 * there are always at least two BD's required
2091 * so we'll assume the best case here.
2093 sc->tx_ring_size = (USABLE_TX_BD >> 1);
2094 sc->rx_ring_size = USABLE_RX_BD;
2096 /* Assume receive IP/TCP/UDP checksum is enabled. */
2097 /* ToDo: Change when IOCTL changes checksum offload? */
2103 /* Assume a standard 1500 byte MTU size for mbuf allocations. */
2104 sc->mbuf_alloc_size = MCLBYTES;
2106 /* Allocate DMA memory resources. */
2107 rc = bxe_host_structures_alloc(sc->dev);
2109 BXE_PRINTF("%s(%d): DMA memory allocation failed!\n",
2110 __FILE__, __LINE__);
2111 goto bxe_attach_fail;
2114 /* Allocate a FreeBSD ifnet structure. */
2115 ifp = sc->bxe_ifp = if_alloc(IFT_ETHER);
2117 BXE_PRINTF("%s(%d): Interface allocation failed!\n",
2118 __FILE__, __LINE__);
2120 goto bxe_attach_fail;
2123 /* Initialize the FreeBSD ifnet interface. */
2125 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2127 /* Written by driver before attach, read-only afterwards. */
2128 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2130 /* Driver entrypoints from the network interface. */
2131 ifp->if_ioctl = bxe_ioctl;
2132 ifp->if_start = bxe_tx_start;
2133 #if __FreeBSD_version >= 800000
2134 ifp->if_transmit = bxe_tx_mq_start;
2135 ifp->if_qflush = bxe_mq_flush;
2142 ifp->if_init = bxe_init;
2143 ifp->if_mtu = ETHERMTU;
2144 ifp->if_hwassist = BXE_IF_HWASSIST;
2145 ifp->if_capabilities = BXE_IF_CAPABILITIES;
2146 /* TPA not enabled by default. */
2147 ifp->if_capenable = BXE_IF_CAPABILITIES & ~IFCAP_LRO;
2148 ifp->if_baudrate = IF_Gbps(10UL);
2150 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
2152 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2153 IFQ_SET_READY(&ifp->if_snd);
2155 /* Attach to the Ethernet interface list. */
2156 ether_ifattach(ifp, sc->link_params.mac_addr);
2158 /* Attach the interrupts to the interrupt handlers. */
2159 rc = bxe_interrupt_attach(sc);
2161 BXE_PRINTF("%s(%d): Interrupt allocation failed!\n",
2162 __FILE__, __LINE__);
2163 goto bxe_attach_fail;
2166 /* Print important adapter info for the user. */
2167 bxe_print_adapter_info(sc);
2169 /* Add the supported sysctls to the kernel. */
2170 bxe_add_sysctls(sc);
2176 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET);
2182 * Supported link settings.
2184 * Examines hardware configuration present in NVRAM and
2185 * determines the link settings that are supported between
2186 * the external PHY and the switch.
2192 * Sets sc->port.supported
2193 * Sets sc->link_params.phy_addr
/*
 * bxe_link_settings_supported()
 *
 * Builds sc->port.supported (a bitmask of SUPPORTED_* speed/pause flags)
 * from the external PHY type encoded in NVRAM (link_params.ext_phy_config),
 * then clears any speeds that link_params.speed_cap_mask disallows.
 *
 * NOTE(review): this extract is missing lines (case labels, break
 * statements, braces); comments below annotate only the visible code.
 */
2196 bxe_link_settings_supported(struct bxe_softc *sc, uint32_t switch_cfg)
2198 uint32_t ext_phy_type;
2201 DBENTER(BXE_VERBOSE_PHY);
2202 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): switch_cfg = 0x%08X\n",
2203 __FUNCTION__, switch_cfg);
2206 /* Get the link settings supported by the external PHY. */
2207 switch (switch_cfg) {
/* 1Gb SerDes switch: decode the SerDes external PHY type from NVRAM. */
2210 SERDES_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
2212 DBPRINT(sc, BXE_VERBOSE_PHY,
2213 "%s(): 1G switch w/ ext_phy_type = "
2214 "0x%08X\n", __FUNCTION__, ext_phy_type);
2216 switch (ext_phy_type) {
2217 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2218 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): 1G Direct.\n",
/* Direct 1G connection: advertise 10/100/1000/2500 plus pause. */
2221 sc->port.supported |=
2222 (SUPPORTED_10baseT_Half |
2223 SUPPORTED_10baseT_Full |
2224 SUPPORTED_100baseT_Half |
2225 SUPPORTED_100baseT_Full |
2226 SUPPORTED_1000baseT_Full |
2227 SUPPORTED_2500baseX_Full |
2232 SUPPORTED_Asym_Pause);
2235 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2236 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): 1G 5482\n",
/* BCM5482 PHY: same as direct but without 2.5G support. */
2239 sc->port.supported |=
2240 (SUPPORTED_10baseT_Half |
2241 SUPPORTED_10baseT_Full |
2242 SUPPORTED_100baseT_Half |
2243 SUPPORTED_100baseT_Full |
2244 SUPPORTED_1000baseT_Full |
2249 SUPPORTED_Asym_Pause);
/* Unrecognized SerDes PHY type in NVRAM: give up. */
2254 "%s(%d): Bad NVRAM 1Gb PHY configuration data "
2255 "(ext_phy_config=0x%08X).\n",
2257 sc->link_params.ext_phy_config);
2258 goto bxe_link_settings_supported_exit;
/* Read the SerDes PHY MDIO address from the NIG (per-port stride 0x10). */
2262 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + (port * 0x10));
2264 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): phy_addr = 0x%08X\n",
2265 __FUNCTION__, sc->port.phy_addr);
2268 case SWITCH_CFG_10G:
/* 10Gb XGXS switch: decode the XGXS external PHY type from NVRAM. */
2270 XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
2273 sc, BXE_VERBOSE_PHY,
2274 "%s(): 10G switch w/ ext_phy_type = 0x%08X\n",
2275 __FUNCTION__, ext_phy_type);
2277 switch (ext_phy_type) {
2278 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2279 DBPRINT(sc, BXE_VERBOSE_PHY,
2280 "%s(): 10G switch w/ direct connect.\n",
/* Direct 10G connect: all speeds from 10M through 10G plus pause. */
2283 sc->port.supported |=
2284 (SUPPORTED_10baseT_Half |
2285 SUPPORTED_10baseT_Full |
2286 SUPPORTED_100baseT_Half |
2287 SUPPORTED_100baseT_Full |
2288 SUPPORTED_1000baseT_Full |
2289 SUPPORTED_2500baseX_Full |
2290 SUPPORTED_10000baseT_Full |
2295 SUPPORTED_Asym_Pause);
2298 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2299 DBPRINT(sc, BXE_VERBOSE_PHY,
2300 "ext_phy_type 0x%x (8072)\n",ext_phy_type);
2302 sc->port.supported |=
2303 (SUPPORTED_10000baseT_Full |
2304 SUPPORTED_1000baseT_Full |
2308 SUPPORTED_Asym_Pause);
2311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
2313 BXE_VERBOSE_PHY,"ext_phy_type 0x%x (8073)\n",
/* BCM8073 additionally supports 2.5G (vs. the 8072 above). */
2316 sc->port.supported |=
2317 (SUPPORTED_10000baseT_Full |
2318 SUPPORTED_2500baseX_Full |
2319 SUPPORTED_1000baseT_Full |
2323 SUPPORTED_Asym_Pause);
2326 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2327 DBPRINT(sc, BXE_VERBOSE_PHY,
2328 "%s(): 10G switch w/ 8705.\n",__FUNCTION__);
/* BCM8705 is 10G-only. */
2330 sc->port.supported |=
2331 (SUPPORTED_10000baseT_Full |
2334 SUPPORTED_Asym_Pause);
2337 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2338 DBPRINT(sc, BXE_VERBOSE_PHY,
2339 "%s(): 10G switch w/ 8706.\n",
2342 sc->port.supported |=
2343 (SUPPORTED_10000baseT_Full |
2344 SUPPORTED_1000baseT_Full |
2347 SUPPORTED_Asym_Pause);
2350 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
2351 DBPRINT(sc, BXE_VERBOSE_PHY,
2352 "%s(): 10G switch w/ 8726.\n",
2355 sc->port.supported |=
2356 (SUPPORTED_10000baseT_Full |
2359 SUPPORTED_Asym_Pause);
2362 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2363 DBPRINT(sc, BXE_VERBOSE_PHY,"ext_phy_type 0x%x (8727)\n",
2366 sc->port.supported |=
2367 (SUPPORTED_10000baseT_Full |
2368 SUPPORTED_1000baseT_Full |
2372 SUPPORTED_Asym_Pause);
2375 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2376 DBPRINT(sc, BXE_VERBOSE_PHY,
2377 "%s(): 10G switch w/ SFX7101.\n",
2380 sc->port.supported |=
2381 (SUPPORTED_10000baseT_Full |
2385 SUPPORTED_Asym_Pause);
2388 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
2389 DBPRINT(sc, BXE_VERBOSE_PHY,
2390 "ext_phy_type 0x%x (BCM8481)\n",
/* BCM8481 is a 10GBase-T PHY: copper speeds 10M..10G. */
2393 sc->port.supported |=
2394 (SUPPORTED_10baseT_Half |
2395 SUPPORTED_10baseT_Full |
2396 SUPPORTED_100baseT_Half |
2397 SUPPORTED_100baseT_Full |
2398 SUPPORTED_1000baseT_Full |
2399 SUPPORTED_10000baseT_Full |
2403 SUPPORTED_Asym_Pause);
2406 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2407 DBPRINT(sc, BXE_WARN,
2408 "%s(): 10G XGXS PHY failure detected.\n",
/* Unrecognized XGXS PHY type in NVRAM: give up. */
2413 "%s(%d): Bad NVRAM 10Gb PHY configuration data "
2414 "(ext_phy_config=0x%08X).\n",
2416 sc->link_params.ext_phy_config);
2417 goto bxe_link_settings_supported_exit;
/* Read the XGXS PHY MDIO address from the NIG (per-port stride 0x18). */
2421 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR +(port * 0x18));
/* Neither 1G nor 10G switch configuration: NVRAM is inconsistent. */
2425 DBPRINT(sc, BXE_WARN, "%s(): BAD switch configuration "
2426 "(link_config = 0x%08X)\n", __FUNCTION__,
2427 sc->port.link_config);
2428 goto bxe_link_settings_supported_exit;
2431 sc->link_params.phy_addr = sc->port.phy_addr;
2433 /* Mask out unsupported speeds according to NVRAM. */
2434 if ((sc->link_params.speed_cap_mask &
2435 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) == 0)
2436 sc->port.supported &= ~SUPPORTED_10baseT_Half;
2438 if ((sc->link_params.speed_cap_mask &
2439 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) == 0)
2440 sc->port.supported &= ~SUPPORTED_10baseT_Full;
2442 if ((sc->link_params.speed_cap_mask &
2443 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) == 0)
2444 sc->port.supported &= ~SUPPORTED_100baseT_Half;
2446 if ((sc->link_params.speed_cap_mask &
2447 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) == 0)
2448 sc->port.supported &= ~SUPPORTED_100baseT_Full;
2450 if ((sc->link_params.speed_cap_mask &
2451 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) == 0)
2452 sc->port.supported &= ~(SUPPORTED_1000baseT_Half |
2453 SUPPORTED_1000baseT_Full);
2455 if ((sc->link_params.speed_cap_mask &
2456 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) == 0)
2457 sc->port.supported &= ~SUPPORTED_2500baseX_Full;
2459 if ((sc->link_params.speed_cap_mask &
2460 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) == 0)
2461 sc->port.supported &= ~SUPPORTED_10000baseT_Full;
2463 DBPRINT(sc, BXE_VERBOSE_PHY,
2464 "%s(): Supported link settings = 0x%b\n", __FUNCTION__,
2465 sc->port.supported, BXE_SUPPORTED_PRINTFB);
2467 bxe_link_settings_supported_exit:
2469 DBEXIT(BXE_VERBOSE_PHY);
2473 * Requested link settings.
/*
 * bxe_link_settings_requested()
 *
 * Translates the NVRAM link_config (PORT_FEATURE_LINK_SPEED_*) into the
 * driver's requested settings: link_params.req_line_speed,
 * link_params.req_duplex, link_params.req_flow_ctrl and
 * sc->port.advertising.  Each requested speed is validated against
 * sc->port.supported (built by bxe_link_settings_supported()); an
 * unsupported request is treated as an NVRAM configuration error.
 *
 * NOTE(review): this extract is missing lines (case bodies are
 * truncated); comments annotate only the visible code.
 */
2479 bxe_link_settings_requested(struct bxe_softc *sc)
2481 uint32_t ext_phy_type;
2482 DBENTER(BXE_VERBOSE_PHY);
/* Default to full duplex; half-duplex cases override this below. */
2484 sc->link_params.req_duplex = MEDIUM_FULL_DUPLEX;
2486 switch (sc->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
2488 case PORT_FEATURE_LINK_SPEED_AUTO:
2489 if (sc->port.supported & SUPPORTED_Autoneg) {
2490 sc->link_params.req_line_speed |= SPEED_AUTO_NEG;
2491 sc->port.advertising = sc->port.supported;
2493 ext_phy_type = XGXS_EXT_PHY_TYPE(
2494 sc->link_params.ext_phy_config);
2496 if ((ext_phy_type ==
2497 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
2499 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
2500 /* Force 10G, no autonegotiation. */
2501 sc->link_params.req_line_speed = SPEED_10000;
2502 sc->port.advertising =
2503 ADVERTISED_10000baseT_Full |
2508 DBPRINT(sc, BXE_FATAL,
2509 "%s(): NVRAM config error. Invalid "
2510 "link_config (0x%08X) - Autoneg not supported!\n",
2511 __FUNCTION__, sc->port.link_config);
2512 goto bxe_link_settings_requested_exit;
2515 case PORT_FEATURE_LINK_SPEED_10M_FULL:
2516 if (sc->port.supported & SUPPORTED_10baseT_Full) {
2517 sc->link_params.req_line_speed = SPEED_10;
2518 sc->port.advertising = ADVERTISED_10baseT_Full |
2521 DBPRINT(sc, BXE_FATAL,
2522 "%s(): NVRAM config error. Invalid "
2523 "link_config (0x%08X) - speed_cap_mask 0x%08X\n",
2524 __FUNCTION__, sc->port.link_config,
2525 sc->link_params.speed_cap_mask);
2526 goto bxe_link_settings_requested_exit;
2529 case PORT_FEATURE_LINK_SPEED_10M_HALF:
2530 if (sc->port.supported & SUPPORTED_10baseT_Half) {
2531 sc->link_params.req_line_speed = SPEED_10;
2532 sc->link_params.req_duplex = MEDIUM_HALF_DUPLEX;
2533 sc->port.advertising = ADVERTISED_10baseT_Half |
2536 DBPRINT(sc, BXE_FATAL,
2537 "%s(): NVRAM config error. Invalid "
2538 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2539 __FUNCTION__, sc->port.link_config,
2540 sc->link_params.speed_cap_mask);
2541 goto bxe_link_settings_requested_exit;
2544 case PORT_FEATURE_LINK_SPEED_100M_FULL:
2545 if (sc->port.supported & SUPPORTED_100baseT_Full) {
2546 sc->link_params.req_line_speed = SPEED_100;
2547 sc->port.advertising = ADVERTISED_100baseT_Full |
2550 DBPRINT(sc, BXE_FATAL,
2551 "%s(): NVRAM config error. Invalid "
2552 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2553 __FUNCTION__, sc->port.link_config,
2554 sc->link_params.speed_cap_mask);
2555 goto bxe_link_settings_requested_exit;
2558 case PORT_FEATURE_LINK_SPEED_100M_HALF:
2559 if (sc->port.supported & SUPPORTED_100baseT_Half) {
2560 sc->link_params.req_line_speed = SPEED_100;
2561 sc->link_params.req_duplex = MEDIUM_HALF_DUPLEX;
2562 sc->port.advertising = ADVERTISED_100baseT_Half |
2565 DBPRINT(sc, BXE_FATAL,
2566 "%s(): NVRAM config error. Invalid "
2567 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2568 __FUNCTION__, sc->port.link_config,
2569 sc->link_params.speed_cap_mask);
2570 goto bxe_link_settings_requested_exit;
2573 case PORT_FEATURE_LINK_SPEED_1G:
2574 if (sc->port.supported & SUPPORTED_1000baseT_Full) {
2575 sc->link_params.req_line_speed = SPEED_1000;
2576 sc->port.advertising = ADVERTISED_1000baseT_Full |
2579 DBPRINT(sc, BXE_FATAL,
2580 "%s(): NVRAM config error. Invalid "
2581 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2582 __FUNCTION__, sc->port.link_config,
2583 sc->link_params.speed_cap_mask);
2584 goto bxe_link_settings_requested_exit;
2587 case PORT_FEATURE_LINK_SPEED_2_5G:
2588 if (sc->port.supported & SUPPORTED_2500baseX_Full) {
2589 sc->link_params.req_line_speed = SPEED_2500;
2590 sc->port.advertising = ADVERTISED_2500baseX_Full |
2593 DBPRINT(sc, BXE_FATAL,
2594 "%s(): NVRAM config error. Invalid "
2595 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2596 __FUNCTION__, sc->port.link_config,
2597 sc->link_params.speed_cap_mask);
2598 goto bxe_link_settings_requested_exit;
/* All three 10G media variants map to the same requested speed. */
2601 case PORT_FEATURE_LINK_SPEED_10G_CX4:
2602 case PORT_FEATURE_LINK_SPEED_10G_KX4:
2603 case PORT_FEATURE_LINK_SPEED_10G_KR:
2604 if (sc->port.supported & SUPPORTED_10000baseT_Full) {
2605 sc->link_params.req_line_speed = SPEED_10000;
2606 sc->port.advertising = ADVERTISED_10000baseT_Full |
2609 DBPRINT(sc, BXE_FATAL,
2610 "%s(): NVRAM config error. Invalid "
2611 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2612 __FUNCTION__, sc->port.link_config,
2613 sc->link_params.speed_cap_mask);
2614 goto bxe_link_settings_requested_exit;
/* Unknown speed selection: fall back to advertising what is supported. */
2618 DBPRINT(sc, BXE_FATAL, "%s(): NVRAM config error. BAD link "
2619 "speed - link_config = 0x%08X\n", __FUNCTION__,
2620 sc->port.link_config);
2621 sc->link_params.req_line_speed = 0;
2622 sc->port.advertising = sc->port.supported;
2626 DBPRINT(sc, BXE_VERBOSE_PHY,
2627 "%s(): req_line_speed = %d, req_duplex = %d\n",
2628 __FUNCTION__, sc->link_params.req_line_speed,
2629 sc->link_params.req_duplex);
/* Flow control request also comes from NVRAM link_config. */
2631 sc->link_params.req_flow_ctrl =
2632 sc->port.link_config & PORT_FEATURE_FLOW_CONTROL_MASK;
/* AUTO flow control needs autoneg; without it, disable flow control. */
2634 if ((sc->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
2635 !(sc->port.supported & SUPPORTED_Autoneg))
2636 sc->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
2638 DBPRINT(sc, BXE_VERBOSE_PHY,
2639 "%s(): req_flow_ctrl = 0x%08X, advertising = 0x%08X\n",
2640 __FUNCTION__, sc->link_params.req_flow_ctrl,
2641 sc->port.advertising);
2643 bxe_link_settings_requested_exit:
2645 DBEXIT(BXE_VERBOSE_PHY);
2650 * Get function specific hardware configuration.
2652 * Multiple function devices such as the BCM57711E have configuration
2653 * information that is specific to each PCIe function of the controller.
2654 * The number of PCIe functions is not necessarily the same as the number
2655 * of Ethernet ports supported by the device.
2658 * 0 = Success, !0 = Failure
/*
 * bxe_hwinfo_function_get()
 *
 * Reads PCIe-function-specific configuration: common and port config
 * first, then (on E1H multi-function parts) the per-VN mf_config,
 * outer-VLAN tag, firmware sequence number and the function's factory
 * MAC address from bootcode shared memory.
 *
 * NOTE(review): lines are elided in this extract (e.g. the 'func'
 * declaration and several closing braces are not visible).
 */
2661 bxe_hwinfo_function_get(struct bxe_softc *sc)
2663 uint32_t mac_hi, mac_lo, val;
2666 DBENTER(BXE_VERBOSE_LOAD);
2671 /* Get the common hardware configuration first. */
2672 bxe_hwinfo_common_get(sc);
2674 /* Assume no outer VLAN/multi-function support. */
2675 sc->e1hov = sc->e1hmf = 0;
2677 /* Get config info for mf enabled devices. */
2678 if (CHIP_IS_E1H(sc)) {
2679 sc->mf_config[BP_E1HVN(sc)] =
2680 SHMEM_RD(sc, mf_cfg.func_mf_config[func].config);
/* A non-default E1HOV tag indicates multi-function mode. */
2681 val = (SHMEM_RD(sc, mf_cfg.func_mf_config[func].e1hov_tag) &
2682 FUNC_MF_CFG_E1HOV_TAG_MASK);
2683 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2684 sc->e1hov = (uint16_t) val;
2689 goto bxe_hwinfo_function_get_exit;
/* Port-level config, then this function's firmware mailbox sequence. */
2695 bxe_hwinfo_port_get(sc);
2696 sc->fw_seq = SHMEM_RD(sc, func_mb[func].drv_mb_header) &
2697 DRV_MSG_SEQ_NUMBER_MASK;
2702 * Fetch the factory configured MAC address for multi function
2703 * devices. If this is not a multi-function device then the MAC
2704 * address was already read in the bxe_hwinfo_port_get() routine.
2705 * The MAC addresses used by the port are not the same as the MAC
2706 * addressed used by the function.
2709 mac_hi = SHMEM_RD(sc, mf_cfg.func_mf_config[func].mac_upper);
2710 mac_lo = SHMEM_RD(sc, mf_cfg.func_mf_config[func].mac_lower);
2712 if ((mac_lo == 0) && (mac_hi == 0)) {
2713 BXE_PRINTF("%s(%d): Invalid Ethernet address!\n",
2714 __FILE__, __LINE__);
/* Unpack the 48-bit MAC: upper 16 bits in mac_hi, lower 32 in mac_lo. */
2717 sc->link_params.mac_addr[0] = (u_char)(mac_hi >> 8);
2718 sc->link_params.mac_addr[1] = (u_char)(mac_hi);
2719 sc->link_params.mac_addr[2] = (u_char)(mac_lo >> 24);
2720 sc->link_params.mac_addr[3] = (u_char)(mac_lo >> 16);
2721 sc->link_params.mac_addr[4] = (u_char)(mac_lo >> 8);
2722 sc->link_params.mac_addr[5] = (u_char)(mac_lo);
2727 bxe_hwinfo_function_get_exit:
2728 DBEXIT(BXE_VERBOSE_LOAD);
2734 * Get port specific hardware configuration.
2736 * Multiple port devices such as the BCM57710 have configuration
2737 * information that is specific to each Ethernet port of the
2738 * controller. This function reads that configuration
2739 * information from the bootcode's shared memory and saves it
2743 * 0 = Success, !0 = Failure
/*
 * bxe_hwinfo_port_get()
 *
 * Reads per-port configuration from bootcode shared memory: lane and
 * external-PHY config, speed capability mask, link config, XGXS RX/TX
 * preemphasis values and the port MAC address.  Also derives the
 * supported and requested link settings via
 * bxe_link_settings_supported()/bxe_link_settings_requested().
 *
 * NOTE(review): lines are elided in this extract (e.g. the 'port' and
 * 'i' declarations and some SHMEM_RD call heads are not visible).
 */
2746 bxe_hwinfo_port_get(struct bxe_softc *sc)
2749 uint32_t val, mac_hi, mac_lo;
2751 DBENTER(BXE_VERBOSE_LOAD);
2755 sc->link_params.sc = sc;
2756 sc->link_params.port = port;
2758 /* Fetch several configuration values from bootcode shared memory. */
2759 sc->link_params.lane_config =
2760 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
2761 sc->link_params.ext_phy_config =
2762 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
/* Treat the 8727_NOC variant as a regular 8727 and remember the NOC flag. */
2764 if (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config) ==
2765 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
2766 sc->link_params.ext_phy_config &=
2767 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2768 sc->link_params.ext_phy_config |=
2769 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
2770 sc->link_params.feature_config_flags |=
2771 FEATURE_CONFIG_BCM8727_NOC;
2774 sc->link_params.speed_cap_mask =
2775 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
2776 sc->port.link_config =
2777 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
2780 /* Read the XGXS RX/TX preemphasis values. */
/* Each 32-bit shmem word holds two 16-bit preemphasis entries. */
2781 for (i = 0; i < 2; i++) {
2783 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
2784 sc->link_params.xgxs_config_rx[i << 1] = ((val >> 16) & 0xffff);
2785 sc->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
2788 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
2789 sc->link_params.xgxs_config_tx[i << 1] = ((val >> 16) & 0xffff);
2790 sc->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
2793 /* Fetch the device configured link settings. */
2794 sc->link_params.switch_cfg = sc->port.link_config &
2795 PORT_FEATURE_CONNECTED_SWITCH_MASK;
2797 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
2798 bxe_link_settings_requested(sc);
/* Port MAC address: upper 16 bits in mac_hi, lower 32 bits in mac_lo. */
2800 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
2801 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
2803 if (mac_lo == 0 && mac_hi == 0) {
2804 BXE_PRINTF("%s(%d): No Ethernet address programmed on the "
2805 "controller!\n", __FILE__, __LINE__);
2808 sc->link_params.mac_addr[0] = (u_char)(mac_hi >> 8);
2809 sc->link_params.mac_addr[1] = (u_char)(mac_hi);
2810 sc->link_params.mac_addr[2] = (u_char)(mac_lo >> 24);
2811 sc->link_params.mac_addr[3] = (u_char)(mac_lo >> 16);
2812 sc->link_params.mac_addr[4] = (u_char)(mac_lo >> 8);
2813 sc->link_params.mac_addr[5] = (u_char)(mac_lo);
2816 DBEXIT(BXE_VERBOSE_LOAD);
2822 * Get common hardware configuration.
2824 * Multiple port devices such as the BCM57710 have configuration
2825 * information that is shared between all ports of the Ethernet
2826 * controller. This function reads that configuration
2827 * information from the bootcode's shared memory and saves it
2831 * 0 = Success, !0 = Failure
/*
 * bxe_hwinfo_common_get()
 *
 * Reads configuration shared by all ports: chip id, single/dual-port
 * strap, PCI capabilities, NVRAM flash size, shared-memory base
 * addresses, hardware/feature config, WoL capability and the bootcode
 * version.  Validates shared memory before trusting its contents.
 *
 * NOTE(review): lines are elided in this extract (variable
 * declarations, some case/brace lines are not visible).
 */
2834 bxe_hwinfo_common_get(struct bxe_softc *sc)
2839 DBENTER(BXE_VERBOSE_LOAD);
2842 /* Get the chip revision. */
/* chip_id layout: num[31:16] | rev[15:12] | metal[11:4] | bond[3:0]. */
2843 sc->common.chip_id = sc->link_params.chip_id =
2844 ((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
2845 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0x000f) << 12) |
2846 ((REG_RD(sc, MISC_REG_CHIP_METAL) & 0xff) << 4) |
2847 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf));
2849 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): chip_id = 0x%08X.\n",
2850 __FUNCTION__, sc->common.chip_id);
/* Detect single-port devices from the strap register at 0x2874. */
2852 val = (REG_RD(sc, 0x2874) & 0x55);
2853 if ((sc->common.chip_id & 0x1) ||
2854 (CHIP_IS_E1(sc) && val) || (CHIP_IS_E1H(sc) && (val == 0x55))) {
2855 sc->bxe_flags |= BXE_ONE_PORT_FLAG;
2856 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): Single port device.\n",
2860 /* Identify enabled PCI capabilites (PCIe, MSI-X, etc.). */
2861 bxe_probe_pci_caps(sc);
2863 /* Get the NVRAM size. */
2864 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
2865 sc->common.flash_size = (NVRAM_1MB_SIZE <<
2866 (val & MCPR_NVM_CFG4_FLASH_SIZE));
2868 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): flash_size = 0x%08x (%dKB)\n",
2869 __FUNCTION__, sc->common.flash_size,(sc->common.flash_size >> 10));
2871 /* Find the shared memory base address. */
2872 sc->common.shmem_base = sc->link_params.shmem_base =
2873 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
2874 sc->common.shmem2_base = REG_RD(sc, MISC_REG_GENERIC_CR_0);
2875 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): shmem_base = 0x%08X\n",
2876 __FUNCTION__, sc->common.shmem_base);
2878 /* Make sure the shared memory address is valid. */
/* Outside 0xA0000-0xC0000 means the MCP never initialized shmem. */
2879 if (!sc->common.shmem_base ||
2880 (sc->common.shmem_base < 0xA0000) ||
2881 (sc->common.shmem_base > 0xC0000)) {
2883 BXE_PRINTF("%s(%d): MCP is not active!\n",
2884 __FILE__, __LINE__);
2885 /* ToDo: Remove the NOMCP support. */
2886 sc->bxe_flags |= BXE_NO_MCP_FLAG;
2888 goto bxe_hwinfo_common_get_exit;
2891 /* Make sure the shared memory contents are valid. */
2892 val = SHMEM_RD(sc, validity_map[BP_PORT(sc)]);
2893 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
2894 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
2895 BXE_PRINTF("%s(%d): Invalid NVRAM! Bad validity "
2896 "signature.\n", __FILE__, __LINE__);
2898 goto bxe_hwinfo_common_get_exit;
2901 /* Read the device configuration from shared memory. */
2902 sc->common.hw_config =
2903 SHMEM_RD(sc, dev_info.shared_hw_config.config);
2904 sc->link_params.hw_led_mode = ((sc->common.hw_config &
2905 SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT);
2907 /* Check if we need to override the preemphasis values. */
2908 sc->link_params.feature_config_flags = 0;
2909 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
2910 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
2911 sc->link_params.feature_config_flags |=
2912 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
2914 sc->link_params.feature_config_flags &=
2915 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
2917 /* In multifunction mode, we can't support WoL on a VN. */
2918 if (BP_E1HVN(sc) == 0) {
/* WoL requires PME-in-D3cold support in the PCI PM capability. */
2919 val = REG_RD(sc, PCICFG_OFFSET + PCICFG_PM_CAPABILITY);
2920 sc->bxe_flags |= (val & PCICFG_PM_CAPABILITY_PME_IN_D3_COLD) ?
2921 0 : BXE_NO_WOL_FLAG;
2923 sc->bxe_flags |= BXE_NO_WOL_FLAG;
2925 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): %sWoL capable\n", __FUNCTION__,
2926 (sc->bxe_flags & BXE_NO_WOL_FLAG) ? "Not " : "");
2928 /* Check bootcode version */
2929 sc->common.bc_ver = ((SHMEM_RD(sc, dev_info.bc_rev)) >> 8);
2930 if (sc->common.bc_ver < MIN_BXE_BC_VER) {
2931 BXE_PRINTF("%s(%d): Warning: This driver needs bootcode "
2932 "0x%08X but found 0x%08X, please upgrade!\n",
2933 __FILE__, __LINE__, MIN_BXE_BC_VER, sc->common.bc_ver);
2935 goto bxe_hwinfo_common_get_exit;
2938 bxe_hwinfo_common_get_exit:
2939 DBEXIT(BXE_VERBOSE_LOAD);
2945 * Remove traces of PXE boot by forcing UNDI driver unload.
/*
 * bxe_undi_unload()
 *
 * Detects a lingering UNDI (PXE boot) driver and forcibly unloads it:
 * requests firmware unload on both ports, quiesces the NIG/HC/AEU
 * registers, resets the chip while preserving the NIG port-swap
 * straps, and finally restores this function's firmware sequence.
 *
 * NOTE(review): lines are elided in this extract (the 'func'
 * declaration, some if/brace lines and bxe_func assignments are not
 * visible); comments annotate only the visible code.
 */
2951 bxe_undi_unload(struct bxe_softc *sc)
2953 uint32_t reset_code, swap_en, swap_val, val;
2956 DBENTER(BXE_VERBOSE_LOAD);
2958 /* Check if there is any driver already loaded */
2959 val = REG_RD(sc, MISC_REG_UNPREPARED);
2962 /* Check if it is the UNDI driver. */
2963 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_UNDI);
/* A non-zero DORQ normal-CID offset is the UNDI-active signature. */
2964 val = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
2966 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
2969 DBPRINT(sc, BXE_WARN,
2970 "%s(): UNDI is active! Resetting the device.\n",
2973 /* Clear the UNDI indication. */
2974 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
2976 /* Try to unload UNDI on port 0. */
2978 sc->fw_seq = (SHMEM_RD(sc,
2979 func_mb[sc->bxe_func].drv_mb_header) &
2980 DRV_MSG_SEQ_NUMBER_MASK);
2981 reset_code = bxe_fw_command(sc, reset_code);
2983 /* Check if UNDI is active on port 1. */
2984 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
2986 /* Send "done" for previous unload. */
2987 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
2989 /* Now unload on port 1. */
2991 sc->fw_seq = (SHMEM_RD(sc,
2992 func_mb[sc->bxe_func].drv_mb_header) &
2993 DRV_MSG_SEQ_NUMBER_MASK);
2995 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
2996 bxe_fw_command(sc, reset_code);
2999 /* It's now safe to release the lock. */
3000 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_UNDI);
/* Quiesce the HC, NIG LLH filters and AEU attention mask for this port. */
3002 REG_WR(sc, (BP_PORT(sc) ? HC_REG_CONFIG_1 :
3003 HC_REG_CONFIG_0), 0x1000);
3005 REG_WR(sc, (BP_PORT(sc) ?
3006 NIG_REG_LLH1_BRB1_DRV_MASK :
3007 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
3009 REG_WR(sc, (BP_PORT(sc) ?
3010 NIG_REG_LLH1_BRB1_NOT_MCP :
3011 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
3014 REG_WR(sc, (BP_PORT(sc) ?
3015 MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3016 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
3020 /* Save NIG port swap information. */
3021 swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
3022 swap_en = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
3024 /* Reset the controller. */
3025 REG_WR(sc, GRCBASE_MISC +
3026 MISC_REGISTERS_RESET_REG_1_CLEAR, 0xd3ffffff);
3027 REG_WR(sc, GRCBASE_MISC +
3028 MISC_REGISTERS_RESET_REG_2_CLEAR, 0x00001403);
3030 /* Take the NIG out of reset and restore swap values.*/
3031 REG_WR(sc, GRCBASE_MISC +
3032 MISC_REGISTERS_RESET_REG_1_SET,
3033 MISC_REGISTERS_RESET_REG_1_RST_NIG);
3034 REG_WR(sc, NIG_REG_PORT_SWAP, swap_val);
3035 REG_WR(sc, NIG_REG_STRAP_OVERRIDE, swap_en);
3037 /* Send completion message to the MCP. */
3038 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
3041 * Restore our function and firmware sequence counter.
3043 sc->bxe_func = func;
3044 sc->fw_seq = (SHMEM_RD(sc,
3045 func_mb[sc->bxe_func].drv_mb_header) &
3046 DRV_MSG_SEQ_NUMBER_MASK);
3048 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_UNDI);
3051 DBEXIT(BXE_VERBOSE_LOAD);
3056 * Device detach function.
3058 * Stops the controller, resets the controller, and releases resources.
3061 * 0 on success, !0 = failure.
/*
 * bxe_detach()
 *
 * Device detach entry point: refuses to detach while VLANs are in use,
 * stops the controller if it is open, then tears down the grcdump
 * buffer, interrupts, ifnet/ifmedia, driver resources, PCI resources
 * and mutexes.
 *
 * NOTE(review): lines are elided in this extract (the 'ifp'/'rc'
 * declarations, some lock/brace lines are not visible).
 */
3064 bxe_detach(device_t dev)
3066 struct bxe_softc *sc;
3070 sc = device_get_softc(dev);
3071 DBENTER(BXE_INFO_UNLOAD);
/* Do not detach while the stack still has VLAN interfaces on us. */
3076 if (ifp != NULL && ifp->if_vlantrunk != NULL) {
3077 BXE_PRINTF("%s(%d): Cannot detach while VLANs are in use.\n",
3078 __FILE__, __LINE__);
3080 goto bxe_detach_exit;
3083 /* Stop and reset the controller if it was open. */
3084 if (sc->state != BXE_STATE_CLOSED) {
3086 rc = bxe_stop_locked(sc, UNLOAD_CLOSE);
3087 BXE_CORE_UNLOCK(sc);
3091 /* Free memory buffer for grcdump output.*/
3092 if (sc->grcdump_buffer != NULL)
3093 free(sc->grcdump_buffer, M_TEMP);
3096 /* Clean-up any remaining interrupt resources. */
3097 bxe_interrupt_detach(sc);
3098 bxe_interrupt_free(sc);
3100 /* Release the network interface. */
3102 ether_ifdetach(ifp);
3103 ifmedia_removeall(&sc->bxe_ifmedia);
3105 /* Release all remaining resources. */
3106 bxe_release_resources(sc);
3108 /* Free all PCI resources. */
3109 bxe_pci_resources_free(sc);
3110 pci_disable_busmaster(dev);
3112 bxe_mutexes_free(sc);
3115 DBEXIT(BXE_INFO_UNLOAD);
3121 * Setup a leading connection for the controller.
3124 * 0 = Success, !0 = Failure.
/*
 * bxe_setup_leading()
 *
 * Opens the leading (fp[0]) connection: re-arms the IGU status block,
 * posts a PORT_SETUP ramrod and waits until sc->state reaches
 * BXE_STATE_OPEN.  Returns the bxe_wait_ramrod() result.
 */
3127 bxe_setup_leading(struct bxe_softc *sc)
3131 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
3133 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): Setup leading connection "
3134 "on fp[00].\n", __FUNCTION__);
3136 /* Reset IGU state for the leading connection. */
3137 bxe_ack_sb(sc, sc->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
3139 /* Post a PORT_SETUP ramrod and wait for completion. */
3140 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
3142 /* Wait for the ramrod to complete on the leading connection. */
3143 rc = bxe_wait_ramrod(sc, BXE_STATE_OPEN, 0, &(sc->state), 1);
3145 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
3151 * Stop the leading connection on the controller.
/*
 * bxe_stop_leading()
 *
 * Closes the leading (fp[0]) connection: posts an ETH_HALT ramrod and
 * waits for HALTED, then posts PORT_DELETE and polls the default status
 * block slowpath producer index for its completion.  On success the
 * adapter moves to CLOSING_WAIT4_UNLOAD and fp[0] to CLOSED.
 *
 * NOTE(review): lines are elided in this extract (the 'rc'
 * declaration, timeout handling inside the poll loop and the return
 * are not visible).
 */
3157 bxe_stop_leading(struct bxe_softc *sc)
3159 uint16_t dsb_sp_prod_idx;
3162 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
3163 BXE_VERBOSE_UNLOAD), "%s(): Stop client connection "
3164 "on fp[00].\n", __FUNCTION__);
3166 /* Send the ETH_HALT ramrod. */
3167 sc->fp[0].state = BXE_FP_STATE_HALTING;
3168 bxe_sp_post(sc,RAMROD_CMD_ID_ETH_HALT, 0, 0, sc->fp[0].cl_id, 0);
3170 /* Poll for the ETH_HALT ramrod on the leading connection. */
3171 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED,
3172 0, &(sc->fp[0].state), 1);
3174 DBPRINT(sc, BXE_FATAL, "%s(): Timeout waiting for "
3175 "STATE_HALTED ramrod completion!\n", __FUNCTION__);
3176 goto bxe_stop_leading_exit;
3179 /* Get the default status block SP producer index. */
3180 dsb_sp_prod_idx = *sc->dsb_sp_prod;
3182 /* After HALT we send PORT_DELETE ramrod. */
3183 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
3185 /* Be patient but don't wait forever. */
/* Completion is detected by the SP producer index advancing. */
3187 while (dsb_sp_prod_idx == *sc->dsb_sp_prod) {
3189 DBPRINT(sc, BXE_FATAL, "%s(): Timeout waiting for "
3190 "PORT_DEL ramrod completion!\n", __FUNCTION__);
3199 /* Update the adapter and connection states. */
3200 sc->state = BXE_STATE_CLOSING_WAIT4_UNLOAD;
3201 sc->fp[0].state = BXE_FP_STATE_CLOSED;
3203 bxe_stop_leading_exit:
3208 * Setup a client connection when using multi-queue/RSS.
/*
 * bxe_setup_multi()
 *
 * Opens a non-leading client connection fp[index] for multi-queue/RSS:
 * re-arms its IGU status block, posts a CLIENT_SETUP ramrod and waits
 * until the fastpath state reaches BXE_FP_STATE_OPEN.
 */
3214 bxe_setup_multi(struct bxe_softc *sc, int index)
3216 struct bxe_fastpath *fp;
3219 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
3220 BXE_VERBOSE_UNLOAD), "%s(): Setup client connection "
3221 "on fp[%02d].\n", __FUNCTION__, index);
3223 fp = &sc->fp[index];
3224 /* Reset IGU state. */
3225 bxe_ack_sb(sc, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
3227 /* Post a CLIENT_SETUP ramrod. */
3228 fp->state = BXE_FP_STATE_OPENING;
3229 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, fp->cl_id, 0);
3231 /* Wait for the ramrod to complete. */
3232 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_OPEN, index, &fp->state, 1);
3238 * Stop a client connection.
3240 * Stops an individual client connection on the device. Use
3241 * bxe_stop_leading() for the first/default connection.
3244 * 0 = Success, !0 = Failure.
/*
 * bxe_stop_multi()
 *
 * Closes a non-leading client connection fp[index]: posts ETH_HALT and
 * waits for HALTED, then posts CFC_DEL and waits for the fastpath to
 * reach BXE_FP_STATE_CLOSED.  The leading connection is closed by
 * bxe_stop_leading() instead.
 */
3247 bxe_stop_multi(struct bxe_softc *sc, int index)
3249 struct bxe_fastpath *fp;
3252 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
3253 BXE_VERBOSE_UNLOAD), "%s(): Stop client connection "
3254 "on fp[%02d].\n", __FUNCTION__, index);
3256 fp = &sc->fp[index];
3258 /* Halt the client connection. */
3259 fp->state = BXE_FP_STATE_HALTING;
3260 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
3262 /* Wait for the HALT ramrod completion. */
3263 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED, index, &fp->state, 1);
3265 BXE_PRINTF("%s(%d): fp[%02d] client ramrod halt failed!\n",
3266 __FILE__, __LINE__, index);
3267 goto bxe_stop_multi_exit;
3269 /* Delete the CFC entry. */
3270 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
3272 /* Poll for the DELETE ramrod completion. */
3273 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_CLOSED, index, &fp->state, 1);
3275 bxe_stop_multi_exit:
3280 * Hardware lock for shared, dual-port PHYs.
/*
 * bxe_acquire_phy_lock()
 *
 * Takes the MDIO hardware lock, but only for external PHY types that
 * are shared between the two ports (8072/8073/8726/8727); other PHY
 * types need no cross-port serialization.
 */
3286 bxe_acquire_phy_lock(struct bxe_softc *sc)
3288 uint32_t ext_phy_type;
3290 DBENTER(BXE_VERBOSE_PHY);
3292 ext_phy_type = XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
3293 switch(ext_phy_type){
3294 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3295 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3296 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3297 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3298 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
3303 DBEXIT(BXE_VERBOSE_PHY);
3307 * Hardware unlock for shared, dual-port PHYs.
/*
 * bxe_release_phy_lock()
 *
 * Releases the MDIO hardware lock for the same set of shared dual-port
 * external PHY types taken by bxe_acquire_phy_lock()
 * (8072/8073/8726/8727).
 */
3313 bxe_release_phy_lock(struct bxe_softc *sc)
3315 uint32_t ext_phy_type;
3317 DBENTER(BXE_VERBOSE_PHY);
3318 ext_phy_type = XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
3319 switch(ext_phy_type){
3320 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3321 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3322 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3323 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3324 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
3330 DBEXIT(BXE_VERBOSE_PHY);
/*
 * bxe__link_reset()
 *
 * Resets the link under the PHY lock via bxe_link_reset(); if the
 * bootcode is not running, only logs a warning and skips the reset.
 *
 * NOTE(review): the condition guarding the lock/reset path is elided
 * in this extract — presumably a NOMCP check; confirm in full source.
 */
3339 bxe__link_reset(struct bxe_softc *sc)
3341 DBENTER(BXE_VERBOSE_PHY);
3344 bxe_acquire_phy_lock(sc);
3345 bxe_link_reset(&sc->link_params, &sc->link_vars, 1);
3346 bxe_release_phy_lock(sc);
3348 DBPRINT(sc, BXE_WARN,
3349 "%s(): Bootcode is not running, not resetting link!\n",
3353 DBEXIT(BXE_VERBOSE_PHY);
3357 * Stop the controller.
3360 * 0 = Success, !0 = Failure
/*
 * bxe_stop_locked()
 *
 * Stops the controller (core lock held): halts the tick callout and RX
 * mode, notifies the bootcode, drains TX fastpaths and slowpath queue,
 * disables interrupts, clears MAC/CAM entries, programs WoL per
 * unload_mode, tears down all client connections, negotiates the
 * firmware unload/reset code, resets the chip and frees the RX/TX
 * chains.
 *
 * NOTE(review): lines are elided in this extract (ifp assignment,
 * loop bodies, some braces and the final return are not visible).
 */
3363 bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
3366 struct mac_configuration_cmd *config;
3367 struct bxe_fastpath *fp;
3368 uint32_t reset_code;
3369 uint32_t emac_base, val;
3370 uint8_t entry, *mac_addr;
3371 int count, i, port, rc;
3373 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
3377 rc = reset_code = 0;
3379 BXE_CORE_LOCK_ASSERT(sc);
3381 /* Stop the periodic tick. */
3382 callout_stop(&sc->bxe_tick_callout);
3384 sc->state = BXE_STATE_CLOSING_WAIT4_HALT;
3386 /* Prevent any further RX traffic. */
3387 sc->rx_mode = BXE_RX_MODE_NONE;
3388 bxe_set_storm_rx_mode(sc);
3390 /* Tell the stack the driver is stopped and TX queue is full. */
3392 ifp->if_drv_flags = 0;
3394 /* Tell the bootcode to stop watching for a heartbeat. */
3395 SHMEM_WR(sc, func_mb[BP_FUNC(sc)].drv_pulse_mb,
3396 (DRV_PULSE_ALWAYS_ALIVE | sc->fw_drv_pulse_wr_seq));
3398 /* Stop the statistics updates. */
3399 bxe_stats_handle(sc, STATS_EVENT_STOP);
3401 /* Wait until all TX fastpath tasks have completed. */
3402 for (i = 0; i < sc->num_queues; i++) {
3405 if (fp == NULL || fp->tx_pkt_cons_sb == NULL)
3409 while (bxe_has_tx_work(fp)) {
3415 "%s(%d): Timeout wating for fp[%02d] transmits to complete!\n",
3416 __FILE__, __LINE__, i);
3425 /* Wait until all slowpath tasks have completed. */
3427 while ((sc->spq_left != MAX_SPQ_PENDING) && count--)
3430 /* Disable Interrupts */
3431 bxe_int_disable(sc);
3434 /* Clear the MAC addresses. */
/* E1 uses a CAM-based multicast table; E1H uses MC hash registers. */
3435 if (CHIP_IS_E1(sc)) {
3436 config = BXE_SP(sc, mcast_config);
3437 bxe_set_mac_addr_e1(sc, 0);
3439 for (i = 0; i < config->hdr.length; i++)
3440 CAM_INVALIDATE(&config->config_table[i]);
3442 config->hdr.length = i;
3443 config->hdr.offset = BXE_MAX_MULTICAST * (1 + port);
3444 config->hdr.client_id = BP_CL_ID(sc);
3445 config->hdr.reserved1 = 0;
3447 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
3448 U64_HI(BXE_SP_MAPPING(sc, mcast_config)),
3449 U64_LO(BXE_SP_MAPPING(sc, mcast_config)), 0);
3451 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0);
3452 bxe_set_mac_addr_e1h(sc, 0);
3453 for (i = 0; i < MC_HASH_SIZE; i++)
3454 REG_WR(sc, MC_HASH_OFFSET(sc, i), 0);
3455 REG_WR(sc, MISC_REG_E1HMF_MODE, 0);
3458 /* Determine if any WoL settings needed. */
3459 if (unload_mode == UNLOAD_NORMAL)
3460 /* Driver initiatied WoL is disabled. */
3461 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3462 else if (sc->bxe_flags & BXE_NO_WOL_FLAG) {
3463 /* Driver initiated WoL is disabled, use OOB WoL settings. */
3464 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
3465 if (CHIP_IS_E1H(sc))
3466 REG_WR(sc, MISC_REG_E1HMF_MODE, 0);
3467 } else if (sc->wol) {
/* Program the MAC into the EMAC match registers so WoL can wake us. */
3468 emac_base = BP_PORT(sc) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
3469 mac_addr = sc->link_params.mac_addr;
3470 entry = (BP_E1HVN(sc) + 1) * 8;
3471 val = (mac_addr[0] << 8) | mac_addr[1];
3472 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val);
3473 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3474 (mac_addr[4] << 8) | mac_addr[5];
3475 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
3476 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
3479 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3482 /* Stop all non-leading client connections. */
3483 for (i = 1; i < sc->num_queues; i++) {
3484 if (bxe_stop_multi(sc, i)){
3485 goto bxe_stop_locked_exit;
3489 /* Stop the leading client connection. */
3490 rc = bxe_stop_leading(sc);
3493 bxe_stop_locked_exit:
/* Without an MCP, track load counts locally to pick the reset scope. */
3495 DBPRINT(sc, BXE_INFO,
3496 "%s(): Old No MCP load counts: %d, %d, %d\n",
3497 __FUNCTION__, load_count[0], load_count[1], load_count[2]);
3500 load_count[1 + port]--;
3501 DBPRINT(sc, BXE_INFO,
3502 "%s(): New No MCP load counts: %d, %d, %d\n",
3503 __FUNCTION__, load_count[0], load_count[1], load_count[2]);
3505 if (load_count[0] == 0)
3506 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
3507 else if (load_count[1 + BP_PORT(sc)] == 0)
3508 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
3510 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
3512 /* Tell MCP driver unload is complete. */
3513 reset_code = bxe_fw_command(sc, reset_code);
/* Reset the link only when the whole port/device is going down. */
3516 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
3517 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
3518 bxe__link_reset(sc);
3522 /* Reset the chip */
3523 bxe_reset_chip(sc, reset_code);
3527 /* Report UNLOAD_DONE to MCP */
3529 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
3532 /* Free RX chains and buffers. */
3533 bxe_clear_rx_chains(sc);
3535 /* Free TX chains and buffers. */
3536 bxe_clear_tx_chains(sc);
3538 sc->state = BXE_STATE_CLOSED;
3542 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET |BXE_INFO_UNLOAD);
3547 * Device shutdown function.
3549 * Stops and resets the controller.
3552 * 0 = Success, !0 = Failure
/*
 * Device shutdown method (newbus): performs a normal controller stop
 * under the core lock.  NOTE(review): the matching BXE_CORE_LOCK()
 * call falls on an elided line of this listing -- confirm in full
 * source before editing the locking here.
 */
3555 bxe_shutdown(device_t dev)
3557 struct bxe_softc *sc;
3559 sc = device_get_softc(dev);
3560 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
/* Graceful stop; UNLOAD_NORMAL tells the firmware this is not a crash. */
3563 bxe_stop_locked(sc, UNLOAD_NORMAL);
3564 BXE_CORE_UNLOCK(sc);
3566 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
3571 * Prints out link speed and duplex setting to console.
/*
 * Log link speed/duplex/flow-control to the console and push the link
 * state (up/down) to the network stack via if_link_state_change().
 */
3577 bxe_link_report(struct bxe_softc *sc)
3579 uint32_t line_speed;
3580 uint16_t vn_max_rate;
3582 DBENTER(BXE_VERBOSE_PHY);
3584 if (sc->link_vars.link_up) {
3585 /* Report the link status change to OS. */
3586 if (sc->state == BXE_STATE_OPEN)
3587 if_link_state_change(sc->bxe_ifp, LINK_STATE_UP);
3589 line_speed = sc->link_vars.line_speed;
/*
 * Clamp the reported speed to this VN's configured maximum bandwidth
 * (MF config stores it in 100 Mbps units).  NOTE(review): the IS_E1HMF
 * guard around this clamp appears to be on an elided line -- confirm.
 */
3592 vn_max_rate = ((sc->mf_config[BP_E1HVN(sc)] &
3593 FUNC_MF_CFG_MAX_BW_MASK) >>
3594 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
3595 if (vn_max_rate < line_speed)
3596 line_speed = vn_max_rate;
3599 BXE_PRINTF("Link is up, %d Mbps, ", line_speed);
3601 if (sc->link_vars.duplex == MEDIUM_FULL_DUPLEX)
3602 printf("full duplex");
3604 printf("half duplex");
/* Append which flow-control directions are active, if any. */
3606 if (sc->link_vars.flow_ctrl) {
3607 if (sc->link_vars.flow_ctrl & FLOW_CTRL_RX) {
3608 printf(", receive ");
3609 if (sc->link_vars.flow_ctrl & FLOW_CTRL_TX)
3610 printf("& transmit ");
3612 printf(", transmit ");
3613 printf("flow control ON");
3617 /* Report the link down */
3618 BXE_PRINTF("Link is down\n");
3619 if_link_state_change(sc->bxe_ifp, LINK_STATE_DOWN);
3622 DBEXIT(BXE_VERBOSE_PHY);
/*
 * Refresh link state from the PHY layer, kick the statistics state
 * machine accordingly, re-read the multi-function config, and report
 * the result.  No-op unless stats are enabled and the device is OPEN.
 */
3631 bxe__link_status_update(struct bxe_softc *sc)
3633 DBENTER(BXE_VERBOSE_PHY);
3635 if (sc->stats_enable == FALSE || sc->state != BXE_STATE_OPEN)
3638 bxe_link_status_update(&sc->link_params, &sc->link_vars);
/* Start or stop statistics collection based on the new link state. */
3640 if (sc->link_vars.link_up)
3641 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
3643 bxe_stats_handle(sc, STATS_EVENT_STOP);
3645 bxe_read_mf_cfg(sc);
3647 /* Indicate link status. */
3648 bxe_link_report(sc);
3650 DBEXIT(BXE_VERBOSE_PHY);
3654 * Calculate flow control to advertise during autonegotiation.
/*
 * Translate the negotiated IEEE pause bits in link_vars.ieee_fc into
 * the ADVERTISED_* flow-control flags in sc->port.advertising, for use
 * during autonegotiation.
 */
3660 bxe_calc_fc_adv(struct bxe_softc *sc)
3662 DBENTER(BXE_EXTREME_PHY);
3664 switch (sc->link_vars.ieee_fc &
3665 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
3667 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
3668 sc->port.advertising &= ~(ADVERTISED_Asym_Pause |
3672 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
3673 sc->port.advertising |= (ADVERTISED_Asym_Pause |
3677 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
3678 sc->port.advertising |= ADVERTISED_Asym_Pause;
/* Default: advertise no pause capability at all. */
3682 sc->port.advertising &= ~(ADVERTISED_Asym_Pause |
3687 DBEXIT(BXE_EXTREME_PHY);
/*
 * First-time PHY bring-up: choose the requested flow-control mode,
 * run the common PHY init under the PHY lock, compute the resulting
 * flow-control advertisement, and report link if it came up.
 */
3698 bxe_initial_phy_init(struct bxe_softc *sc)
3702 DBENTER(BXE_VERBOSE_PHY);
3708 * It is recommended to turn off RX flow control for 5771x
3709 * when using jumbo frames for better performance.
3711 if (!IS_E1HMF(sc) && (sc->mbuf_alloc_size > 5000))
3712 sc->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
3714 sc->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
3716 bxe_acquire_phy_lock(sc);
3717 rc = bxe_phy_init(&sc->link_params, &sc->link_vars);
3718 bxe_release_phy_lock(sc);
3720 bxe_calc_fc_adv(sc);
3722 if (sc->link_vars.link_up) {
3723 bxe_stats_handle(sc,STATS_EVENT_LINK_UP);
3724 bxe_link_report(sc);
/* Without running bootcode there is no one to drive the link. */
3728 DBPRINT(sc, BXE_FATAL, "%s(): Bootcode is not running, "
3729 "not initializing link!\n", __FUNCTION__);
3733 DBEXIT(BXE_VERBOSE_PHY);
3738 #if __FreeBSD_version >= 800000
3740 * Allocate buffer rings used for multiqueue.
3743 * 0 = Success, !0 = Failure.
/*
 * Allocate one buf_ring per fastpath queue for multiqueue transmit
 * (FreeBSD 8+ only).  Returns 0 on success, non-zero on allocation
 * failure; caller is expected to unwind via bxe_free_buf_rings().
 */
3746 bxe_alloc_buf_rings(struct bxe_softc *sc)
3748 struct bxe_fastpath *fp;
3751 DBENTER(BXE_VERBOSE_LOAD);
3754 for (i = 0; i < sc->num_queues; i++) {
/* M_DONTWAIT: called in a context where sleeping is not allowed. */
3758 fp->br = buf_ring_alloc(BXE_BR_SIZE,
3759 M_DEVBUF, M_DONTWAIT, &fp->mtx);
3760 if (fp->br == NULL) {
3762 goto bxe_alloc_buf_rings_exit;
/* Should be unreachable: fp was NULL (elided guard above). */
3765 BXE_PRINTF("%s(%d): Bug!\n", __FILE__, __LINE__);
3768 bxe_alloc_buf_rings_exit:
3769 DBEXIT(BXE_VERBOSE_LOAD);
3774 * Releases buffer rings used for multiqueue.
/*
 * Release the per-queue multiqueue buf_rings allocated by
 * bxe_alloc_buf_rings().  NOTE(review): the NULL check before
 * buf_ring_free() appears to be on an elided line -- confirm.
 */
3780 bxe_free_buf_rings(struct bxe_softc *sc)
3782 struct bxe_fastpath *fp;
3785 DBENTER(BXE_VERBOSE_UNLOAD);
3787 for (i = 0; i < sc->num_queues; i++) {
3791 buf_ring_free(fp->br, M_DEVBUF);
3795 DBEXIT(BXE_VERBOSE_UNLOAD);
3801 * Handles controller initialization.
3803 * Must be called from a locked routine. Since this code
3804 * may be called from the OS it does not provide a return
3805 * error value and must clean up its own mess.
/*
 * Main controller bring-up path (core lock held).  Negotiates a load
 * type with the MCP bootcode (or tracks load counts itself when no MCP
 * is present), initializes hardware and NIC internals, sets the MAC
 * address, brings up the PHY, and starts the fastpath.  On any failure
 * it unwinds through the failedN labels in reverse order of setup.
 */
3811 bxe_init_locked(struct bxe_softc *sc, int load_mode)
3817 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET);
3819 BXE_CORE_LOCK_ASSERT(sc);
3822 /* Skip if we're in panic mode. */
3824 DBPRINT(sc, BXE_WARN, "%s(): Panic mode enabled, exiting!\n",
3826 goto bxe_init_locked_exit;
3829 /* Check if the driver is still running and bail out if it is. */
3830 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3831 DBPRINT(sc, BXE_WARN,
3832 "%s(): Init called while driver is running!\n",
3834 goto bxe_init_locked_exit;
3838 * Send LOAD_REQUEST command to MCP.
3839 * The MCP will return the type of LOAD
3840 * the driver should perform.
3841 * - If it is the first port to be initialized
3842 * then all common blocks should be initialized.
3843 * - If it is not the first port to be initialized
3844 * then don't do the common block initialization.
3846 sc->state = BXE_STATE_OPENING_WAIT4_LOAD;
/* No-MCP path: emulate the MCP's bookkeeping with local counters. */
3851 DBPRINT(sc, BXE_INFO,
3852 "%s(): Old No MCP load counts: %d, %d, %d\n",
3854 load_count[0], load_count[1], load_count[2]);
3857 load_count[1 + port]++;
3859 DBPRINT(sc, BXE_INFO,
3860 "%s(): New No MCP load counts: %d, %d, %d\n",
3862 load_count[0], load_count[1], load_count[2]);
3864 /* No MCP to tell us what to do. */
3865 if (load_count[0] == 1)
3866 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
3867 else if (load_count[1 + port] == 1)
3868 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
3870 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
3873 /* Ask the MCP what type of initialization we need to do. */
3874 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ);
3876 if ((load_code == 0) ||
3877 (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)) {
3878 BXE_PRINTF("%s(%d): Bootcode refused load request.!\n",
3879 __FILE__, __LINE__);
3880 goto bxe_init_locked_failed1;
3884 /* Keep track of whether we are controlling the port. */
3885 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
3886 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
3891 /* Block any interrupts until we're ready. */
3894 /* Initialize hardware. */
3895 error = bxe_init_hw(sc, load_code);
3897 BXE_PRINTF("%s(%d): Hardware initialization failed, "
3898 "aborting!\n", __FILE__, __LINE__);
3899 goto bxe_init_locked_failed1;
3902 /* Calculate and save the Ethernet MTU size. */
3903 sc->port.ether_mtu = ifp->if_mtu + ETHER_HDR_LEN +
3904 (ETHER_VLAN_ENCAP_LEN * 2) + ETHER_CRC_LEN + 4;
3906 DBPRINT(sc, BXE_INFO, "%s(): Setting MTU = %d\n",
3907 __FUNCTION__, sc->port.ether_mtu);
3909 /* Setup the mbuf allocation size for RX frames. */
3910 if (sc->port.ether_mtu <= MCLBYTES)
3911 sc->mbuf_alloc_size = MCLBYTES;
3912 else if (sc->port.ether_mtu <= PAGE_SIZE)
3913 sc->mbuf_alloc_size = PAGE_SIZE;
3915 sc->mbuf_alloc_size = MJUM9BYTES;
3917 DBPRINT(sc, BXE_INFO, "%s(): mbuf_alloc_size = %d, "
3918 "max_frame_size = %d\n", __FUNCTION__,
3919 sc->mbuf_alloc_size, sc->port.ether_mtu);
3921 /* Setup NIC internals and enable interrupts. */
3922 error = bxe_init_nic(sc, load_code);
3924 BXE_PRINTF("%s(%d): NIC initialization failed, "
3925 "aborting!\n", __FILE__, __LINE__);
3926 goto bxe_init_locked_failed1;
/* Advertise DCC support via shmem2, first function up only. */
3929 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
3930 (sc->common.shmem2_base)){
3931 if (sc->dcc_enable == TRUE) {
/* NOTE(review): "Enabing" typo in this runtime string; left as-is here. */
3932 BXE_PRINTF("Enabing DCC support\n");
3933 SHMEM2_WR(sc, dcc_support,
3934 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
3935 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
3939 #if __FreeBSD_version >= 800000
3940 /* Allocate buffer rings for multiqueue operation. */
3941 error = bxe_alloc_buf_rings(sc);
3943 BXE_PRINTF("%s(%d): Buffer ring initialization failed, "
3944 "aborting!\n", __FILE__, __LINE__);
3945 goto bxe_init_locked_failed1;
3949 /* Tell MCP that driver load is done. */
3951 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE);
3953 BXE_PRINTF("%s(%d): Driver load failed! No MCP "
3954 "response to LOAD_DONE!\n", __FILE__, __LINE__);
3955 goto bxe_init_locked_failed2;
3959 sc->state = BXE_STATE_OPENING_WAIT4_PORT;
3961 /* Enable ISR for PORT_SETUP ramrod. */
3964 /* Setup the leading connection for the controller. */
3965 error = bxe_setup_leading(sc);
3967 DBPRINT(sc, BXE_FATAL, "%s(): Initial PORT_SETUP ramrod "
3968 "failed. State is not OPEN!\n", __FUNCTION__);
3969 goto bxe_init_locked_failed3;
3972 if (CHIP_IS_E1H(sc)) {
3973 if (sc->mf_config[BP_E1HVN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
3974 BXE_PRINTF("Multi-function mode is disabled\n");
3975 /* sc->state = BXE_STATE_DISABLED; */
3978 /* Setup additional client connections for RSS/multi-queue */
3979 if (sc->state == BXE_STATE_OPEN) {
3980 for (i = 1; i < sc->num_queues; i++) {
3981 if (bxe_setup_multi(sc, i)) {
3982 DBPRINT(sc, BXE_FATAL,
3983 "%s(): fp[%02d] CLIENT_SETUP ramrod failed! State not OPEN!\n",
3985 goto bxe_init_locked_failed4;
3994 /* Initialize statistics. */
3998 /* Load our MAC address. */
3999 bcopy(IF_LLADDR(sc->bxe_ifp), sc->link_params.mac_addr, ETHER_ADDR_LEN);
/* E1 and E1H program the MAC through different firmware paths. */
4002 bxe_set_mac_addr_e1(sc, 1);
4004 bxe_set_mac_addr_e1h(sc, 1);
4008 /* Perform PHY initialization for the primary port. */
4010 bxe_initial_phy_init(sc);
4014 /* Start fastpath. */
4015 switch (load_mode) {
4018 /* Initialize the receive filters. */
4019 bxe_set_rx_mode(sc);
4023 /* Initialize the receive filters. */
4024 bxe_set_rx_mode(sc);
4025 sc->state = BXE_STATE_DIAG;
4029 DBPRINT(sc, BXE_WARN, "%s(): Unknown load mode (%d)!\n",
4030 __FUNCTION__, load_mode);
4035 bxe__link_status_update(sc);
4038 /* Tell the stack the driver is running. */
/*
 * NOTE(review): direct assignment clobbers every other if_drv_flags
 * bit; upstream uses `|= IFF_DRV_RUNNING` and clears IFF_DRV_OACTIVE
 * separately -- confirm against full source (adjacent line elided).
 */
4039 ifp->if_drv_flags = IFF_DRV_RUNNING;
4041 /* Schedule our periodic timer tick. */
4042 callout_reset(&sc->bxe_tick_callout, hz, bxe_tick, sc);
4043 /* Everything went OK, go ahead and exit. */
4044 goto bxe_init_locked_exit;
/* ---- Error unwind: reverse order of the setup steps above. ---- */
4046 bxe_init_locked_failed4:
4047 /* Try and gracefully shutdown the device because of a failure. */
4048 for (i = 1; i < sc->num_queues; i++)
4049 bxe_stop_multi(sc, i);
4051 bxe_init_locked_failed3:
4052 bxe_stop_leading(sc);
4053 bxe_stats_handle(sc, STATS_EVENT_STOP);
4055 bxe_init_locked_failed2:
4056 bxe_int_disable(sc);
4058 bxe_init_locked_failed1:
/* Tell the bootcode the load attempt is over (elided MCP guard above). */
4060 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE);
4061 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
4062 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
4066 #if __FreeBSD_version >= 800000
4067 bxe_free_buf_rings(sc);
4070 DBPRINT(sc, BXE_WARN, "%s(): Initialization failed!\n", __FUNCTION__);
4072 bxe_init_locked_exit:
4073 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET);
4077 * Ramrod wait function.
4079 * Waits for a ramrod command to complete.
4082 * 0 = Success, !0 = Failure
/*
 * Wait (or poll) for a slowpath ramrod to complete by watching
 * *state_p until it reaches `state`.  Returns 0 on success, non-zero
 * on timeout.  In poll mode the RX ring is serviced manually so the
 * completion can be observed without interrupts.
 */
4085 bxe_wait_ramrod(struct bxe_softc *sc, int state, int idx, int *state_p,
4090 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
4092 DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): %s for state 0x%08X on "
4093 "fp[%02d], currently 0x%08X.\n", __FUNCTION__,
4094 poll ? "Polling" : "Waiting", state, idx, *state_p);
4100 /* Manually check for the completion. */
4104 * Some commands don't use the leading client
4108 bxe_rxeof(&sc->fp[idx]);
4111 /* State may be changed by bxe_sp_event(). */
4113 if (*state_p == state)
4114 goto bxe_wait_ramrod_exit;
4118 /* Pause 1ms before checking again. */
4122 /* We timed out polling for a completion. */
4123 DBPRINT(sc, BXE_FATAL, "%s(): Timeout %s for state 0x%08X on fp[%02d]. "
4124 "Got 0x%x instead\n", __FUNCTION__, poll ? "polling" : "waiting",
4125 state, idx, *state_p);
4129 bxe_wait_ramrod_exit:
4131 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
/*
 * DMAE write of an arbitrarily long buffer: split the transfer into
 * chunks of at most DMAE_LEN32_WR_MAX 32-bit words and issue one
 * bxe_write_dmae() per chunk.  NOTE(review): `offset = 0` and the
 * per-iteration `len -= dmae_wr_max` fall on elided lines -- confirm.
 */
4140 bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr,
4141 uint32_t addr, uint32_t len)
4143 int dmae_wr_max, offset;
4144 DBENTER(BXE_INSANE_REGS);
4146 dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
4148 while (len > dmae_wr_max) {
4149 bxe_write_dmae(sc, phys_addr + offset, addr + offset,
/* offset advances in bytes: dmae_wr_max words * 4 bytes each. */
4151 offset += dmae_wr_max * 4;
/* Final (short) chunk. */
4154 bxe_write_dmae(sc, phys_addr + offset, addr + offset, len);
4155 DBEXIT(BXE_INSANE_REGS);
4161 #define INIT_MEM_WR(block, reg, part, hw, data, reg_off, len) \
4162 bxe_init_str_wr(sc, GRCBASE_##block + reg + reg_off * 4, data, len)
4166 * Write a block of data to a range of registers.
/*
 * Write `len` 32-bit words from `data` to consecutive device
 * registers starting at `addr` using direct (memory-mapped) access.
 */
4172 bxe_init_str_wr(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4176 for (i = 0; i < len; i++)
4177 REG_WR(sc, addr + i * 4, data[i]);
4181 * Write a block of data to a range of registers using indirect access.
/*
 * Same as bxe_init_str_wr() but via indirect (PCI config-space)
 * register access; used when DMAE/memory-mapped I/O is unavailable.
 */
4187 bxe_init_ind_wr(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4191 for (i = 0; i < len; i++)
4192 REG_WR_IND(sc, addr + i * 4, data[i]);
/*
 * Flush the staging buffer (sc->gz) to device memory at `addr`:
 * via DMAE when ready, otherwise falling back to string writes
 * (the branch conditions are on elided lines of this listing).
 */
4201 bxe_write_big_buf(struct bxe_softc *sc, uint32_t addr, uint32_t len)
4203 DBENTER(BXE_INSANE_REGS);
4206 bxe_write_dmae_phys_len(sc, sc->gz_dma.paddr, addr, len);
4208 bxe_init_str_wr(sc, addr, sc->gz, len);
4210 bxe_init_str_wr(sc, addr, sc->gz, len);
4213 DBEXIT(BXE_INSANE_REGS);
4217 * Fill areas of device memory with the specified value.
4219 * Generally used to clear a small area of device memory prior to writing
4220 * firmware to STORM memory or writing STORM firmware to device memory.
/*
 * Fill `len` 32-bit words of device memory at `addr` with the byte
 * value `fill`, staging through sc->gz in BXE_FW_BUF_SIZE chunks.
 * Used to clear STORM memory before loading firmware.
 */
4226 bxe_init_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len)
4228 uint32_t cur_len, i, leftovers, length;
4230 DBENTER(BXE_VERBOSE_LOAD);
/* length: bytes per chunk, capped at the staging buffer size. */
4232 length = (((len * 4) > BXE_FW_BUF_SIZE) ? BXE_FW_BUF_SIZE : (len * 4));
/* leftovers: the same chunk size expressed in 32-bit words. */
4233 leftovers = length / 4;
4234 memset(sc->gz, fill, length);
4236 for (i = 0; i < len; i += leftovers) {
4237 cur_len = min(leftovers, len - i);
4238 bxe_write_big_buf(sc, addr + i * 4, cur_len);
4241 DBEXIT(BXE_VERBOSE_LOAD);
/*
 * Replicate a single 64-bit value (stored in the blob as low DWORD
 * then high DWORD) across `len` words of device memory at `addr`,
 * staging through sc->gz and flushing in buffer-sized chunks.
 */
4250 bxe_init_wr_64(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4253 uint64_t data64, *pdata;
4254 uint32_t buf_len32, cur_len, len;
4257 DBENTER(BXE_INSANE_REGS);
4259 buf_len32 = BXE_FW_BUF_SIZE / 4;
4261 /* 64 bit value is in a blob: first low DWORD, then high DWORD. */
4262 data64 = HILO_U64((*(data + 1)), (*data));
4263 len64 = min((uint32_t)(BXE_FW_BUF_SIZE / 8), len64);
/* Pre-fill the staging buffer with copies of the 64-bit pattern. */
4264 for (i = 0; i < len64; i++) {
4265 pdata = ((uint64_t *)(sc->gz)) + i;
4269 for (i = 0; i < len; i += buf_len32) {
4270 cur_len = min(buf_len32, len - i);
4271 bxe_write_big_buf(sc, addr + i*4, cur_len);
4274 DBEXIT(BXE_INSANE_REGS);
4279 * There are different blobs for each PRAM section. In addition, each
4280 * blob write operation is divided into multiple, smaller write
4281 * operations in order to decrease the amount of physically contiguous
4282 * buffer memory needed. Thus, when we select a blob, the address may
4283 * be with some offset from the beginning of PRAM section. The same
4284 * holds for the INT_TABLE sections.
4287 #define IF_IS_INT_TABLE_ADDR(base, addr) \
4288 if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
4290 #define IF_IS_PRAM_ADDR(base, addr) \
4291 if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
/*
 * Map a device address to the firmware blob that backs it: each STORM
 * processor has its own interrupt-table and PRAM blob, and `addr` may
 * land anywhere inside the section (see the offset note above the
 * IF_IS_*_ADDR macros).  If no section matches, `data` is returned
 * unchanged.
 */
4299 static const uint8_t *
4300 bxe_sel_blob(struct bxe_softc *sc, uint32_t addr, const uint8_t *data)
4303 IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
4304 data = INIT_TSEM_INT_TABLE_DATA(sc);
4306 IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
4307 data = INIT_CSEM_INT_TABLE_DATA(sc);
4309 IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
4310 data = INIT_USEM_INT_TABLE_DATA(sc);
4312 IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
4313 data = INIT_XSEM_INT_TABLE_DATA(sc);
4315 IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
4316 data = INIT_TSEM_PRAM_DATA(sc);
4318 IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
4319 data = INIT_CSEM_PRAM_DATA(sc);
4321 IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
4322 data = INIT_USEM_PRAM_DATA(sc);
4324 IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
4325 data = INIT_XSEM_PRAM_DATA(sc);
/*
 * Wide-bus variant of bxe_write_big_buf(): flush sc->gz to the device
 * via DMAE when ready, otherwise via indirect writes (the dmae_ready
 * branch condition is on an elided line of this listing).
 */
4332 bxe_write_big_buf_wb(struct bxe_softc *sc, uint32_t addr, uint32_t len)
4335 bxe_write_dmae_phys_len(sc, sc->gz_dma.paddr, addr, len);
4337 bxe_init_ind_wr(sc, addr, sc->gz, len);
4341 #define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \
4343 memcpy(sc->gz, data, (len32)*4); \
4344 bxe_write_big_buf_wb(sc, addr, len32); \
/*
 * Wide-bus block write: resolve `data` to the correct firmware blob
 * (it may be redirected by bxe_sel_blob()), then write it via DMAE
 * when ready, or indirect access otherwise.  The le32_swap flag to
 * VIRT_WR_DMAE_LEN differs depending on whether the blob was swapped.
 *
 * NOTE(review): `old_data` is compared at 4363 but no assignment is
 * visible in this elided listing; presumably `old_data = data;`
 * precedes the bxe_sel_blob() call -- confirm in full source.
 */
4354 bxe_init_wr_wb(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4357 const uint32_t *old_data;
4359 DBENTER(BXE_INSANE_REGS);
4361 data = (const uint32_t *)bxe_sel_blob(sc, addr, (const uint8_t *)data);
4362 if (sc->dmae_ready) {
4363 if (old_data != data)
4364 VIRT_WR_DMAE_LEN(sc, data, addr, len, 1);
4366 VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);
4368 bxe_init_ind_wr(sc, addr, data, len);
4370 DBEXIT(BXE_INSANE_REGS);
/*
 * Stub for compressed (zipped) firmware writes: this driver does not
 * support compressed firmware, so reaching here is an error and is
 * only logged.
 */
4374 bxe_init_wr_zp(struct bxe_softc *sc, uint32_t addr, uint32_t len,
4377 BXE_PRINTF("%s(%d): Compressed FW is not supported yet. "
4378 "ERROR: address:0x%x len:0x%x blob_offset:0x%x\n",
4379 __FILE__, __LINE__, addr, len, blob_off);
4383 * Initialize blocks of the device.
4385 * This routine basically performs bulk register programming for different
4386 * blocks within the controller. The file bxe_init_values.h contains a
4387 * series of register access operations (read, write, fill, etc.) as well
4388 * as a BLOB of data to initialize multiple blocks within the controller.
4389 * Block initialization may be supported by all controllers or by specific
/*
 * Execute the init-ops program for one (block, stage) pair: walk the
 * op list between the STAGE_START and STAGE_END offsets and dispatch
 * each op (direct/indirect/wide-bus write, fill, zip, 64-bit) to the
 * corresponding helper.  The op table and data blob come from
 * bxe_init_values.h via the INIT_* accessors.
 */
4396 bxe_init_block(struct bxe_softc *sc, uint32_t block, uint32_t stage)
4399 const uint32_t *data, *data_base;
4400 uint32_t i, op_type, addr, len;
4401 uint16_t op_end, op_start;
4404 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
4406 op_start = INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
4408 op_end = INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
4409 /* If empty block */
4410 if (op_start == op_end)
4415 data_base = INIT_DATA(sc);
4417 for (i = op_start; i < op_end; i++) {
4419 op = (union init_op *)&(INIT_OPS(sc)[i]);
/* All op variants share the str_wr layout for these header fields. */
4421 op_type = op->str_wr.op;
4422 addr = op->str_wr.offset;
4423 len = op->str_wr.data_len;
4424 data = data_base + op->str_wr.data_off;
4426 /* HW/EMUL specific */
4427 if ((op_type > OP_WB) && (op_type == hw_wr))
/* Dispatch (switch cases are on elided lines of this listing). */
4435 REG_WR(sc, addr, op->write.val);
4438 bxe_init_str_wr(sc, addr, data, len);
4441 bxe_init_wr_wb(sc, addr, data, len);
4444 bxe_init_ind_wr(sc, addr, data, len);
4447 bxe_init_fill(sc, addr, 0, op->zero.len);
4450 bxe_init_wr_zp(sc, addr, len, op->str_wr.data_off);
4453 bxe_init_wr_64(sc, addr, data, len);
4456 /* happens whenever an op is of a diff HW */
4461 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
4465 * Handles controller initialization when called from an unlocked routine.
4466 * ifconfig calls this function.
/*
 * ifconfig-facing init entry point (signature on an elided line of
 * this listing): takes the core lock and runs a normal load.
 * NOTE(review): the sc assignment and BXE_CORE_LOCK() call are also
 * elided here -- confirm in full source.
 */
4474 struct bxe_softc *sc;
4479 bxe_init_locked(sc, LOAD_NORMAL);
4480 BXE_CORE_UNLOCK(sc);
4484 * Release all resources used by the driver.
4486 * Releases all resources acquired by the driver including interrupts,
4487 * interrupt handler, interfaces, mutexes, and DMA memory.
/*
 * Tear-down helper: release the ifnet, host DMA structures, and (on
 * FreeBSD 8+) the multiqueue buf_rings.  Interrupt resources are
 * released elsewhere (elided in this listing).
 */
4493 bxe_release_resources(struct bxe_softc *sc)
4497 DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
4501 /* Release the FreeBSD interface. */
4502 if (sc->bxe_ifp != NULL)
4503 if_free(sc->bxe_ifp);
4505 /* Free the DMA resources. */
4506 bxe_host_structures_free(sc);
4508 #if __FreeBSD_version >= 800000
4509 /* Free multiqueue buffer rings. */
4510 bxe_free_buf_rings(sc);
4517 * Indirect register write.
4519 * Writes NetXtreme II registers using an index/data register pair in PCI
4520 * configuration space. Using this mechanism avoids issues with posted
4521 * writes but is much slower than memory-mapped I/O.
/*
 * Indirect register write through the PCI config-space GRC window
 * (address register + data register pair).  Slower than MMIO but
 * avoids posted-write hazards.
 */
4527 bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t offset, uint32_t val)
4529 DBPRINT(sc, BXE_INSANE_REGS, "%s(); offset = 0x%08X, val = 0x%08X\n",
4530 __FUNCTION__, offset, val);
4532 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, offset, 4);
4533 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
4535 /* Return to a safe address. */
4536 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
4537 PCICFG_VENDOR_ID_OFFSET, 4);
4542 * Indirect register read.
4544 * Reads NetXtreme II registers using an index/data register pair in PCI
4545 * configuration space. Using this mechanism avoids issues with posted
4546 * reads but is much slower than memory-mapped I/O.
4549 * The value of the register.
/*
 * Indirect register read through the PCI config-space GRC window;
 * counterpart of bxe_reg_wr_ind().  Returns the register value.
 */
4552 bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t offset)
4556 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, offset, 4);
4557 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
4559 /* Return to a safe address. */
4560 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
4561 PCICFG_VENDOR_ID_OFFSET, 4);
4563 DBPRINT(sc, BXE_INSANE_REGS, "%s(); offset = 0x%08X, val = 0x%08X\n",
4564 __FUNCTION__, offset, val);
/* Per-channel DMAE "go" (doorbell) registers, indexed by command slot. */
4570 static uint32_t dmae_reg_go_c[] = {
4571 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
4572 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
4573 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
4574 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
4579 * Copy DMAE command into memory and start the command.
/*
 * Copy a DMAE command into the device's command memory slot `idx`
 * (one 32-bit register write per command word) and ring that slot's
 * doorbell to start the transfer.
 */
4585 bxe_post_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int idx)
4587 uint32_t cmd_offset;
4589 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
4591 for (i = 0; i < (sizeof(struct dmae_command) / 4); i++) {
4592 REG_WR(sc, cmd_offset + i * 4, *(((uint32_t *)dmae) + i));
4593 DBPRINT(sc, BXE_INSANE_REGS, "%s(): DMAE cmd[%d].%d : 0x%08X\n",
4594 __FUNCTION__, idx, i, cmd_offset + i * 4);
4597 /* Kick off the command. */
4598 REG_WR(sc, dmae_reg_go_c[idx], 1);
4603 * Perform a DMAE write to device memory.
4605 * Some of the registers on the 577XX controller are 128bits wide. It is
4606 * required that when accessing those registers that they be written
4607 * atomically and that no intervening bus accesses to the device occur.
4608 * This could be handled by a lock held across all driver instances for
4609 * the device or it can be handled by performing a DMA operation when
4610 * writing to the device. This code implements the latter.
/*
 * DMA `len32` 32-bit words from host memory (dma_addr) to device
 * memory (dst_addr) using a DMAE command; completion is detected by
 * polling a write-back word (wb_comp) the engine stamps with
 * BXE_WB_COMP_VAL.  Falls back to indirect writes when DMAE is not
 * yet initialized.  Callers must respect DMAE_LEN32_WR_MAX (see
 * bxe_write_dmae_phys_len for chunking).
 */
4616 bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr, uint32_t dst_addr,
4619 struct dmae_command dmae;
4620 uint32_t *data, *wb_comp;
4623 DBENTER(BXE_INSANE_REGS);
4625 DBPRINT(sc, BXE_EXTREME_REGS,
4626 "%s(): host addr = 0x%jX, device addr = 0x%08X, length = %d.\n",
4627 __FUNCTION__, (uintmax_t)dma_addr, dst_addr, (int)len32);
4629 wb_comp = BXE_SP(sc, wb_comp);
4630 /* Fall back to indirect access if DMAE is not ready. */
4631 if (!sc->dmae_ready) {
4632 data = BXE_SP(sc, wb_data[0]);
4634 DBPRINT(sc, BXE_WARN, "%s(): DMAE not ready, "
4635 "using indirect.\n", __FUNCTION__);
4637 bxe_init_ind_wr(sc, dst_addr, data, len32);
4638 goto bxe_write_dmae_exit;
4641 memset(&dmae, 0, sizeof(struct dmae_command));
/* PCI -> GRC transfer with PCI-side completion write-back. */
4643 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4644 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4645 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4647 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4649 DMAE_CMD_ENDIANITY_DW_SWAP |
4651 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4652 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
4653 dmae.src_addr_lo = U64_LO(dma_addr);
4654 dmae.src_addr_hi = U64_HI(dma_addr);
/* GRC addresses are expressed in 32-bit words, hence >> 2. */
4655 dmae.dst_addr_lo = dst_addr >> 2;
4656 dmae.dst_addr_hi = 0;
4658 dmae.comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
4659 dmae.comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
4660 dmae.comp_val = BXE_WB_COMP_VAL;
4666 bxe_post_dmae(sc, &dmae, INIT_DMAE_C(sc));
4670 /* Wait up to 200ms. */
4672 while (*wb_comp != BXE_WB_COMP_VAL) {
4674 DBPRINT(sc, BXE_FATAL,
4675 "%s(): DMAE timeout (dst_addr = 0x%08X, len = %d)!\n",
4676 __FUNCTION__, dst_addr, len32);
4683 BXE_DMAE_UNLOCK(sc);
4685 bxe_write_dmae_exit:
4686 DBEXIT(BXE_INSANE_REGS);
4691 * Perform a DMAE read from device memory.
4693 * Some of the registers on the 577XX controller are 128bits wide. It is
4694 * required that when accessing those registers that they be read
4695 * atomically and that no intervening bus accesses to the device occur.
4696 * This could be handled by a lock held across all driver instances for
4697 * the device or it can be handled by performing a DMA operation when
4698 * reading from the device. This code implements the latter.
/*
 * DMA `len32` 32-bit words from device memory (src_addr) into the
 * slowpath wb_data buffer; mirror image of bxe_write_dmae().  Falls
 * back to indirect reads when DMAE is not ready.  Completion is
 * polled via the wb_comp write-back word.
 */
4704 bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr,
4707 struct dmae_command dmae;
4708 uint32_t *data, *wb_comp;
4711 DBENTER(BXE_INSANE_REGS);
4713 wb_comp = BXE_SP(sc, wb_comp);
4714 /* Fall back to indirect access if DMAE is not ready. */
4715 if (!sc->dmae_ready) {
4716 data = BXE_SP(sc, wb_data[0]);
4718 DBPRINT(sc, BXE_WARN, "%s(): DMAE not ready, "
4719 "using indirect.\n", __FUNCTION__);
4721 for (i = 0; i < len32; i++)
4722 data[i] = bxe_reg_rd_ind(sc, src_addr + i * 4);
4724 goto bxe_read_dmae_exit;
4727 memset(&dmae, 0, sizeof(struct dmae_command));
/* GRC -> PCI transfer with PCI-side completion write-back. */
4729 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4730 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4731 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4733 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4735 DMAE_CMD_ENDIANITY_DW_SWAP |
4737 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4738 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
/* GRC addresses are expressed in 32-bit words, hence >> 2. */
4740 dmae.src_addr_lo = src_addr >> 2;
4741 dmae.src_addr_hi = 0;
4742 dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
4743 dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
4745 dmae.comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
4746 dmae.comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
4747 dmae.comp_val = BXE_WB_COMP_VAL;
/* Clear the destination window before the transfer lands. */
4751 memset(BXE_SP(sc, wb_data[0]), 0, sizeof(uint32_t) * 4);
4754 bxe_post_dmae(sc, &dmae, INIT_DMAE_C(sc));
4759 while (*wb_comp != BXE_WB_COMP_VAL) {
4761 DBPRINT(sc, BXE_FATAL,
4762 "%s(): DMAE timeout (src_addr = 0x%08X, len = %d)!\n",
4763 __FUNCTION__, src_addr, len32);
4770 BXE_DMAE_UNLOCK(sc);
4773 DBEXIT(BXE_INSANE_REGS);
4777 * DMAE write wrapper.
/*
 * Wide-bus write helper: write a 64-bit value (hi word first) to a
 * register pair via DMAE.
 */
4783 bxe_wb_wr(struct bxe_softc *sc, int reg, uint32_t val_hi, uint32_t val_lo)
4785 uint32_t wb_write[2];
4787 wb_write[0] = val_hi;
4788 wb_write[1] = val_lo;
4789 REG_WR_DMAE(sc, reg, wb_write, 2);
4795 * Poll a register waiting for a value.
4798 * The last read register value.
/*
 * Poll `reg` until it reads `expected` (retry/delay loop is on elided
 * lines of this listing); returns the last value read.
 */
4801 uint32_t bxe_reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected,
4807 val = REG_RD(sc, reg);
4808 if (val == expected)
4820 * Microcode assert display.
4822 * This function walks through each STORM processor and prints out a
4823 * listing of all asserts currently in effect. Useful for post-mortem
4827 * The number of asserts detected.
/*
 * Post-mortem helper: walk the assert list of each STORM processor
 * (X, T, C, U) and print every valid entry (four 32-bit words each).
 * An entry whose first word equals COMMON_ASM_INVALID_ASSERT_OPCODE
 * marks the end of valid entries.  Returns the assert count (the
 * accumulation lines are elided in this listing).
 */
4830 bxe_mc_assert(struct bxe_softc *sc)
4832 uint32_t row0, row1, row2, row3;
4836 DBENTER(BXE_VERBOSE_INTR);
/* XSTORM */
4840 last_idx = REG_RD8(sc, BAR_XSTORM_INTMEM +
4841 XSTORM_ASSERT_LIST_INDEX_OFFSET);
4844 DBPRINT(sc, BXE_FATAL, "DATA XSTORM_ASSERT_LIST_INDEX 0x%x\n",
4847 /* Print the asserts */
4848 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4850 row0 = REG_RD(sc, BAR_XSTORM_INTMEM +
4851 XSTORM_ASSERT_LIST_OFFSET(i));
4852 row1 = REG_RD(sc, BAR_XSTORM_INTMEM +
4853 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
4854 row2 = REG_RD(sc, BAR_XSTORM_INTMEM +
4855 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
4856 row3 = REG_RD(sc, BAR_XSTORM_INTMEM +
4857 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
4859 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4860 DBPRINT(sc, BXE_FATAL, "DATA XSTORM_ASSERT_INDEX %d = "
4861 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
/* TSTORM */
4869 last_idx = REG_RD8(sc, BAR_TSTORM_INTMEM +
4870 TSTORM_ASSERT_LIST_INDEX_OFFSET);
4873 DBPRINT(sc, BXE_FATAL, "DATA TSTORM_ASSERT_LIST_INDEX 0x%x\n",
4876 /* Print the asserts */
4877 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4879 row0 = REG_RD(sc, BAR_TSTORM_INTMEM +
4880 TSTORM_ASSERT_LIST_OFFSET(i));
4881 row1 = REG_RD(sc, BAR_TSTORM_INTMEM +
4882 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
4883 row2 = REG_RD(sc, BAR_TSTORM_INTMEM +
4884 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
4885 row3 = REG_RD(sc, BAR_TSTORM_INTMEM +
4886 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
4888 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4889 DBPRINT(sc, BXE_FATAL, "DATA TSTORM_ASSERT_INDEX %d = "
4890 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
/* CSTORM */
4898 last_idx = REG_RD8(sc, BAR_CSTORM_INTMEM +
4899 CSTORM_ASSERT_LIST_INDEX_OFFSET);
4902 DBPRINT(sc, BXE_FATAL, "DATA CSTORM_ASSERT_LIST_INDEX 0x%x\n",
4905 /* Print the asserts */
4906 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4908 row0 = REG_RD(sc, BAR_CSTORM_INTMEM +
4909 CSTORM_ASSERT_LIST_OFFSET(i));
4910 row1 = REG_RD(sc, BAR_CSTORM_INTMEM +
4911 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
4912 row2 = REG_RD(sc, BAR_CSTORM_INTMEM +
4913 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
4914 row3 = REG_RD(sc, BAR_CSTORM_INTMEM +
4915 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
4917 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4918 DBPRINT(sc, BXE_FATAL, "DATA CSTORM_ASSERT_INDEX %d = "
4919 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
/* USTORM */
4927 last_idx = REG_RD8(sc, BAR_USTORM_INTMEM +
4928 USTORM_ASSERT_LIST_INDEX_OFFSET);
4931 DBPRINT(sc, BXE_FATAL, "DATA USTORM_ASSERT_LIST_INDEX 0x%x\n",
4934 /* Print the asserts */
4935 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4937 row0 = REG_RD(sc, BAR_USTORM_INTMEM +
4938 USTORM_ASSERT_LIST_OFFSET(i));
4939 row1 = REG_RD(sc, BAR_USTORM_INTMEM +
4940 USTORM_ASSERT_LIST_OFFSET(i) + 4);
4941 row2 = REG_RD(sc, BAR_USTORM_INTMEM +
4942 USTORM_ASSERT_LIST_OFFSET(i) + 8);
4943 row3 = REG_RD(sc, BAR_USTORM_INTMEM +
4944 USTORM_ASSERT_LIST_OFFSET(i) + 12);
4946 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4947 DBPRINT(sc, BXE_FATAL, "DATA USTORM_ASSERT_INDEX %d = "
4948 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
4955 DBEXIT(BXE_VERBOSE_INTR);
4961 * Perform a panic dump.
/*
 * Crash-dump driver state to the console: disable statistics first so
 * the hardware quiesces, then dump (dump body is on elided lines).
 */
4967 bxe_panic_dump(struct bxe_softc *sc)
4971 sc->stats_state = STATS_STATE_DISABLED;
4973 BXE_PRINTF("---------- Begin crash dump ----------\n");
4975 /* Idle check is run twice to verify the controller has stopped. */
4984 BXE_PRINTF("---------- End crash dump ----------\n");
4991 * Enables interrupt generation.
/*
 * Program the host coalescing block for the active interrupt mode
 * (MSI-X single/multi vector, MSI single/multi vector, or INTx) and,
 * on E1H, set up leading/trailing edge attention generation for this
 * function's VN.
 */
4997 bxe_int_enable(struct bxe_softc *sc)
4999 uint32_t hc_addr, val;
5002 DBENTER(BXE_VERBOSE_INTR);
/* Each port has its own HC config register. */
5005 hc_addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5006 val = REG_RD(sc, hc_addr);
5007 if (sc->msix_count > 0) {
5008 if (sc->msix_count == 1) {
5010 /* Single interrupt, multiple queues.*/
5011 DBPRINT(sc, BXE_VERBOSE_INTR,
5012 "%s(): Setting host coalescing registers for MSI-X (SIMQ).\n",
5016 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
5018 /* Enable single ISR mode, MSI/MSI-X, and attention messages. */
5019 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5020 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5021 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5024 /* Multiple interrupts, multiple queues.*/
5025 DBPRINT(sc, BXE_VERBOSE_INTR,
5026 "%s(): Setting host coalescing registers for MSI-X (MIMQ).\n",
5029 /* Clear single ISR mode and INTx. */
5030 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5031 HC_CONFIG_0_REG_INT_LINE_EN_0);
5033 /* Enable MSI/MSI-X and attention messages. */
5034 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5035 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5038 } else if (sc->msi_count > 0) {
5040 if (sc->msi_count == 1) {
5042 /* Single interrupt, multiple queues.*/
5043 DBPRINT(sc, BXE_VERBOSE_INTR,
5044 "%s(): Setting host coalescing registers for MSI (SIMQ).\n",
5048 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
5050 /* Enable single ISR mode, MSI/MSI-X, and attention
5053 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5054 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5055 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5057 /* Multiple interrupts, multiple queues.*/
5058 DBPRINT(sc, BXE_VERBOSE_INTR,
5059 "%s(): Setting host coalescing registers for"
5063 /* Clear single ISR mode and INTx. */
5064 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5065 HC_CONFIG_0_REG_INT_LINE_EN_0);
5067 /* Enable MSI/MSI-X and attention messages. */
5068 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5069 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5072 /* Single interrupt, single queue. */
5073 DBPRINT(sc, BXE_VERBOSE_INTR,
5074 "%s(): Setting host coalescing registers for INTA#.\n",
/* INTx: first enable everything, then drop the MSI/MSI-X bit below. */
5077 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5078 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5079 HC_CONFIG_0_REG_INT_LINE_EN_0 |
5080 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5081 REG_WR(sc, hc_addr, val);
5083 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
5086 /* Write the interrupt mode to the host coalescing block. */
5087 REG_WR(sc, hc_addr, val);
5089 if (CHIP_IS_E1H(sc)) {
5091 /* Init leading/trailing edge attention generation. */
5093 val = (0xee0f | (1 << (BP_E1HVN(sc) + 4)));
5096 * Check if this driver instance is the port
5100 /* Enable nig & GPIO3 attentions. */
5105 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val);
5106 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val);
5109 DBEXIT(BXE_VERBOSE_INTR);
5114 * Disables interrupt generation.
 *
 * Clears the INTx, MSI/MSI-X, and attention-bit enable flags in this
 * port's host coalescing (HC) configuration register, then reads the
 * register back to confirm the hardware accepted the new value.
 *
 * Returns:
 *   Nothing.
5120 bxe_int_disable(struct bxe_softc *sc)
5122 uint32_t hc_addr, val;
5125 DBENTER(BXE_VERBOSE_INTR | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
 /* Each port has its own HC configuration register. */
5128 hc_addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5129 val = REG_RD(sc, hc_addr);
 /* Mask off every interrupt generation source (MSI/MSI-X, INTx, attn). */
5131 val &= ~(HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5132 HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5134 REG_WR(sc, hc_addr, val);
 /* Read-back check: warn if the IGU did not latch the written value. */
5136 if (REG_RD(sc, hc_addr)!= val) {
5137 DBPRINT(sc, BXE_WARN, "%s(): BUG! Returned value from IGU "
5138 "doesn't match value written (0x%08X).\n",
5142 DBEXIT(BXE_VERBOSE_INTR | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
5145 #define BXE_CRC32_RESIDUAL 0xdebb20e3
/*
 * Acquire the software arbitration lock for the NVRAM interface.
 *
 * Requests the MCP NVM software arbitration grant for this port and
 * polls until the grant bit is set or the timeout expires.
 *
 * Returns:
5149 * 0 = Success, !0 = Failure.
 */
5152 bxe_nvram_acquire_lock(struct bxe_softc *sc)
5157 DBENTER(BXE_VERBOSE_NVRAM);
5163 /* Acquire the NVRAM lock. */
5164 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
5165 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
 /* Poll for the per-port arbitration grant bit. */
5167 for (i = 0; i < NVRAM_TIMEOUT_COUNT * 10; i++) {
5168 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
5169 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
 /* Timed out without seeing the grant bit. */
5175 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
5176 DBPRINT(sc, BXE_WARN, "%s(): Cannot acquire NVRAM lock!\n",
5181 DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Release the software arbitration lock for the NVRAM interface.
 *
 * Clears the arbitration request for this port and polls until the
 * grant bit drops or the timeout expires.
 *
 * Returns:
5187 * 0 = Success, !0 = Failure.
 */
5190 bxe_nvram_release_lock(struct bxe_softc *sc)
5195 DBENTER(BXE_VERBOSE_NVRAM);
5201 /* Release the NVRAM lock. */
5202 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
5203 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
 /* Wait for the per-port grant bit to clear. */
5205 for (i = 0; i < NVRAM_TIMEOUT_COUNT * 10; i++) {
5206 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
5207 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
 /* The grant bit is still set; the lock was not released. */
5213 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
5214 DBPRINT(sc, BXE_WARN, "%s(): Cannot release NVRAM lock!\n",
5219 DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Enable read and write access to the NVRAM through the MCP register
 * interface by setting both enable bits in the NVM access register.
 *
 * Returns:
 *   Nothing.
 */
5228 bxe_nvram_enable_access(struct bxe_softc *sc)
5232 DBENTER(BXE_VERBOSE_NVRAM);
5234 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5236 /* Enable both bits, even on read */
5237 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5238 (val | MCPR_NVM_ACCESS_ENABLE_EN |
5239 MCPR_NVM_ACCESS_ENABLE_WR_EN));
5241 DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Disable access to the NVRAM by clearing both enable bits in the
 * MCP NVM access register (inverse of bxe_nvram_enable_access()).
 *
 * Returns:
 *   Nothing.
 */
5249 bxe_nvram_disable_access(struct bxe_softc *sc)
5253 DBENTER(BXE_VERBOSE_NVRAM);
5255 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5257 /* Disable both bits, even after read. */
5258 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5259 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
5260 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
5262 DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Read a single 32-bit word from NVRAM at the given byte offset.
 *
 * Issues a DOIT read command through the MCP NVM registers and polls
 * for the DONE bit; the word read is returned through *ret_val.
 * cmd_flags may carry MCPR_NVM_COMMAND_FIRST/LAST for multi-word
 * sequences.
 *
 * Returns:
5267 * 0 = Success, !0 = Failure.
 */
5270 bxe_nvram_read_dword(struct bxe_softc *sc, uint32_t offset, uint32_t *ret_val,
5276 DBENTER(BXE_INSANE_NVRAM);
5278 /* Build the command word. */
5279 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
5281 /* Need to clear DONE bit separately. */
5282 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5284 /* Address within the NVRAM to read. */
5285 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
5286 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5288 /* Issue a read command. */
5289 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5291 /* Wait for completion. */
5294 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
5296 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
5298 if (val & MCPR_NVM_COMMAND_DONE) {
5299 val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
5307 DBPRINT(sc, BXE_INSANE_NVRAM, "%s(): Read 0x%08X from offset 0x%08X.\n",
5308 __FUNCTION__, *ret_val, offset);
5309 DBEXIT(BXE_INSANE_NVRAM);
/*
 * Read an arbitrary (DWORD-aligned) range of bytes from NVRAM.
 *
 * Validates alignment and bounds, takes the NVRAM arbitration lock,
 * enables access, and reads DWORD-by-DWORD into ret_buf, marking the
 * first and last words with the FIRST/LAST command flags.
 *
 * Returns:
5315 * 0 = Success, !0 = Failure.
 */
5318 bxe_nvram_read(struct bxe_softc *sc, uint32_t offset, uint8_t *ret_buf,
5321 uint32_t cmd_flags, val;
5324 DBENTER(BXE_EXTREME_NVRAM);
 /* Offset and length must both be non-zero multiples of 4. */
5326 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
5327 DBPRINT(sc, BXE_WARN, "%s(): Unaligned address or invalid "
5328 "buffer for NVRAM read (offset = 0x%08X, buf_size = %d)!\n",
5329 __FUNCTION__, offset, buf_size);
5331 goto bxe_nvram_read_exit;
 /* The requested range must lie entirely within the flash. */
5334 if (offset + buf_size > sc->common.flash_size) {
5335 DBPRINT(sc, BXE_WARN, "%s(): Read extends beyond the end of "
5336 "the NVRAM (offset (0x%08X) + buf_size (%d) > flash_size "
5337 "(0x%08X))!\n", __FUNCTION__, offset, buf_size,
5338 sc->common.flash_size);
5340 goto bxe_nvram_read_exit;
5343 rc = bxe_nvram_acquire_lock(sc);
5345 goto bxe_nvram_read_exit;
5347 bxe_nvram_enable_access(sc);
5349 /* Read the first word(s). */
5350 cmd_flags = MCPR_NVM_COMMAND_FIRST;
5351 while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
5352 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
5353 memcpy(ret_buf, &val, 4);
5355 /* Advance to the next DWORD. */
5356 offset += sizeof(uint32_t);
5357 ret_buf += sizeof(uint32_t);
5358 buf_size -= sizeof(uint32_t);
5362 /* Read the final word. */
5364 cmd_flags |= MCPR_NVM_COMMAND_LAST;
5365 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
5366 memcpy(ret_buf, &val, 4);
5369 /* Disable access to NVRAM interface. */
5370 bxe_nvram_disable_access(sc);
5371 bxe_nvram_release_lock(sc);
5373 bxe_nvram_read_exit:
5374 DBEXIT(BXE_EXTREME_NVRAM);
5378 #ifdef BXE_NVRAM_WRITE_SUPPORT
/*
 * Write a single 32-bit word to NVRAM at the given byte offset.
 *
 * Issues a DOIT+WR command through the MCP NVM registers and polls
 * for the DONE bit. cmd_flags may carry MCPR_NVM_COMMAND_FIRST/LAST.
 *
 * Returns:
5381 * 0 = Success, !0 = Failure.
 */
5384 bxe_nvram_write_dword(struct bxe_softc *sc, uint32_t offset, uint32_t val,
5389 DBENTER(BXE_VERBOSE_NVRAM);
5391 /* Build the command word. */
5392 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
5394 /* Need to clear DONE bit separately. */
5395 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5397 /* Write the data. */
5398 REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
5400 /* Address to write within the NVRAM. */
5401 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
5402 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5404 /* Issue the write command. */
5405 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5407 /* Wait for completion. */
5409 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
 /* NOTE(review): 'val' is reused here for the polled status. */
5411 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
5412 if (val & MCPR_NVM_COMMAND_DONE) {
5418 DBEXIT(BXE_VERBOSE_NVRAM);
/* Bit position of a byte within its containing DWORD. */
5422 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
/*
 * Write a single byte to NVRAM using a read-modify-write of the
 * containing DWORD.
 *
 * Reads the aligned DWORD, replaces the addressed byte with *data_buf,
 * and writes the DWORD back as a FIRST|LAST command sequence.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */
5429 bxe_nvram_write1(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf,
5432 uint32_t align_offset, cmd_flags, val;
5435 DBENTER(BXE_VERBOSE_NVRAM);
 /* The target byte must lie within the flash. */
5437 if (offset + buf_size > sc->common.flash_size) {
5438 DBPRINT(sc, BXE_WARN, "%s(): Write extends beyond the end of "
5439 "the NVRAM (offset (0x%08X) + buf_size (%d) > flash_size "
5440 "(0x%08X))!\n", __FUNCTION__, offset, buf_size,
5441 sc->common.flash_size);
5443 goto bxe_nvram_write1_exit;
5446 /* request access to nvram interface */
5447 rc = bxe_nvram_acquire_lock(sc);
5449 goto bxe_nvram_write1_exit;
5451 /* Enable access to the NVRAM interface. */
5452 bxe_nvram_enable_access(sc);
 /* Read the DWORD containing the target byte. */
5454 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
5455 align_offset = (offset & ~0x03);
5456 rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
 /* Splice the new byte into its position within the DWORD. */
5459 val &= ~(0xff << BYTE_OFFSET(offset));
5460 val |= (*data_buf << BYTE_OFFSET(offset));
5463 rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
5466 /* Disable access to the NVRAM interface. */
5467 bxe_nvram_disable_access(sc);
5468 bxe_nvram_release_lock(sc);
5470 bxe_nvram_write1_exit:
5471 DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Write an arbitrary range of bytes to NVRAM.
 *
 * Small/unaligned writes are delegated to bxe_nvram_write1().
 * Otherwise the buffer is written DWORD-by-DWORD, setting the FIRST
 * flag at the start of each NVRAM page and the LAST flag at page
 * boundaries and at the end of the buffer.
 *
 * Returns:
5477 * 0 = Success, !0 = Failure.
 */
5480 bxe_nvram_write(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf,
5483 uint32_t cmd_flags, val, written_so_far;
 /* Sub-DWORD writes use the read-modify-write helper. */
5489 return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
 /* Offset and length must both be non-zero multiples of 4. */
5491 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
5492 DBPRINT(sc, BXE_WARN, "%s(): Unaligned address or invalid "
5493 "buffer for NVRAM write "
5494 "(offset = 0x%08X, buf_size = %d)!\n", __FUNCTION__,
5497 goto bxe_nvram_write_exit;
 /* The requested range must lie entirely within the flash. */
5500 if (offset + buf_size > sc->common.flash_size) {
5501 DBPRINT(sc, BXE_WARN, "%s(): Write extends beyond the end of "
5502 "the NVRAM (offset (0x%08X) + buf_size (%d) > flash_size "
5503 "(0x%08X))!\n", __FUNCTION__, offset, buf_size,
5504 sc->common.flash_size);
5506 goto bxe_nvram_write_exit;
5509 /* Request access to NVRAM interface. */
5510 rc = bxe_nvram_acquire_lock(sc);
5512 goto bxe_nvram_write_exit;
5514 /* Enable access to the NVRAM interface. */
5515 bxe_nvram_enable_access(sc);
5518 cmd_flags = MCPR_NVM_COMMAND_FIRST;
5519 while ((written_so_far < buf_size) && (rc == 0)) {
 /* Mark the last word of the buffer or of an NVRAM page. */
5520 if (written_so_far == (buf_size - sizeof(uint32_t)))
5521 cmd_flags |= MCPR_NVM_COMMAND_LAST;
5522 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
5523 cmd_flags |= MCPR_NVM_COMMAND_LAST;
5524 else if ((offset % NVRAM_PAGE_SIZE) == 0)
5525 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
5527 memcpy(&val, data_buf, 4);
5529 rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
5531 /* Advance to the next DWORD. */
5532 offset += sizeof(uint32_t);
5533 data_buf += sizeof(uint32_t);
5534 written_so_far += sizeof(uint32_t);
5538 /* Disable access to the NVRAM interface. */
5539 bxe_nvram_disable_access(sc);
5540 bxe_nvram_release_lock(sc);
5542 bxe_nvram_write_exit:
5543 DBEXIT(BXE_VERBOSE_NVRAM);
/*
5549 * This function validates NVRAM content by reading specific
5550 * regions and validating that the NVRAM checksum matches the
 * expected CRC32 residual.
 *
 * Returns:
5554 * 0 = Success, !0 = Failure.
 */
5557 bxe_nvram_test(struct bxe_softc *sc)
 /* Table of NVRAM regions to checksum: { offset, size } pairs. */
5559 static const struct {
5563 { 0, 0x14 }, /* bootstrap area*/
5564 { 0x14, 0xec }, /* directory area */
5565 { 0x100, 0x350 }, /* manuf_info */
5566 { 0x450, 0xf0 }, /* feature_info */
5567 { 0x640, 0x64 }, /* upgrade_key_info */
5568 { 0x708, 0x70 }, /* manuf_key_info */
 /* buf is sized for the largest region (manuf_info, 0x350 bytes). */
5571 uint32_t magic, csum, buf[0x350 / 4];
5575 DBENTER(BXE_VERBOSE_NVRAM);
5577 data = (uint8_t *) buf;
5579 /* Read the DWORD at offset 0 in NVRAM. */
5580 rc = bxe_nvram_read(sc, 0, data, 4);
5582 BXE_PRINTF("%s(%d): Error (%d) returned reading NVRAM!\n",
5583 __FILE__, __LINE__, rc);
5584 goto bxe_nvram_test_exit;
5587 /* Make sure we found our magic value. */
5588 magic = be32toh(buf[0]);
5589 if (magic != 0x669955aa) {
5590 BXE_PRINTF("%s(%d): Invalid magic value (0x%08x) found!\n",
5591 __FILE__, __LINE__, magic);
5593 goto bxe_nvram_test_exit;
5596 /* Read through each region in NVRAM and validate the checksum. */
5597 for (i = 0; nvram_tbl[i].size; i++) {
5598 DBPRINT(sc, BXE_VERBOSE_NVRAM, "%s(): Testing NVRAM region %d, "
5599 "starting offset = %d, length = %d\n", __FUNCTION__, i,
5600 nvram_tbl[i].offset, nvram_tbl[i].size);
5602 rc = bxe_nvram_read(sc, nvram_tbl[i].offset, data,
5605 BXE_PRINTF("%s(%d): Error (%d) returned reading NVRAM "
5606 "region %d!\n", __FILE__, __LINE__, rc, i);
5607 goto bxe_nvram_test_exit;
 /* A region containing its own CRC leaves a fixed residual. */
5610 csum = ether_crc32_le(data, nvram_tbl[i].size);
5611 if (csum != BXE_CRC32_RESIDUAL) {
5612 BXE_PRINTF("%s(%d): Checksum error (0x%08X) for NVRAM "
5613 "region %d!\n", __FILE__, __LINE__, csum, i);
5615 goto bxe_nvram_test_exit;
5619 bxe_nvram_test_exit:
5620 DBEXIT(BXE_VERBOSE_NVRAM);
/*
5625 * Acknowledge status block and modify interrupt mode.
 *
 * Builds an IGU acknowledge register value from the status block id,
 * storm id, index, interrupt mode, and update flag, then writes it to
 * this port's HC command register.
 *
 * Returns:
 *   Nothing.
 */
5630 static __inline void
5631 bxe_ack_sb(struct bxe_softc *sc, uint8_t sb_id, uint8_t storm, uint16_t index,
5632 uint8_t int_mode, uint8_t update)
5634 struct igu_ack_register igu_ack;
5637 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(sc) * 32 + COMMAND_REG_INT_ACK);
5638 igu_ack.status_block_index = index;
5639 igu_ack.sb_id_and_flags =
5640 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
5641 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
5642 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
5643 (int_mode << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
 /* Write the whole ack structure to the IGU as one 32-bit value. */
5646 REG_WR(sc, hc_addr, (*(uint32_t *) &igu_ack));
/*
5651 * Update fastpath status block index.
 *
 * Compares the cached CSTORM/USTORM indices against the hardware
 * status block and refreshes them if they changed.
 *
 * Returns:
5654 * 0 = No completes, 1 = TX completes, 2 = RX completes,
5655 * 3 = RX & TX completes
 */
5657 static __inline uint16_t
5658 bxe_update_fpsb_idx(struct bxe_fastpath *fp)
5660 struct host_status_block *fpsb;
5663 fpsb = fp->status_block;
5668 /* Check for any CSTORM transmit completions. */
5669 if (fp->fp_c_idx != le16toh(fpsb->c_status_block.status_block_index)) {
5670 fp->fp_c_idx = le16toh(fpsb->c_status_block.status_block_index);
5674 /* Check for any USTORM receive completions. */
5675 if (fp->fp_u_idx != le16toh(fpsb->u_status_block.status_block_index)) {
5676 fp->fp_u_idx = le16toh(fpsb->u_status_block.status_block_index);
/*
5684 * Acknowledge interrupt.
 *
 * Reads the SIMD mask from this port's HC command register, which
 * also acknowledges the interrupt at the IGU.
 *
 * Returns:
5687 * Interrupt value read from IGU.
 */
5690 bxe_ack_int(struct bxe_softc *sc)
5692 uint32_t hc_addr, result;
5694 hc_addr = HC_REG_COMMAND_REG + BP_PORT(sc) * 32 + COMMAND_REG_SIMD_MASK;
5695 result = REG_RD(sc, hc_addr);
5696 DBPRINT(sc, BXE_INSANE_INTR, "%s(): Read 0x%08X from HC addr 0x%08X\n",
5697 __FUNCTION__, result, hc_addr);
/*
5703 * Slowpath event handler.
 *
5705 * Checks that a ramrod completion occurs while the
5706 * controller is in the proper state.
 *
 * Advances the per-connection (fp->state) or device-wide (sc->state)
 * state machine according to which ramrod command completed.
 *
 * Returns:
 *   Nothing.
 */
5712 bxe_sp_event(struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe)
5714 struct bxe_softc *sc;
5718 DBENTER(BXE_VERBOSE_RAMROD);
 /* Extract the connection ID and ramrod command from the CQE. */
5720 cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
5721 command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
5722 DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): CID = %d, ramrod command = %d, "
5723 "device state = 0x%08X, fp[%02d].state = 0x%08X, type = %d\n",
5724 __FUNCTION__, cid, command, sc->state, fp->index, fp->state,
5725 rr_cqe->ramrod_cqe.ramrod_type);
5727 /* Free up an entry on the slowpath queue. */
5730 /* Handle ramrod commands that completed on a client connection. */
5732 /* Check for a completion for the current state. */
5733 switch (command | fp->state) {
5734 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BXE_FP_STATE_OPENING):
5735 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5736 "%s(): Completed fp[%02d] CLIENT_SETUP Ramrod.\n",
5738 fp->state = BXE_FP_STATE_OPEN;
5740 case (RAMROD_CMD_ID_ETH_HALT | BXE_FP_STATE_HALTING):
5741 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5742 "%s(): Completed fp[%02d] ETH_HALT ramrod\n",
5744 fp->state = BXE_FP_STATE_HALTED;
5747 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5748 "%s(): Unexpected microcode reply (%d) while "
5749 "in state 0x%04X!\n", __FUNCTION__, command,
5753 goto bxe_sp_event_exit;
5756 /* Handle ramrod commands that completed on the leading connection. */
5757 switch (command | sc->state) {
5758 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BXE_STATE_OPENING_WAIT4_PORT):
5759 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5760 "%s(): Completed PORT_SETUP ramrod.\n", __FUNCTION__);
5761 sc->state = BXE_STATE_OPEN;
5763 case (RAMROD_CMD_ID_ETH_HALT | BXE_STATE_CLOSING_WAIT4_HALT):
5764 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5765 "%s(): Completed ETH_HALT ramrod.\n", __FUNCTION__);
5766 sc->state = BXE_STATE_CLOSING_WAIT4_DELETE;
5767 fp->state = BXE_FP_STATE_HALTED;
5769 case (RAMROD_CMD_ID_ETH_CFC_DEL | BXE_STATE_CLOSING_WAIT4_HALT):
5770 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5771 "%s(): Completed fp[%02d] ETH_CFC_DEL ramrod.\n",
5773 sc->fp[cid].state = BXE_FP_STATE_CLOSED;
5775 case (RAMROD_CMD_ID_ETH_SET_MAC | BXE_STATE_OPEN):
5776 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5777 "%s(): Completed ETH_SET_MAC ramrod in STATE_OPEN state.\n",
5780 case (RAMROD_CMD_ID_ETH_SET_MAC | BXE_STATE_CLOSING_WAIT4_HALT):
5781 DBPRINT(sc, BXE_VERBOSE_RAMROD,
5782 "%s(): Completed ETH_SET_MAC ramrod in "
5783 "CLOSING_WAIT4_HALT state.\n", __FUNCTION__);
5786 DBPRINT(sc, BXE_FATAL, "%s(): Unexpected microcode reply (%d)! "
5787 "State is 0x%08X\n", __FUNCTION__, command, sc->state);
5791 /* Force bxe_wait_ramrod() to see the change. */
5793 DBEXIT(BXE_VERBOSE_RAMROD);
/*
5797 * Lock access to a hardware resource using controller arbitration
 * registers, retrying for up to 5 seconds.
 *
 * Returns:
5801 * 0 = Success, !0 = Failure.
 */
5804 bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource)
5806 uint32_t hw_lock_control_reg, lock_status, resource_bit;
5810 DBENTER(BXE_VERBOSE_MISC);
5811 DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Locking resource 0x%08X\n",
5812 __FUNCTION__, resource);
5815 resource_bit = 1 << resource;
 /* Select the per-function driver control register. */
5818 hw_lock_control_reg = ((func <= 5) ?
5819 (MISC_REG_DRIVER_CONTROL_1 + func * 8) :
5820 (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8));
5822 /* Validating that the resource is within range. */
5823 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
5824 DBPRINT(sc, BXE_WARN, "%s(): Resource is out of range! "
5825 "resource(0x%08X) > HW_LOCK_MAX_RESOURCE_VALUE(0x%08X)\n",
5826 __FUNCTION__, resource, HW_LOCK_MAX_RESOURCE_VALUE);
5828 goto bxe_acquire_hw_lock_exit;
5831 /* Validating that the resource is not already taken. */
5832 lock_status = REG_RD(sc, hw_lock_control_reg);
5833 if (lock_status & resource_bit) {
5834 DBPRINT(sc, BXE_WARN, "%s(): Failed to acquire lock! "
5835 "lock_status = 0x%08X, resource_bit = 0x%08X\n",
5836 __FUNCTION__, lock_status, resource_bit);
5838 goto bxe_acquire_hw_lock_exit;
5841 /* Try for 5 seconds every 5ms. */
5842 for (cnt = 0; cnt < 1000; cnt++) {
5843 /* Try to acquire the lock. */
5844 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
5845 lock_status = REG_RD(sc, hw_lock_control_reg);
5847 if (lock_status & resource_bit)
5848 goto bxe_acquire_hw_lock_exit;
5852 DBPRINT(sc, BXE_WARN, "%s(): Timeout!\n", __FUNCTION__);
5855 bxe_acquire_hw_lock_exit:
5856 DBEXIT(BXE_VERBOSE_MISC);
/*
5861 * Unlock access to a hardware resource using controller arbitration
 * registers. The resource must currently be locked.
 *
 * Returns:
5865 * 0 = Success, !0 = Failure.
 */
5868 bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource)
5870 uint32_t hw_lock_control_reg, lock_status, resource_bit;
5874 DBENTER(BXE_VERBOSE_MISC);
5875 DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Unlocking resource 0x%08X\n",
5876 __FUNCTION__, resource);
5878 resource_bit = 1 << resource;
5881 /* Validating that the resource is within range */
5882 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
5883 DBPRINT(sc, BXE_WARN, "%s(): Resource is out of range! "
5884 "resource(0x%08X) > HW_LOCK_MAX_RESOURCE_VALUE(0x%08X)\n",
5885 __FUNCTION__, resource, HW_LOCK_MAX_RESOURCE_VALUE);
5887 goto bxe_release_hw_lock_exit;
5890 /* Find the register for the resource lock. */
5891 hw_lock_control_reg = ((func <= 5) ?
5892 (MISC_REG_DRIVER_CONTROL_1 + func * 8) :
5893 (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8));
5895 /* Validating that the resource is currently taken */
5896 lock_status = REG_RD(sc, hw_lock_control_reg);
5897 if (!(lock_status & resource_bit)) {
5898 DBPRINT(sc, BXE_WARN, "%s(): The resource is not currently "
5899 "locked! lock_status = 0x%08X, resource_bit = 0x%08X\n",
5900 __FUNCTION__, lock_status, resource_bit);
5902 goto bxe_release_hw_lock_exit;
5905 /* Free the hardware lock. */
5906 REG_WR(sc, hw_lock_control_reg, resource_bit);
5908 bxe_release_hw_lock_exit:
5909 DBEXIT(BXE_VERBOSE_MISC);
/*
 * Read the current value of a GPIO pin, accounting for port swap.
 *
 * Returns:
 *   The pin value (or the invalid-GPIO error path's value).
 */
5914 bxe_get_gpio(struct bxe_softc *sc, int gpio_num, uint8_t port)
5916 uint32_t gpio_mask, gpio_reg;
5917 int gpio_port, gpio_shift, value;
5919 /* The GPIO should be swapped if swap register is set and active */
5920 gpio_port = (REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc,
5921 NIG_REG_STRAP_OVERRIDE)) ^ port;
5922 gpio_shift = gpio_num +
5923 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
5924 gpio_mask = 1 << gpio_shift;
5926 if (gpio_num > MISC_REGISTERS_GPIO_3) {
5927 DBPRINT(sc, BXE_WARN, "%s(): Invalid GPIO %d\n",
5928 __FUNCTION__, gpio_num);
5932 /* read GPIO value */
5933 gpio_reg = REG_RD(sc, MISC_REG_GPIO);
5935 /* get the requested pin value */
5936 if ((gpio_reg & gpio_mask) == gpio_mask)
5941 DBPRINT(sc, BXE_VERBOSE_PHY, "pin %d value 0x%x\n", gpio_num, value);
/*
5947 * Sets the state of a General Purpose I/O (GPIO).
 *
 * Drives the pin low/high or floats it (input), under the protection
 * of the GPIO hardware lock.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */
5953 bxe_set_gpio(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port)
5955 uint32_t gpio_reg, gpio_mask;
5956 int gpio_port, gpio_shift, rc;
5958 DBENTER(BXE_VERBOSE_MISC);
5960 /* The GPIO should be swapped if swap register is set and active. */
5961 gpio_port = (REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc,
5962 NIG_REG_STRAP_OVERRIDE)) ^ port;
5963 gpio_shift = gpio_num +
5964 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
5965 gpio_mask = (1 << gpio_shift);
5968 if (gpio_num > MISC_REGISTERS_GPIO_3) {
5969 DBPRINT(sc, BXE_FATAL, "%s(): Invalid GPIO (%d)!\n",
5970 __FUNCTION__, gpio_num);
5972 goto bxe_set_gpio_exit;
5975 /* Make sure no one else is trying to use the GPIO. */
5976 rc = bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
5978 DBPRINT(sc, BXE_WARN, "%s(): Can't acquire GPIO lock!\n",
5980 goto bxe_set_gpio_exit;
5983 /* Read GPIO and mask all but the float bits. */
5984 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
5987 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
5988 DBPRINT(sc, BXE_VERBOSE, "%s(): Set GPIO %d (shift %d) -> "
5989 "output low\n", __FUNCTION__, gpio_num, gpio_shift);
 /* Stop floating the pin and drive it low. */
5990 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
5991 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
5993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
5994 DBPRINT(sc, BXE_VERBOSE, "%s(): Set GPIO %d (shift %d) -> "
5995 "output high\n", __FUNCTION__, gpio_num, gpio_shift);
 /* Stop floating the pin and drive it high. */
5996 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
5997 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
5999 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
6000 DBPRINT(sc, BXE_VERBOSE, "%s(): Set GPIO %d (shift %d) -> "
6001 "input\n", __FUNCTION__, gpio_num, gpio_shift);
 /* Float the pin (high impedance input). */
6002 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
6005 DBPRINT(sc, BXE_FATAL, "%s(): Unknown GPIO mode (0x%08X)!\n",
6006 __FUNCTION__, mode);
6010 REG_WR(sc, MISC_REG_GPIO, gpio_reg);
6011 rc = bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
6013 DBPRINT(sc, BXE_WARN, "%s(): Can't release GPIO lock!\n",
6018 DBEXIT(BXE_VERBOSE_MISC);
/*
 * Set or clear the interrupt generation state of a GPIO pin,
 * accounting for port swap and protected by the GPIO hardware lock.
 *
 * Returns:
 *   0 on the normal path (error value on invalid GPIO).
 */
6023 bxe_set_gpio_int(struct bxe_softc *sc, int gpio_num, uint32_t mode,
6026 uint32_t gpio_mask, gpio_reg;
6027 int gpio_port, gpio_shift;
6029 /* The GPIO should be swapped if swap register is set and active */
6030 gpio_port = (REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc,
6031 NIG_REG_STRAP_OVERRIDE)) ^ port;
6032 gpio_shift = gpio_num +
6033 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
6034 gpio_mask = (1 << gpio_shift);
6035 if (gpio_num > MISC_REGISTERS_GPIO_3) {
6036 DBPRINT(sc, BXE_WARN, "%s(): Invalid GPIO %d\n",
6037 __FUNCTION__, gpio_num);
6041 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
6043 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
6046 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
6047 DBPRINT(sc, BXE_VERBOSE_PHY, "Clear GPIO INT %d (shift %d) -> "
6048 "output low\n", gpio_num, gpio_shift);
6049 /* clear SET and set CLR */
6050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
6051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
6053 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
6054 DBPRINT(sc, BXE_VERBOSE_PHY, "Set GPIO INT %d (shift %d) -> "
6055 "output high\n", gpio_num, gpio_shift);
6056 /* clear CLR and set SET */
6057 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
6058 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
6064 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
6065 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
/*
6071 * Sets the state of a Shared Purpose I/O (SPIO).
 *
 * Drives the pin low/high or floats it, under the SPIO hardware lock.
 * Only SPIO_4 through SPIO_7 are valid.
 *
 * Returns:
6074 * 0 = Success, !0 = Failure.
 */
6077 bxe_set_spio(struct bxe_softc *sc, int spio_num, uint32_t mode)
6079 uint32_t spio_reg, spio_mask;
6083 spio_mask = 1 << spio_num;
6085 /* Validate the SPIO. */
6086 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
6087 (spio_num > MISC_REGISTERS_SPIO_7)) {
6088 DBPRINT(sc, BXE_WARN, "%s(): Invalid SPIO (%d)!\n",
6089 __FUNCTION__, spio_num);
6091 goto bxe_set_spio_exit;
6094 rc = bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
6096 DBPRINT(sc, BXE_WARN, "%s(): Can't acquire SPIO lock!\n",
6098 goto bxe_set_spio_exit;
6101 /* Read SPIO and mask all but the float bits. */
6102 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
6105 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
6106 DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
6107 "output low\n", __FUNCTION__, spio_num);
 /* Stop floating the pin and drive it low. */
6108 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
6109 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
6111 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
6112 DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
6113 "output high\n", __FUNCTION__, spio_num);
 /* Stop floating the pin and drive it high. */
6114 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
6115 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
6117 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
6118 DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
6119 "input\n", __FUNCTION__, spio_num);
 /* Float the pin (high impedance input). */
6120 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
6123 DBPRINT(sc, BXE_WARN, "%s(): Unknown SPIO mode (0x%08X)!\n",
6124 __FUNCTION__, mode);
6128 REG_WR(sc, MISC_REG_SPIO, spio_reg);
6129 rc = bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
6131 DBPRINT(sc, BXE_WARN, "%s(): Can't release SPIO lock!\n",
/*
6140 * When the 57711E is operating in multi-function mode, the controller
6141 * must be configured to arbitrate TX between multiple VNICs.
 *
 * Initializes the per-port rate shaping and fairness variables from
 * the current link speed.
 *
 * Returns:
 *   Nothing.
 */
6147 bxe_init_port_minmax(struct bxe_softc *sc)
6149 uint32_t fair_periodic_timeout_usec, r_param, t_fair;
6151 DBENTER(BXE_VERBOSE_MISC);
 /* Line rate in bytes per microsecond (speed is in Mbps). */
6153 r_param = sc->link_vars.line_speed / 8;
6155 memset(&(sc->cmng.rs_vars), 0,
6156 sizeof(struct rate_shaping_vars_per_port));
6157 memset(&(sc->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
6159 /* 100 usec in SDM ticks = 25 since each tick is 4 usec. */
6160 sc->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
/*
6162 * This is the threshold below which no timer arming will occur.
6163 * We use a coefficient of 1, 25 so that the threshold is a
6164 * little bigger that real time to compensate for timer
 * inaccuracy.
 */
6167 sc->cmng.rs_vars.rs_threshold = (RS_PERIODIC_TIMEOUT_USEC *
6169 /* Resolution of fairness timer. */
6170 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
6172 /* For 10G it is 1000us, for 1G it is 10000us. */
6173 t_fair = T_FAIR_COEF / sc->link_vars.line_speed;
6174 /* This is the threshold where we won't arm the timer
 * anymore. */
6176 sc->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
/*
6178 * Multiply by 1e3/8 to get bytes/msec. We don't want the
6179 * credits to pass a credit of the T_FAIR*FAIR_MEM (algorithm
 * resolution).
 */
6182 sc->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
6183 /* Since each tick is 4 us. */
6184 sc->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
6186 DBEXIT(BXE_VERBOSE_MISC);
/*
6191 * This function is called when a link interrupt is generated
6192 * and configures the controller for the new link state.
 *
 * Synchronizes statistics, updates PHY/link state, programs the
 * pause configuration, and (on E1H multi-function parts) refreshes
 * congestion management and notifies sibling driver instances.
 *
 * Returns:
 *   Nothing.
 */
6198 bxe_link_attn(struct bxe_softc *sc)
6200 struct host_port_stats *pstats;
6201 uint32_t pause_enabled;
6202 int func, i, port, vn;
6204 DBENTER(BXE_VERBOSE_PHY);
6206 /* Make sure that we are synced with the current statistics. */
6207 bxe_stats_handle(sc, STATS_EVENT_STOP);
6209 bxe_link_update(&sc->link_params, &sc->link_vars);
6211 if (sc->link_vars.link_up) {
6212 if (CHIP_IS_E1H(sc)) {
 /* Tell the USTORM whether TX pause is active. */
6216 if (sc->link_vars.flow_ctrl & FLOW_CTRL_TX)
6219 REG_WR(sc, BAR_USTORM_INTMEM +
6220 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
6224 if (sc->link_vars.mac_type == MAC_TYPE_BMAC) {
6225 pstats = BXE_SP(sc, port_stats);
6226 /* Reset old BMAC statistics. */
6227 memset(&(pstats->mac_stx[0]), 0,
6228 sizeof(struct mac_stx));
6231 if ((sc->state == BXE_STATE_OPEN) ||
6232 (sc->state == BXE_STATE_DISABLED))
6233 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
6236 /* Need additional handling for multi-function devices. */
6239 if (sc->link_vars.link_up) {
6240 if (sc->dcc_enable == TRUE) {
6241 bxe_congestionmgmt(sc, TRUE);
6242 /* Store in internal memory. */
6244 sizeof(struct cmng_struct_per_port) / 4;
 /* Copy the cmng structure word-by-word into XSTORM memory. */
6246 REG_WR(sc, BAR_XSTORM_INTMEM +
6247 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + (i*4),
6248 ((uint32_t *)(&sc->cmng))[i]);
6252 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
6253 /* Don't send an attention to ourselves. */
6254 if (vn == BP_E1HVN(sc))
6256 func = ((vn << 1) | port);
/*
6258 * Send an attention to other drivers on the same port.
 */
6260 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_0 +
6261 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func) * 4, 1);
6265 DBEXIT(BXE_VERBOSE_PHY);
/*
6269 * Sets the driver instance as the port management function (PMF).
 *
6271 * This is only used on "multi-function" capable devices such as the
6272 * 57711E and initializes the controller so that the PMF driver instance
6273 * can interact with other driver instances that may be operating on
6274 * the same Ethernet port.
 *
 * Returns:
 *   Nothing.
 */
6280 bxe_pmf_update(struct bxe_softc *sc)
6285 /* Record that this driver instance is managing the port. */
6287 DBPRINT(sc, BXE_INFO, "%s(): Enabling this port as PMF.\n",
6290 /* Enable NIG attention. */
 /* Leading/trailing edge mask includes this function's E1HVN bit. */
6292 val = (0xff0f | (1 << (BP_E1HVN(sc) + 4)));
6293 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val);
6294 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val);
6296 bxe_stats_handle(sc, STATS_EVENT_PMF);
6299 /* 8073 Download definitions */
6300 /* spi Parameters.*/
6301 #define SPI_CTRL_1_L 0xC000
6302 #define SPI_CTRL_1_H 0xC002
6303 #define SPI_CTRL_2_L 0xC400
6304 #define SPI_CTRL_2_H 0xC402
6305 #define SPI_TXFIFO 0xD000
6306 #define SPI_RXFIFO 0xD400
6308 /* Input Command Messages.*/
/*
6310 * Write CPU/SPI Control Regs, followed by Count And CPU/SPI Controller
6311 * Reg add/data pairs.
 */
6313 #define WR_CPU_CTRL_REGS 0x11
/*
6315 * Read CPU/SPI Control Regs, followed by Count and CPU/SPI Controller
 * Reg addresses.
 */
6318 #define RD_CPU_CTRL_REGS 0xEE
/*
6320 * Write CPU/SPI Control Regs Continuously, followed by Count and
6321 * CPU/SPI Controller Reg addr and data's.
 */
6323 #define WR_CPU_CTRL_FIFO 0x66
6324 /* Output Command Messages.*/
6327 /* SPI Controller Commands (known As messages).*/
6328 #define MSGTYPE_HWR 0x40
6329 #define MSGTYPE_HRD 0x80
6330 #define WRSR_OPCODE 0x01
6331 #define WR_OPCODE 0x02
6332 #define RD_OPCODE 0x03
6333 #define WRDI_OPCODE 0x04
6334 #define RDSR_OPCODE 0x05
6335 #define WREN_OPCODE 0x06
6336 #define WR_BLOCK_SIZE 0x40 /* Maximum 64 Bytes Writes.*/
6339 * Post a slowpath command.
6341 * A slowpath command is used to propagate a configuration change through
6342 * the controller in a controlled manner, allowing each STORM processor and
6343 * other H/W blocks to phase in the change. The commands sent on the
6344 * slowpath are referred to as ramrods. Depending on the ramrod used the
6345 * completion of the ramrod will occur in different ways. Here's a
6346 * breakdown of ramrods and how they complete:
6348 * RAMROD_CMD_ID_ETH_PORT_SETUP
6349 * Used to setup the leading connection on a port. Completes on the
6350 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
6352 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
6353 * Used to setup an additional connection on a port. Completes on the
6354 * RCQ of the multi-queue/RSS connection being initialized.
6356 * RAMROD_CMD_ID_ETH_STAT_QUERY
6357 * Used to force the storm processors to update the statistics database
6358 * in host memory. This ramrod is send on the leading connection CID and
6359 * completes as an index increment of the CSTORM on the default status
6362 * RAMROD_CMD_ID_ETH_UPDATE
6363 * Used to update the state of the leading connection, usually to update
6364 * the RSS indirection table. Completes on the RCQ of the leading
6365 * connection. (Not currently used under FreeBSD until OS support becomes
6368 * RAMROD_CMD_ID_ETH_HALT
6369 * Used when tearing down a connection prior to driver unload. Completes
6370 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
6371 * use this on the leading connection.
6373 * RAMROD_CMD_ID_ETH_SET_MAC
6374 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
6375 * the RCQ of the leading connection.
6377 * RAMROD_CMD_ID_ETH_CFC_DEL
6378 * Used when tearing down a connection prior to driver unload. Completes
6379 * on the RCQ of the leading connection (since the current connection
6380 * has been completely removed from controller memory).
6382 * RAMROD_CMD_ID_ETH_PORT_DEL
6383 * Used to tear down the leading connection prior to driver unload,
6384 * typically fp[0]. Completes as an index increment of the CSTORM on the
6385 * default status block.
6387 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
6388 * Used for connection offload. Completes on the RCQ of the multi-queue
6389 * RSS connection that is being offloaded. (Not currently used under
6392 * There can only be one command pending per function.
6395 * 0 = Success, !0 = Failure.
/*
 * Post a slowpath command (ramrod) on the slowpath queue (SPQ).
 *
 * Builds an SPQ buffer descriptor for 'command' on connection 'cid',
 * attaches the configuration data address (data_hi:data_lo), and kicks
 * the XSTORM producer register so firmware consumes the entry.
 * NOTE(review): excerpt elides the opening brace, panic check condition,
 * the spq_left decrement, and the producer-index update lines.
 */
6398 bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi,
6399 uint32_t data_lo, int common)
6403 DBRUNMSG((BXE_EXTREME_LOAD | BXE_EXTREME_RESET |
6404 BXE_EXTREME_UNLOAD | BXE_EXTREME_RAMROD),
6405 bxe_decode_ramrod_cmd(sc, command));
6407 DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): cid = %d, data_hi = 0x%08X, "
6408 "data_low = 0x%08X, remaining spq entries = %d\n", __FUNCTION__,
6409 cid, data_hi, data_lo, sc->spq_left);
6412 /* Skip all slowpath commands if the driver has panic'd. */
6415 goto bxe_sp_post_exit;
6420 /* We are limited to 8 slowpath commands. */
6421 if (!sc->spq_left) {
6422 BXE_PRINTF("%s(%d): Slowpath queue is full!\n",
6423 __FILE__, __LINE__);
6426 goto bxe_sp_post_exit;
6429 /* Encode the CID with the command. */
6430 sc->spq_prod_bd->hdr.conn_and_cmd_data =
6431 htole32(((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)));
/* Descriptor fields are little-endian as seen by the hardware. */
6432 sc->spq_prod_bd->hdr.type = htole16(ETH_CONNECTION_TYPE);
/* 'common' ramrods are flagged so firmware treats them port-wide —
 * the guarding if (common) line is elided in this excerpt. */
6435 sc->spq_prod_bd->hdr.type |=
6436 htole16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
6438 /* Point the hardware at the new configuration data. */
6439 sc->spq_prod_bd->data.mac_config_addr.hi = htole32(data_hi);
6440 sc->spq_prod_bd->data.mac_config_addr.lo = htole32(data_lo);
6442 /* Reduce the number of available slots for slowpath commands. */
6445 /* Manage the end of the ring: wrap the producer back to the base. */
6446 if (sc->spq_prod_bd == sc->spq_last_bd) {
6447 sc->spq_prod_bd = sc->spq;
6448 sc->spq_prod_idx = 0;
6449 DBPRINT(sc, BXE_VERBOSE, "%s(): End of slowpath queue.\n",
6457 /* Kick off the slowpath command by writing the new producer index. */
6458 REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
6468 * Acquire the MCP access lock.
6471 * 0 = Success, !0 = Failure.
/*
 * Acquire the MCP access lock ("ALR") shared with management firmware.
 *
 * Spins on the mcpr_access_lock SPLIT register (GRCBASE_MCP + 0x9c) until
 * bit 31 reads back set, indicating the lock was granted. On timeout only
 * a warning is logged. NOTE(review): the loop body's delay and the 'val'
 * initialization lines are elided in this excerpt.
 */
6474 bxe_acquire_alr(struct bxe_softc *sc)
6479 DBENTER(BXE_VERBOSE_MISC);
6483 /* Acquire lock using mcpr_access_lock SPLIT register. */
6484 for (i = 0; i < retries * 10; i++) {
6486 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6487 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
/* Bit 31 set on readback means the lock has been granted. */
6489 if (val & (1L << 31))
6495 if (!(val & (1L << 31))) {
6496 DBPRINT(sc, BXE_WARN,
6497 "%s(): Cannot acquire MCP split access lock.\n",
6502 DBEXIT(BXE_VERBOSE_MISC);
6508 * Release the MCP access lock.
/*
 * Release the MCP access lock by clearing the mcpr_access_lock SPLIT
 * register. Counterpart to bxe_acquire_alr().
 */
6514 bxe_release_alr(struct bxe_softc* sc)
6517 DBENTER(BXE_VERBOSE_MISC);
6519 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6521 DBEXIT(BXE_VERBOSE_MISC);
6525 * Update driver's copies of the values in the host default status block.
6528 * Bitmap indicating changes to the block.
/*
 * Refresh the driver's cached indices from the host default status block.
 *
 * Compares each hardware-written index (attention, C/U/X/T STORM) against
 * the driver's copy and updates the copy when it changed. Returns a bitmap
 * of which indices changed (the bitmap accumulation lines are elided in
 * this excerpt). Indices are little-endian in the DSB, hence le16toh().
 */
6530 static __inline uint16_t
6531 bxe_update_dsb_idx(struct bxe_softc *sc)
6533 struct host_def_status_block *dsb;
6538 /* Read memory barrier since block is written by hardware. */
/* Attention bits index. */
6541 if (sc->def_att_idx !=
6542 le16toh(dsb->atten_status_block.attn_bits_index)) {
6544 le16toh(dsb->atten_status_block.attn_bits_index);
/* CSTORM default status block index. */
6548 if (sc->def_c_idx !=
6549 le16toh(dsb->c_def_status_block.status_block_index)) {
6551 le16toh(dsb->c_def_status_block.status_block_index);
/* USTORM default status block index. */
6555 if (sc->def_u_idx !=
6556 le16toh(dsb->u_def_status_block.status_block_index)) {
6558 le16toh(dsb->u_def_status_block.status_block_index);
/* XSTORM default status block index. */
6562 if (sc->def_x_idx !=
6563 le16toh(dsb->x_def_status_block.status_block_index)) {
6565 le16toh(dsb->x_def_status_block.status_block_index);
/* TSTORM default status block index. */
6569 if (sc->def_t_idx !=
6570 le16toh(dsb->t_def_status_block.status_block_index)) {
6572 le16toh(dsb->t_def_status_block.status_block_index);
6580 * Handle any attentions that have been newly asserted.
/*
 * Handle newly asserted attention bits.
 *
 * Masks the asserted bits in the per-port AEU mask (under the HW port
 * attention lock), records them in sc->attn_state, services hard-wired
 * attentions (NIG, software timers, GPIOs, general attentions 1-6), then
 * acknowledges the bits to the HC and restores the NIG interrupt mask.
 */
6586 bxe_attn_int_asserted(struct bxe_softc *sc, uint32_t asserted)
6588 uint32_t aeu_addr, hc_addr, nig_int_mask_addr;
6589 uint32_t aeu_mask, nig_mask;
6592 DBENTER(BXE_VERBOSE_INTR);
/* Select the per-port HC command, AEU mask, and NIG mask registers. */
6595 hc_addr = (HC_REG_COMMAND_REG + port * 32 + COMMAND_REG_ATTN_BITS_SET);
6596 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6597 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6598 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
6599 NIG_REG_MASK_INTERRUPT_PORT0;
/* An attention that is already recorded should not assert again. */
6602 if (sc->attn_state & asserted)
6603 BXE_PRINTF("%s(%d): IGU attention ERROR!\n",
6604 __FILE__, __LINE__);
/* The AEU mask is shared with the MCP; take the HW lock first. */
6606 rc = bxe_acquire_hw_lock(sc,
6607 HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
6609 DBPRINT(sc, BXE_WARN,
6610 "%s(): Failed to acquire attention lock for port %d!\n",
6611 __FUNCTION__, port);
6612 goto bxe_attn_int_asserted_exit;
6615 aeu_mask = REG_RD(sc, aeu_addr);
6616 DBPRINT(sc, BXE_VERBOSE_INTR,
6617 "%s(): aeu_mask = 0x%08X, newly asserted = 0x%08X\n", __FUNCTION__,
6618 aeu_mask, asserted);
/* Mask (disable) the newly asserted attentions in the low byte. */
6620 aeu_mask &= ~(asserted & 0xff);
6621 DBPRINT(sc, BXE_VERBOSE_INTR, "%s(): new mask = 0x%08X\n", __FUNCTION__,
6623 REG_WR(sc, aeu_addr, aeu_mask);
6625 rc = bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
6627 DBPRINT(sc, BXE_WARN,
6628 "%s(): Failed to release attention lock!\n", __FUNCTION__);
6629 goto bxe_attn_int_asserted_exit;
6632 DBPRINT(sc, BXE_VERBOSE_INTR, "%s(): attn_state = 0x%08X\n",
6633 __FUNCTION__, sc->attn_state);
/* Record the asserted bits so deassertion can be matched later. */
6635 sc->attn_state |= asserted;
6636 DBPRINT(sc, BXE_VERBOSE_INTR, "%s(): new attn_state = 0x%08X\n",
6637 __FUNCTION__, sc->attn_state);
/* Service the hard-wired (non-dynamic) attention sources. */
6639 if (asserted & ATTN_HARD_WIRED_MASK) {
6640 if (asserted & ATTN_NIG_FOR_FUNC) {
6641 bxe_acquire_phy_lock(sc);
6643 /* Save NIG interrupt mask and disable NIG interrupts. */
6644 nig_mask = REG_RD(sc, nig_int_mask_addr);
6645 REG_WR(sc, nig_int_mask_addr, 0);
6650 if (asserted & ATTN_SW_TIMER_4_FUNC)
6651 DBPRINT(sc, BXE_WARN, "%s(): ATTN_SW_TIMER_4_FUNC!\n",
6654 if (asserted & GPIO_2_FUNC)
6655 DBPRINT(sc, BXE_WARN, "%s(): GPIO_2_FUNC!\n",
6658 if (asserted & GPIO_3_FUNC)
6659 DBPRINT(sc, BXE_WARN, "%s(): GPIO_3_FUNC!\n",
6662 if (asserted & GPIO_4_FUNC)
6663 DBPRINT(sc, BXE_WARN, "%s(): GPIO_4_FUNC!\n",
/* General attentions 1-6: log and clear the latch register. */
6667 if (asserted & ATTN_GENERAL_ATTN_1) {
6668 DBPRINT(sc, BXE_WARN,
6669 "%s(): ATTN_GENERAL_ATTN_1!\n",
6671 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
6674 if (asserted & ATTN_GENERAL_ATTN_2) {
6675 DBPRINT(sc, BXE_WARN,
6676 "%s(): ATTN_GENERAL_ATTN_2!\n",
6678 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
6681 if (asserted & ATTN_GENERAL_ATTN_3) {
6682 DBPRINT(sc, BXE_WARN,
6683 "%s(): ATTN_GENERAL_ATTN_3!\n",
6685 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
6688 if (asserted & ATTN_GENERAL_ATTN_4) {
6689 DBPRINT(sc, BXE_WARN,
6690 "%s(): ATTN_GENERAL_ATTN_4!\n",
6692 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
6695 if (asserted & ATTN_GENERAL_ATTN_5) {
6696 DBPRINT(sc, BXE_WARN,
6697 "%s(): ATTN_GENERAL_ATTN_5!\n",
6699 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
6701 if (asserted & ATTN_GENERAL_ATTN_6) {
6702 DBPRINT(sc, BXE_WARN,
6703 "%s(): ATTN_GENERAL_ATTN_6!\n",
6705 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
/* Acknowledge the asserted bits to the host coalescing block. */
6710 DBPRINT(sc, BXE_VERBOSE_INTR,
6711 "%s(): Writing 0x%08X to HC addr 0x%08X\n", __FUNCTION__,
6713 REG_WR(sc, hc_addr, asserted);
6715 /* Now set back the NIG mask saved above and drop the PHY lock. */
6716 if (asserted & ATTN_NIG_FOR_FUNC) {
6717 REG_WR(sc, nig_int_mask_addr, nig_mask);
6718 bxe_release_phy_lock(sc);
6721 bxe_attn_int_asserted_exit:
6722 DBEXIT(BXE_VERBOSE_INTR);
6726 * Handle any attentions that have been newly deasserted.
/*
 * Handle deasserted attention group 0.
 *
 * Covers SPIO5 (fan failure on some boards — shuts down the PHY to
 * prevent damage), GPIO3 module-detect interrupts, and fatal hardware
 * block attentions in set 0.
 */
6731 static __inline void
6732 bxe_attn_int_deasserted0(struct bxe_softc *sc, uint32_t attn)
6734 uint32_t val, swap_val, swap_override;
6735 int port, reg_offset;
6737 DBENTER(BXE_VERBOSE_INTR);
6740 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6741 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6743 /* Handle SPIO5 attention: first disable it in the AEU enable reg. */
6744 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
6745 val = REG_RD(sc, reg_offset);
6746 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
6747 REG_WR(sc, reg_offset, val);
6749 DBPRINT(sc, BXE_FATAL, "%s(): SPIO5 H/W attention!\n",
6751 /* Fan failure handling depends on the external PHY type. */
6752 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
6753 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6755 * SPIO5 is used on A1022G boards to indicate
6756 * fan failure. Shutdown the controller and
6757 * associated PHY to avoid damage.
6760 /* Low power mode is controlled by GPIO 2. */
6761 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_2,
6762 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6763 /* PHY reset is controlled by GPIO 1. */
6764 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_1,
6765 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6767 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6768 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6770 * The PHY reset is controlled by GPIO 1.
6771 * Fake the port number to cancel the swap done in
6774 swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
6775 swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
/* NOTE(review): '&&' (logical) rather than '&' looks suspicious here,
 * but it matches the upstream driver — confirm before changing. */
6776 port = (swap_val && swap_override) ^ 1;
6777 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_1,
6778 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6784 /* Mark the failure in shared memory for the management FW. */
6785 sc->link_params.ext_phy_config &=
6786 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6787 sc->link_params.ext_phy_config |=
6788 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6789 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6790 sc->link_params.ext_phy_config);
6791 /* Log the failure */
6792 BXE_PRINTF("A fan failure has caused the driver to "
6793 "shutdown the device to prevent permanent damage.\n");
/* SFP+ module detect interrupt — serviced under the PHY lock. */
6796 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
6797 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
6798 bxe_acquire_phy_lock(sc);
6799 bxe_handle_module_detect_int(&sc->link_params);
6800 bxe_release_phy_lock(sc);
6803 /* Checking for an assert on the zero block */
6804 if (attn & HW_INTERRUT_ASSERT_SET_0) {
6805 val = REG_RD(sc, reg_offset);
6806 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
6807 REG_WR(sc, reg_offset, val);
6809 BXE_PRINTF("%s(%d): FATAL hardware block attention "
6810 "(set0 = 0x%08X)!\n", __FILE__, __LINE__,
6811 (attn & (uint32_t)HW_INTERRUT_ASSERT_SET_0));
6816 DBEXIT(BXE_VERBOSE_INTR);
6820 * Handle any attentions that have been newly deasserted.
/*
 * Handle deasserted attention group 1.
 *
 * Covers doorbell queue (DORQ) hardware attentions and fatal hardware
 * block attentions in set 1 (the enable bits for which are cleared so
 * they do not re-fire).
 */
6825 static __inline void
6826 bxe_attn_int_deasserted1(struct bxe_softc *sc, uint32_t attn)
6829 int port, reg_offset;
6831 DBENTER(BXE_VERBOSE_INTR);
6833 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
/* Reading the status-clear register also acknowledges the interrupt. */
6834 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
6836 DBPRINT(sc, BXE_FATAL,
6837 "%s(): Doorbell hardware attention (0x%08X).\n",
6840 /* DORQ discard attention */
6842 DBPRINT(sc, BXE_FATAL,
6843 "%s(): FATAL doorbell queue error!\n",
6847 if (attn & HW_INTERRUT_ASSERT_SET_1) {
6849 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
6850 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1;
/* Disable the offending attention sources in the AEU enable reg. */
6852 val = REG_RD(sc, reg_offset);
6853 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
6854 REG_WR(sc, reg_offset, val);
6856 BXE_PRINTF("%s(%d): FATAL hardware block attention "
6857 "(set1 = 0x%08X)!\n", __FILE__, __LINE__,
6858 (attn & (uint32_t)HW_INTERRUT_ASSERT_SET_1));
6863 DBEXIT(BXE_VERBOSE_INTR);
6867 * Handle any attentions that have been newly deasserted.
/*
 * Handle deasserted attention group 2.
 *
 * Covers CFC and PXP hardware attentions and fatal hardware block
 * attentions in set 2.
 */
6872 static __inline void
6873 bxe_attn_int_deasserted2(struct bxe_softc *sc, uint32_t attn)
6876 int port, reg_offset;
6878 DBENTER(BXE_VERBOSE_INTR);
6880 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
/* Read of the status-clear register acknowledges the interrupt. */
6881 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
6883 DBPRINT(sc, BXE_FATAL,
6884 "%s(): CFC hardware attention (0x%08X).\n", __FUNCTION__,
6887 /* CFC error attention. */
6889 DBPRINT(sc, BXE_FATAL, "%s(): FATAL CFC error!\n",
6893 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
6894 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
6896 DBPRINT(sc, BXE_FATAL,
6897 "%s(): PXP hardware attention (0x%08X).\n", __FUNCTION__,
6900 /* RQ_USDMDP_FIFO_OVERFLOW */
6902 DBPRINT(sc, BXE_FATAL, "%s(): FATAL PXP error!\n",
6906 if (attn & HW_INTERRUT_ASSERT_SET_2) {
6908 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
6909 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2;
/* Disable the offending attention sources in the AEU enable reg. */
6911 val = REG_RD(sc, reg_offset);
6912 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
6913 REG_WR(sc, reg_offset, val);
6915 BXE_PRINTF("%s(%d): FATAL hardware block attention (set2 = "
6916 "0x%08X)! port=%d, val written=0x%x attn=0x%x\n", __FILE__,
6917 __LINE__, (attn & (uint32_t)HW_INTERRUT_ASSERT_SET_2),
6923 DBEXIT(BXE_VERBOSE_INTR);
6927 * Handle any attentions that have been newly deasserted.
/*
 * Handle deasserted attention group 3.
 *
 * Covers general attentions: PMF link notifications (including DCC
 * events and PMF hand-over), STORM microcode asserts, bootcode (MCP)
 * asserts, and latched attentions (GRC timeout / reserved access).
 */
6932 static __inline void
6933 bxe_attn_int_deasserted3(struct bxe_softc *sc, uint32_t attn)
6938 DBENTER(BXE_VERBOSE_INTR);
6940 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
6941 /* Look for any port assertions. */
6942 if (attn & BXE_PMF_LINK_ASSERT) {
6944 * We received a message from the driver instance
6945 * that is managing the Ethernet port (link up/down).
6946 * Go ahead and handle it.
6950 DBPRINT(sc, BXE_INFO,
6951 "%s(): Received link attention from PMF.\n",
6954 /* Clear the attention. */
6955 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
/* Refresh the multi-function config from shared memory. */
6956 sc->mf_config[BP_E1HVN(sc)] =
6958 mf_cfg.func_mf_config[(sc->bxe_func & 1)].config);
6959 val = SHMEM_RD(sc, func_mb[func].drv_status);
6960 if (sc->dcc_enable == TRUE) {
6961 if (val & DRV_STATUS_DCC_EVENT_MASK)
6963 val & DRV_STATUS_DCC_EVENT_MASK);
6965 bxe__link_status_update(sc);
/* Become the PMF if the previous PMF handed over control. */
6967 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
6969 /* Look for any microcode assertions. */
6970 } else if (attn & BXE_MC_ASSERT_BITS) {
6971 DBPRINT(sc, BXE_FATAL, "%s(): Microcode assert!\n",
/* Clear the per-STORM general attention latches. */
6974 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
6975 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
6976 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
6977 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
6981 /* Look for any bootcode assertions. */
6982 } else if (attn & BXE_MCP_ASSERT) {
6983 DBPRINT(sc, BXE_FATAL, "%s(): Bootcode assert!\n",
6986 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
6988 DBRUN(bxe_dump_fw(sc));
6990 DBPRINT(sc, BXE_FATAL,
6991 "%s(): Unknown hardware assertion "
6992 "(attn = 0x%08X)!\n", __FUNCTION__, attn);
6995 /* Look for any hardware latched attentions. */
6996 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
6997 DBPRINT(sc, BXE_FATAL,
6998 "%s(): Latched attention 0x%08X (masked)!\n", __FUNCTION__,
7001 /* Check if a GRC register access timeout occurred. */
7002 if (attn & BXE_GRC_TIMEOUT) {
/* Timeout detail register exists only on E1H parts. */
7003 val = CHIP_IS_E1H(sc) ? REG_RD(sc,
7004 MISC_REG_GRC_TIMEOUT_ATTN) : 0;
7006 DBPRINT(sc, BXE_WARN,
7007 "%s(): GRC timeout for register 0x%08X!\n",
7011 /* Check if a GRC reserved register was accessed. */
7012 if (attn & BXE_GRC_RSV) {
7013 val = CHIP_IS_E1H(sc) ? REG_RD(sc,
7014 MISC_REG_GRC_RSV_ATTN) : 0;
7016 DBPRINT(sc, BXE_WARN,
7017 "%s(): GRC register 0x%08X is reserved!\n",
/* Clear all latched attention signals. */
7021 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
7024 DBEXIT(BXE_VERBOSE_INTR);
7028 * Handle any attentions that have been newly deasserted.
/*
 * Handle newly deasserted attention bits.
 *
 * Under the shared ALR lock, reads the four after-invert attention
 * signal registers, dispatches each deasserted dynamic group to the
 * per-group handlers (deasserted0-3), flags parity attentions, then
 * acknowledges the bits to the HC, re-enables them in the AEU mask
 * (under the HW port attention lock), and clears them in attn_state.
 */
7034 bxe_attn_int_deasserted(struct bxe_softc *sc, uint32_t deasserted)
7036 struct attn_route attn;
7037 struct attn_route group_mask;
7038 uint32_t val, reg_addr, aeu_mask;
7041 DBENTER(BXE_VERBOSE_INTR);
7044 * Need to take HW lock because MCP or other port might also try
7045 * to handle this event.
7047 bxe_acquire_alr(sc);
7050 /* Get the current attention signal bits. */
7051 attn.sig[0] = REG_RD(sc,
7052 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4);
7053 attn.sig[1] = REG_RD(sc,
7054 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4);
7055 attn.sig[2] = REG_RD(sc,
7056 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4);
7057 attn.sig[3] = REG_RD(sc,
7058 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4);
7060 DBPRINT(sc, BXE_EXTREME_INTR,
7061 "%s(): attention = 0x%08X 0x%08X 0x%08X 0x%08X\n", __FUNCTION__,
7062 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
7065 * Compare the current attention bits to each attention group
7066 * to see if anyone has registered this attention.
7068 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
7069 if (deasserted & (1 << index)) {
7070 group_mask = sc->attn_group[index];
7072 DBPRINT(sc, BXE_EXTREME_INTR,
7073 "%s(): group[%02d] = 0x%08X 0x%08X 0x%08x 0X%08x\n",
7074 __FUNCTION__, index, group_mask.sig[0],
7075 group_mask.sig[1], group_mask.sig[2],
7078 /* Handle any registered attentions. */
7079 bxe_attn_int_deasserted3(sc,
7080 attn.sig[3] & group_mask.sig[3]);
7081 bxe_attn_int_deasserted1(sc,
7082 attn.sig[1] & group_mask.sig[1]);
7083 bxe_attn_int_deasserted2(sc,
7084 attn.sig[2] & group_mask.sig[2]);
7085 bxe_attn_int_deasserted0(sc,
7086 attn.sig[0] & group_mask.sig[0]);
/* Flag any hardware parity attentions — these are fatal. */
7088 if ((attn.sig[0] & group_mask.sig[0] &
7089 HW_PRTY_ASSERT_SET_0) ||
7090 (attn.sig[1] & group_mask.sig[1] &
7091 HW_PRTY_ASSERT_SET_1) ||
7092 (attn.sig[2] & group_mask.sig[2] &
7093 HW_PRTY_ASSERT_SET_2))
7094 BXE_PRINTF("%s(%d): FATAL hardware block "
7095 "parity attention!\n", __FILE__, __LINE__);
7099 bxe_release_alr(sc);
/* Acknowledge the deasserted bits to the host coalescing block. */
7101 reg_addr = (HC_REG_COMMAND_REG +
7102 port * 32 + COMMAND_REG_ATTN_BITS_CLR);
7105 DBPRINT(sc, BXE_EXTREME_INTR,
7106 "%s(): About to mask 0x%08X at HC addr 0x%08X\n", __FUNCTION__,
7107 deasserted, reg_addr);
7108 REG_WR(sc, reg_addr, val);
/* A deasserted bit we never recorded as asserted indicates a bug. */
7110 if (~sc->attn_state & deasserted)
7111 DBPRINT(sc, BXE_FATAL, "%s(): IGU Bug!\n", __FUNCTION__);
7113 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7114 MISC_REG_AEU_MASK_ATTN_FUNC_0;
/* Re-enable the deasserted attentions in the shared AEU mask. */
7116 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7117 aeu_mask = REG_RD(sc, reg_addr);
7119 DBPRINT(sc, BXE_EXTREME_INTR,
7120 "%s(): Current aeu_mask = 0x%08X, newly deasserted = 0x%08X\n",
7121 __FUNCTION__, aeu_mask, deasserted);
7122 aeu_mask |= (deasserted & 0xff);
7124 DBPRINT(sc, BXE_EXTREME_INTR, "%s(): New aeu_mask = 0x%08X\n",
7125 __FUNCTION__, aeu_mask);
7127 REG_WR(sc, reg_addr, aeu_mask);
7128 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7130 DBPRINT(sc, BXE_EXTREME_INTR, "%s(): Current attn_state = 0x%08X\n",
7131 __FUNCTION__, sc->attn_state);
7133 sc->attn_state &= ~deasserted;
7134 DBPRINT(sc, BXE_EXTREME_INTR, "%s(): New attn_state = 0x%08X\n",
7135 __FUNCTION__, sc->attn_state);
7137 DBEXIT(BXE_VERBOSE_INTR);
7141 * Handle interrupts caused by internal attentions (everything else other
7142 * than RX, TX, and link state changes).
/*
 * Top-level attention interrupt handler.
 *
 * Derives the newly asserted and newly deasserted attention bit sets
 * from the default status block's attn_bits/attn_bits_ack versus the
 * driver's cached attn_state, sanity-checks them, and dispatches to
 * bxe_attn_int_asserted()/bxe_attn_int_deasserted().
 */
7148 bxe_attn_int(struct bxe_softc* sc)
7150 uint32_t attn_ack, attn_bits, attn_state;
7151 uint32_t asserted, deasserted;
7153 DBENTER(BXE_VERBOSE_INTR);
7155 attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
7157 le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
7158 attn_state = sc->attn_state;
/* Asserted: set in HW, not yet acked, not yet recorded by driver.
 * Deasserted: cleared in HW, still acked, still recorded by driver. */
7159 asserted = attn_bits & ~attn_ack & ~attn_state;
7160 deasserted = ~attn_bits & attn_ack & attn_state;
7162 /* Make sure we're in a sane state. */
7163 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
7164 BXE_PRINTF("%s(%d): Bad attention state!\n",
7165 __FILE__, __LINE__);
7167 /* Handle any attentions that are newly asserted. */
7169 DBPRINT(sc, BXE_VERBOSE_INTR,
7170 "%s(): attn_state = 0x%08X, attn_bits = 0x%08X, "
7171 "attn_ack = 0x%08X, asserted = 0x%08X\n", __FUNCTION__,
7172 attn_state, attn_bits, attn_ack, asserted);
7173 bxe_attn_int_asserted(sc, asserted);
7176 /* Handle any attentions that are newly deasserted. */
7178 DBPRINT(sc, BXE_VERBOSE_INTR,
7179 "%s(): attn_state = 0x%08X, attn_bits = 0x%08X, "
7180 "attn_ack = 0x%08X, deasserted = 0x%08X\n", __FUNCTION__,
7181 attn_state, attn_bits, attn_ack, deasserted);
7182 bxe_attn_int_deasserted(sc, deasserted);
7185 DBEXIT(BXE_VERBOSE_INTR);
/*
 * 64-bit statistics arithmetic helpers.
 *
 * The STORM firmware and MAC hardware expose 64-bit counters as hi/lo
 * 32-bit halves, so these macros implement add/subtract with manual
 * carry/borrow propagation across the halves. The UPDATE_* macros rely
 * on locals declared in their call sites (diff, new, old, pstats,
 * estats, qstats, *client pointers).
 */
7188 /* sum[hi:lo] += add[hi:lo] */
7189 #define ADD_64(s_hi, a_hi, s_lo, a_lo) do { \
7191 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
7194 /* Subtraction = minuend -= subtrahend */
7195 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
7197 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
7201 /* difference = minuend - subtrahend (with borrow from the hi half) */
7202 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) do { \
7203 if (m_lo < s_lo) { \
7205 d_hi = m_hi - s_hi; \
7207 /* we can 'loan' 1 */ \
7209 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
7211 /* m_hi <= s_hi */ \
7216 /* m_lo >= s_lo */ \
7217 if (m_hi < s_hi) { \
7221 /* m_hi >= s_hi */ \
7222 d_hi = m_hi - s_hi; \
7223 d_lo = m_lo - s_lo; \
/* Fold a 64-bit MAC counter delta into the accumulated port stats. */
7228 #define UPDATE_STAT64(s, t) do { \
7229 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi,\
7230 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
7231 pstats->mac_stx[0].t##_hi = new->s##_hi; \
7232 pstats->mac_stx[0].t##_lo = new->s##_lo; \
7233 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
7234 pstats->mac_stx[1].t##_lo, diff.lo); \
/* Fold a 64-bit NIG counter delta into the ethernet stats. */
7237 #define UPDATE_STAT64_NIG(s, t) do { \
7238 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
7239 diff.lo, new->s##_lo, old->s##_lo); \
7240 ADD_64(estats->t##_hi, diff.hi, \
7241 estats->t##_lo, diff.lo); \
7244 /* sum[hi:lo] += add (32-bit addend, carry into the hi half) */
7245 #define ADD_EXTEND_64(s_hi, s_lo, a) do { \
7247 s_hi += (s_lo < a) ? 1 : 0; \
/* Extend a 32-bit MAC counter into the 64-bit accumulated stats. */
7250 #define UPDATE_EXTEND_STAT(s) do { \
7251 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
7252 pstats->mac_stx[1].s##_lo, new->s); \
/* Per-queue TSTORM client counter delta into queue stats. */
7255 #define UPDATE_EXTEND_TSTAT(s, t) do { \
7256 diff = (tclient->s) - (old_tclient->s); \
7257 old_tclient->s = (tclient->s); \
7258 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
/* Per-queue XSTORM client counter delta into queue stats. */
7261 #define UPDATE_EXTEND_XSTAT(s, t) do { \
7262 diff = xclient->s - old_xclient->s; \
7263 old_xclient->s = xclient->s; \
7264 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
/* Per-queue USTORM client counter delta into queue stats. */
7267 #define UPDATE_EXTEND_USTAT(s, t) do { \
7268 diff = uclient->s - old_uclient->s; \
7269 old_uclient->s = uclient->s; \
7270 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
/* minuend[hi:lo] -= 32-bit subtrahend */
7273 #define SUB_EXTEND_64(m_hi, m_lo, s)do { \
7274 SUB_64(m_hi, 0, m_lo, s); \
/* Subtract a USTORM client counter delta from queue stats. */
7277 #define SUB_EXTEND_USTAT(s, t)do { \
7278 diff = (uclient->s) - (old_uclient->s); \
7279 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
/*
 * BITS_PER_LONG selects between the two definitions at preprocess time
 * (the guarding #if/#else lines are elided in this excerpt).
 */
7286 #define BITS_PER_LONG 32
7288 #define BITS_PER_LONG 64
/*
 * Combine a hi/lo 32-bit counter pair (hi at hiref[0], lo at hiref[1])
 * into a single long. On 64-bit longs the full 64-bit value is returned;
 * the 32-bit fallback path is elided in this excerpt.
 */
7291 static __inline long
7292 bxe_hilo(uint32_t *hiref)
7297 #if (BITS_PER_LONG == 64)
7298 uint32_t hi = *hiref;
7299 return (HILO_U64(hi, lo));
7306 * Request the STORM statistics by posting a slowpath ramrod.
/*
 * Request updated STORM statistics by posting a STAT_QUERY ramrod.
 *
 * Builds the ramrod data (per-driver counter, port-collect flag, and a
 * bitmap of client IDs to collect) and posts it via bxe_sp_post(). Does
 * nothing if a previous query is still pending.
 */
7312 bxe_stats_storm_post(struct bxe_softc *sc)
7314 struct eth_query_ramrod_data ramrod_data = {0};
7317 DBENTER(BXE_INSANE_STATS);
7319 if (!sc->stats_pending) {
7320 ramrod_data.drv_counter = sc->stats_counter++;
7321 ramrod_data.collect_port = sc->port.pmf ? 1 : 0;
/* Collect statistics for every active RSS queue's client ID. */
7322 for (i = 0; i < sc->num_queues; i++)
7323 ramrod_data.ctr_id_vector |= (1 << sc->fp[i].cl_id);
7325 rc = bxe_sp_post(sc, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
7326 ((uint32_t *)&ramrod_data)[1],
7327 ((uint32_t *)&ramrod_data)[0], 0);
7329 /* Stats ramrod has its own slot on the SPQ. */
7331 sc->stats_pending = 1;
7335 DBEXIT(BXE_INSANE_STATS);
7339 * Setup the address used by the driver to report port-based statistics
7340 * back to the controller.
/*
 * Program the controller with the address of the driver's port
 * statistics block by queueing a PCI->GRC DMAE copy and posting it.
 * Only valid when this instance is the PMF and port_stx is set.
 */
7346 bxe_stats_port_base_init(struct bxe_softc *sc)
7348 uint32_t *stats_comp;
7349 struct dmae_command *dmae;
7351 DBENTER(BXE_VERBOSE_STATS);
7353 /* Only the port management function (PMF) does this work. */
7354 if ((sc->port.pmf == 0) || !sc->port.port_stx) {
7355 BXE_PRINTF("%s(%d): Invalid statistcs port setup!\n",
7356 __FILE__, __LINE__);
7357 goto bxe_stats_port_base_init_exit;
7360 stats_comp = BXE_SP(sc, stats_comp);
7361 sc->executer_idx = 0;
7363 /* DMA the address of the drivers port statistics block. */
7364 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
/* PCI (host) source -> GRC (device) destination, completion to host. */
7365 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7366 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7367 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7369 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7371 DMAE_CMD_ENDIANITY_DW_SWAP |
7373 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7374 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7375 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
7376 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
/* GRC addresses are in 32-bit dwords, hence the >> 2. */
7377 dmae->dst_addr_lo = sc->port.port_stx >> 2;
7378 dmae->dst_addr_hi = 0;
7379 dmae->len = sizeof(struct host_port_stats) >> 2;
7380 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7381 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7382 dmae->comp_val = DMAE_COMP_VAL;
7385 bxe_stats_hw_post(sc);
7388 bxe_stats_port_base_init_exit:
7389 DBEXIT(BXE_VERBOSE_STATS);
7393 * Setup the address used by the driver to report function-based statistics
7394 * back to the controller.
/*
 * Initialize function statistics for every virtual function (VN) on
 * this port. Temporarily repoints sc->func_stx at each function's
 * shared-memory stats address, runs the init + DMAE post, and then
 * restores the original func_stx. PMF-only.
 */
7400 bxe_stats_func_base_init(struct bxe_softc *sc)
7406 DBENTER(BXE_VERBOSE_STATS);
7408 /* Only the port management function (PMF) does this work. */
7409 if ((sc->port.pmf == 0) || !sc->func_stx) {
7410 BXE_PRINTF("%s(%d): Invalid statistcs function setup!\n",
7411 __FILE__, __LINE__);
7412 goto bxe_stats_func_base_init_exit;
/* Save the caller's func_stx; it is restored after the loop. */
7416 func_stx = sc->func_stx;
7417 vn_max = IS_E1HMF(sc) ? E1HVN_MAX : E1VN_MAX;
7419 /* Initialize each function individually. */
7420 for (vn = VN_0; vn < vn_max; vn++) {
/* Absolute function number: two ports interleaved per VN. */
7421 func = 2 * vn + port;
7422 sc->func_stx = SHMEM_RD(sc, func_mb[func].fw_mb_param);
7423 bxe_stats_func_init(sc);
7424 bxe_stats_hw_post(sc);
7428 sc->func_stx = func_stx;
7430 bxe_stats_func_base_init_exit:
7431 DBEXIT(BXE_VERBOSE_STATS);
7435 * DMA the function-based statistics to the controller.
/*
 * DMA the function statistics from the controller (GRC) into the
 * driver's func_stats_base buffer and post the operation. PMF-only.
 */
7441 bxe_stats_func_base_update(struct bxe_softc *sc)
7443 uint32_t *stats_comp;
7444 struct dmae_command *dmae;
7446 DBENTER(BXE_VERBOSE_STATS);
7448 /* Only the port management function (PMF) does this work. */
7449 if ((sc->port.pmf == 0) || !sc->func_stx) {
7450 BXE_PRINTF("%s(%d): Invalid statistcs function update!\n",
7451 __FILE__, __LINE__);
7452 goto bxe_stats_func_base_update_exit;
7455 dmae = &sc->stats_dmae;
7456 stats_comp = BXE_SP(sc, stats_comp);
7457 sc->executer_idx = 0;
7458 memset(dmae, 0, sizeof(struct dmae_command));
7460 /* DMA the function statistics from the driver to the H/W. */
/* GRC (device) source -> PCI (host) destination, completion to host. */
7461 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7462 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7463 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7465 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7467 DMAE_CMD_ENDIANITY_DW_SWAP |
7469 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7470 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
/* GRC addresses are in 32-bit dwords, hence the >> 2. */
7471 dmae->src_addr_lo = sc->func_stx >> 2;
7472 dmae->src_addr_hi = 0;
7473 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats_base));
7474 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats_base));
7475 dmae->len = sizeof(struct host_func_stats) >> 2;
7476 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7477 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7478 dmae->comp_val = DMAE_COMP_VAL;
7481 bxe_stats_hw_post(sc);
7484 bxe_stats_func_base_update_exit:
7485 DBEXIT(BXE_VERBOSE_STATS);
7490 * Initialize statistics.
/*
 * Initialize the statistics subsystem.
 *
 * Resets the stats state machine, reads the shared-memory offsets for
 * port/function stats, snapshots the NIG baseline counters, zeroes the
 * per-queue counters, and performs the PMF or non-PMF base init.
 */
7496 bxe_stats_init(struct bxe_softc *sc)
7498 struct bxe_fastpath *fp;
7501 DBENTER(BXE_VERBOSE_STATS);
7503 if (sc->stats_enable == FALSE)
7504 goto bxe_stats_init_exit;
7508 sc->executer_idx = 0;
7509 sc->stats_counter = 0;
7510 sc->stats_pending = 0;
7512 /* Fetch the offset of port & function statistics in shared memory. */
7514 sc->port.port_stx = 0;
7517 sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
7518 sc->func_stx = SHMEM_RD(sc, func_mb[func].fw_mb_param);
7521 DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): sc->port.port_stx = 0x%08X\n",
7522 __FUNCTION__, sc->port.port_stx);
7523 DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): sc->func_stx = 0x%08X\n",
7524 __FUNCTION__, sc->func_stx);
7526 /* Port statistics: snapshot NIG counters as the delta baseline. */
7527 memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
7528 sc->port.old_nig_stats.brb_discard = REG_RD(sc,
7529 NIG_REG_STAT0_BRB_DISCARD + port * 0x38);
7530 sc->port.old_nig_stats.brb_truncate = REG_RD(sc,
7531 NIG_REG_STAT0_BRB_TRUNCATE + port * 0x38);
7532 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port * 0x50,
7533 &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
7534 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port * 0x50,
7535 &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
7537 /* Function statistics. */
7538 for (i = 0; i < sc->num_queues; i++) {
7541 /* Clear all per-queue statistics. */
7542 memset(&fp->old_tclient, 0,
7543 sizeof(struct tstorm_per_client_stats));
7544 memset(&fp->old_uclient, 0,
7545 sizeof(struct ustorm_per_client_stats));
7546 memset(&fp->old_xclient, 0,
7547 sizeof(struct xstorm_per_client_stats));
7548 memset(&fp->eth_q_stats, 0,
7549 sizeof(struct bxe_q_stats));
7552 /* ToDo: Clear any driver specific statistics? */
7554 sc->stats_state = STATS_STATE_DISABLED;
7556 if (sc->port.pmf == 1) {
7557 /* Init port & function stats if we're PMF. */
7558 if (sc->port.port_stx)
7559 bxe_stats_port_base_init(sc);
7561 bxe_stats_func_base_init(sc);
7562 } else if (sc->func_stx)
7563 /* Update function stats if we're not PMF. */
7564 bxe_stats_func_base_update(sc);
7566 bxe_stats_init_exit:
7567 DBEXIT(BXE_VERBOSE_STATS);
/*
 * Launch any queued statistics DMAE commands.
 *
 * If multiple commands were queued (executer_idx != 0), a "loader"
 * DMAE command is built that copies the queued command array into the
 * DMAE command memory and chains execution via the go_c registers;
 * otherwise a single direct DMAE post is issued.
 */
7576 bxe_stats_hw_post(struct bxe_softc *sc)
7578 struct dmae_command *dmae;
7579 uint32_t *stats_comp;
7582 DBENTER(BXE_INSANE_STATS);
7584 dmae = &sc->stats_dmae;
7585 stats_comp = BXE_SP(sc, stats_comp);
/* Pre-set the completion word; the final DMAE overwrites it when done. */
7586 *stats_comp = DMAE_COMP_VAL;
7588 if (sc->executer_idx) {
7589 loader_idx = PMF_DMAE_C(sc);
7591 memset(dmae, 0, sizeof(struct dmae_command));
/* Loader command: host command array -> device DMAE command memory. */
7593 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7594 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
7595 DMAE_CMD_DST_RESET |
7597 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7599 DMAE_CMD_ENDIANITY_DW_SWAP |
7601 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7602 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7604 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
7605 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
7606 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
7607 sizeof(struct dmae_command) * (loader_idx + 1)) >> 2;
7608 dmae->dst_addr_hi = 0;
7609 dmae->len = sizeof(struct dmae_command) >> 2;
/* Completion kicks the next DMAE channel to run the copied commands. */
7614 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
7615 dmae->comp_addr_hi = 0;
7619 bxe_post_dmae(sc, dmae, loader_idx);
7621 } else if (sc->func_stx) {
7623 bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
7626 DBEXIT(BXE_INSANE_STATS);
7630 * Delay routine which polls for the DMA engine to complete.
7633 * 0 = Failure, !0 = Success
/*
 * Poll for DMAE statistics completion.
 *
 * Spins until the stats_comp word is overwritten with DMAE_COMP_VAL,
 * logging a timeout if it never arrives (the countdown/delay lines are
 * elided in this excerpt).
 */
7636 bxe_stats_comp(struct bxe_softc *sc)
7638 uint32_t *stats_comp;
7641 DBENTER(BXE_VERBOSE_STATS);
7643 stats_comp = BXE_SP(sc, stats_comp);
7646 while (*stats_comp != DMAE_COMP_VAL) {
7648 BXE_PRINTF("%s(%d): Timeout waiting for statistics "
7649 "completions.\n", __FILE__, __LINE__);
7656 DBEXIT(BXE_VERBOSE_STATS);
7657 /* ToDo: Shouldn't this return the value of cnt? */
7662 * DMA port statistics from controller to driver.
/*
 * Copy the hardware's port statistics block into the driver.
 *
 * Queues two GRC->PCI DMAE commands (the block is larger than one DMAE
 * transfer, so it is split at DMAE_LEN32_RD_MAX dwords), then posts
 * them and waits for completion. Only meaningful on multi-function
 * (E1H) parts when this instance is the PMF.
 */
7668 bxe_stats_pmf_update(struct bxe_softc *sc)
7670 struct dmae_command *dmae;
7671 uint32_t opcode, *stats_comp;
7674 DBENTER(BXE_VERBOSE_STATS);
7676 stats_comp = BXE_SP(sc, stats_comp);
7677 loader_idx = PMF_DMAE_C(sc);
7679 /* We shouldn't be here if any of the following are false. */
7680 if (!IS_E1HMF(sc) || (sc->port.pmf == 0) || !sc->port.port_stx) {
7681 BXE_PRINTF("%s(%d): Statistics bug!\n", __FILE__, __LINE__);
7682 goto bxe_stats_pmf_update_exit;
7685 sc->executer_idx = 0;
7687 /* Instruct DMA engine to copy port statistics from H/W to driver. */
7688 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7689 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7690 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7692 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7694 DMAE_CMD_ENDIANITY_DW_SWAP |
7696 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7697 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
/* First chunk: completion chains into the loader go_c channel. */
7699 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7700 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
7701 dmae->src_addr_lo = sc->port.port_stx >> 2;
7702 dmae->src_addr_hi = 0;
7703 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
7704 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
7705 dmae->len = DMAE_LEN32_RD_MAX;
7706 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7707 dmae->comp_addr_hi = 0;
/* Second chunk: the remainder, completing to the host stats_comp word. */
7710 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7711 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
7712 dmae->src_addr_lo = (sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
7713 dmae->src_addr_hi = 0;
7714 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
7715 DMAE_LEN32_RD_MAX * 4);
7716 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
7717 DMAE_LEN32_RD_MAX * 4);
7718 dmae->len = (sizeof(struct host_port_stats) >> 2) -
7720 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7721 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7722 dmae->comp_val = DMAE_COMP_VAL;
7724 /* Start the DMA and wait for the result. */
7726 bxe_stats_hw_post(sc);
7729 bxe_stats_pmf_update_exit:
7730 DBEXIT(BXE_VERBOSE_STATS);
7734 * Prepare the DMAE parameters required for all statistics.
7736 * This function should only be called by the driver instance
7737 * that is designated as the port management function (PMF).
/*
 * bxe_stats_port_init():
 * Builds the chained list of DMAE commands used for periodic statistics
 * collection on the PMF instance: host->GRC writes of the port and
 * function statistics, GRC->host reads of the active MAC (BMAC or EMAC)
 * counters, and GRC->host reads of the NIG counters.  The final command
 * signals completion through stats_comp.
 *
 * NOTE(review): listing gaps -- the assignments of 'port' and 'vn'
 * (declared below but set on lines not visible here), the #ifdef arms
 * around the ENDIANITY flags, and several braces/else lines are missing
 * from this view.
 */
7743 bxe_stats_port_init(struct bxe_softc *sc)
7745 struct dmae_command *dmae;
7746 uint32_t mac_addr, opcode, *stats_comp;
7747 int loader_idx, port, vn;
7749 DBENTER(BXE_VERBOSE_STATS);
7753 loader_idx = PMF_DMAE_C(sc);
7754 stats_comp = BXE_SP(sc, stats_comp);
7756 /* Only the port management function (PMF) does this work. */
7757 if (!sc->link_vars.link_up || (sc->port.pmf == 0)) {
7758 BXE_PRINTF("%s(%d): Invalid statistics port setup!\n",
7759 __FILE__, __LINE__);
7760 goto bxe_stats_port_init_exit;
7763 sc->executer_idx = 0;
7765 /* The same opcode is used for multiple DMA operations. */
7766 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7767 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
7768 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7770 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7772 DMAE_CMD_ENDIANITY_DW_SWAP |
7774 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7775 (vn << DMAE_CMD_E1HVN_SHIFT));
7777 /* Setup the DMA for port statistics. */
7778 if (sc->port.port_stx) {
7779 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7780 dmae->opcode = opcode;
7781 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
7782 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
7783 dmae->dst_addr_lo = sc->port.port_stx >> 2;
7784 dmae->dst_addr_hi = 0;
7785 dmae->len = sizeof(struct host_port_stats) >> 2;
7786 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7787 dmae->comp_addr_hi = 0;
7791 /* Setup the DMA for function statistics. */
7793 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7794 dmae->opcode = opcode;
7795 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
7796 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
7797 dmae->dst_addr_lo = sc->func_stx >> 2;
7798 dmae->dst_addr_hi = 0;
7799 dmae->len = sizeof(struct host_func_stats) >> 2;
7800 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7801 dmae->comp_addr_hi = 0;
7805 /* Setup statistics reporting for the MAC. */
/* Direction flips here: GRC (MAC registers) -> PCI (host memory). */
7806 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7807 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
7808 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7810 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7812 DMAE_CMD_ENDIANITY_DW_SWAP |
7814 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7815 (vn << DMAE_CMD_E1HVN_SHIFT));
7817 if (sc->link_vars.mac_type == MAC_TYPE_BMAC) {
7818 /* Enable statistics for the 10Gb BMAC. */
7820 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
7821 NIG_REG_INGRESS_BMAC0_MEM);
7823 /* Setup BMAC TX statistics (TX_STAT_GTPKT .. TX_STAT_GTBYT). */
7824 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7825 dmae->opcode = opcode;
7826 dmae->src_addr_lo = (mac_addr +
7827 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
7828 dmae->src_addr_hi = 0;
7829 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
7830 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
7831 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
7832 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
7833 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7834 dmae->comp_addr_hi = 0;
7837 /* Setup BMAC RX statistics (RX_STAT_GR64 .. RX_STAT_GRIPJ). */
7838 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7839 dmae->opcode = opcode;
7840 dmae->src_addr_lo = (mac_addr +
7841 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
7842 dmae->src_addr_hi = 0;
7843 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
7844 offsetof(struct bmac_stats, rx_stat_gr64_lo));
7845 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
7846 offsetof(struct bmac_stats, rx_stat_gr64_lo));
7847 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
7848 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
7849 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7850 dmae->comp_addr_hi = 0;
7853 } else if (sc->link_vars.mac_type == MAC_TYPE_EMAC) {
7854 /* Enable statistics for the 1Gb EMAC. */
7856 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
7858 /* Setup EMAC RX statistics. */
7859 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7860 dmae->opcode = opcode;
7861 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
7862 dmae->src_addr_hi = 0;
7863 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
7864 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
7865 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
7866 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7867 dmae->comp_addr_hi = 0;
7870 /* Setup additional EMAC RX statistics. */
7871 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7872 dmae->opcode = opcode;
7873 dmae->src_addr_lo = (mac_addr +
7874 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
7875 dmae->src_addr_hi = 0;
7876 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
7877 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
7878 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
7879 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
7881 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7882 dmae->comp_addr_hi = 0;
7885 /* Setup EMAC TX statistics. */
7886 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7887 dmae->opcode = opcode;
7888 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2;
7889 dmae->src_addr_hi = 0;
7890 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
7891 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
7892 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
7893 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
7894 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
7895 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7896 dmae->comp_addr_hi = 0;
7899 DBPRINT(sc, BXE_WARN, "%s(): Undefined MAC type.\n",
7903 /* Enable NIG statistics. */
7904 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7905 dmae->opcode = opcode;
7906 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
7907 NIG_REG_STAT0_BRB_DISCARD) >> 2;
7908 dmae->src_addr_hi = 0;
7909 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
7910 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
7911 dmae->len = (sizeof(struct nig_stats) - 4 * sizeof(uint32_t)) >> 2;
7912 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7913 dmae->comp_addr_hi = 0;
7916 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7917 dmae->opcode = opcode;
7918 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
7919 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
7920 dmae->src_addr_hi = 0;
7921 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
7922 offsetof(struct nig_stats, egress_mac_pkt0_lo));
7923 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
7924 offsetof(struct nig_stats, egress_mac_pkt0_lo));
7925 dmae->len = (2 * sizeof(uint32_t)) >> 2;
7926 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7927 dmae->comp_addr_hi = 0;
/* Last command in the chain: completes to stats_comp in host memory. */
7930 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7931 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7932 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7933 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7935 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7937 DMAE_CMD_ENDIANITY_DW_SWAP |
7939 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7940 (vn << DMAE_CMD_E1HVN_SHIFT));
7941 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
7942 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
7943 dmae->src_addr_hi = 0;
7944 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
7945 offsetof(struct nig_stats, egress_mac_pkt1_lo));
7946 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
7947 offsetof(struct nig_stats, egress_mac_pkt1_lo));
7948 dmae->len = (2 * sizeof(uint32_t)) >> 2;
7949 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7950 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7951 dmae->comp_val = DMAE_COMP_VAL;
7953 /* Clear the statistics completion value. */
7956 bxe_stats_port_init_exit:
7957 DBEXIT(BXE_VERBOSE_STATS);
7961 * Prepare the DMAE parameters required for function statistics.
7963 * This function is called by all driver instances.
/*
 * bxe_stats_func_init():
 * Initializes the single, reusable DMAE command (sc->stats_dmae) that
 * writes this function's host_func_stats block to the chip at
 * sc->func_stx, completing to stats_comp.  Bails out with an error
 * message if no function statistics address is configured.
 *
 * NOTE(review): listing gaps -- storage class/return type, #ifdef arms
 * around the ENDIANITY flags, and closing braces are not visible here.
 */
7969 bxe_stats_func_init(struct bxe_softc *sc)
7971 struct dmae_command *dmae;
7972 uint32_t *stats_comp;
7974 DBENTER(BXE_VERBOSE_STATS);
7976 if (!sc->func_stx) {
7977 BXE_PRINTF("%s(%d): Invalid statistics function setup!\n",
7978 __FILE__, __LINE__);
7979 goto bxe_stats_func_init_exit;
7982 dmae = &sc->stats_dmae;
7983 stats_comp = BXE_SP(sc, stats_comp);
7984 sc->executer_idx = 0;
7985 memset(dmae, 0, sizeof(struct dmae_command));
7987 /* Setup the DMA for function statistics. */
7988 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7989 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7990 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7992 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7994 DMAE_CMD_ENDIANITY_DW_SWAP |
7996 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7997 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7999 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
8000 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
8001 dmae->dst_addr_lo = sc->func_stx >> 2;
8002 dmae->dst_addr_hi = 0;
8003 dmae->len = sizeof(struct host_func_stats) >> 2;
8004 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
8005 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
8006 dmae->comp_val = DMAE_COMP_VAL;
8010 bxe_stats_func_init_exit:
8011 DBEXIT(BXE_VERBOSE_STATS);
8015 * Starts a statistics update DMA and waits for completion.
/*
 * bxe_stats_start():
 * Sets up the DMAE command chain (port-level on the PMF instance,
 * function-level otherwise) and posts the first hardware and STORM
 * statistics requests.
 */
8021 bxe_stats_start(struct bxe_softc *sc)
8024 DBENTER(BXE_VERBOSE_STATS);
8026 if (sc->port.pmf == 1)
8027 bxe_stats_port_init(sc);
8028 else if (sc->func_stx)
8029 bxe_stats_func_init(sc);
8031 bxe_stats_hw_post(sc);
8032 bxe_stats_storm_post(sc);
8034 DBEXIT(BXE_VERBOSE_STATS);
/*
 * bxe_stats_pmf_start():
 * Statistics state-machine action taken when this instance becomes the
 * PMF: pull the current port statistics from the hardware, then (re)start
 * the normal statistics collection cycle.
 */
8042 bxe_stats_pmf_start(struct bxe_softc *sc)
8044 DBENTER(BXE_VERBOSE_STATS);
8047 bxe_stats_pmf_update(sc);
8048 bxe_stats_start(sc);
8050 DBEXIT(BXE_VERBOSE_STATS);
/*
 * bxe_stats_restart():
 * Statistics state-machine action for a link-up event while already
 * enabled: simply restart the collection cycle.
 */
8058 bxe_stats_restart(struct bxe_softc *sc)
8061 DBENTER(BXE_VERBOSE_STATS);
8064 bxe_stats_start(sc);
8066 DBEXIT(BXE_VERBOSE_STATS);
8070 * Update the Big MAC (10Gb BMAC) statistics.
/*
 * bxe_stats_bmac_update():
 * Folds freshly DMA'd BMAC hardware counters (mac_stats.bmac_stats) into
 * the accumulated port statistics (pstats->mac_stx, via the UPDATE_STAT64
 * macro) and mirrors the pause-frame totals into sc->eth_stats.
 *
 * NOTE(review): the locals 'new' and 'diff' appear to be consumed
 * implicitly by the UPDATE_STAT64 macro -- confirm against the macro
 * definition; several lines (braces, some macro arguments) are missing
 * from this listing.
 */
8076 bxe_stats_bmac_update(struct bxe_softc *sc)
8078 struct bmac_stats *new;
8079 struct host_port_stats *pstats;
8080 struct bxe_port_stats *estats;
8081 struct regpair diff;
8083 DBENTER(BXE_INSANE_STATS);
8085 new = BXE_SP(sc, mac_stats.bmac_stats);
8086 pstats = BXE_SP(sc, port_stats);
8087 estats = &sc->eth_stats;
8089 UPDATE_STAT64(rx_stat_grerb,
8090 rx_stat_ifhcinbadoctets);
8091 UPDATE_STAT64(rx_stat_grfcs,
8092 rx_stat_dot3statsfcserrors);
8093 UPDATE_STAT64(rx_stat_grund,
8094 rx_stat_etherstatsundersizepkts);
8095 UPDATE_STAT64(rx_stat_grovr,
8096 rx_stat_dot3statsframestoolong);
8097 UPDATE_STAT64(rx_stat_grfrg,
8098 rx_stat_etherstatsfragments);
8099 UPDATE_STAT64(rx_stat_grjbr,
8100 rx_stat_etherstatsjabbers);
8101 UPDATE_STAT64(rx_stat_grxcf,
8102 rx_stat_maccontrolframesreceived);
8103 UPDATE_STAT64(rx_stat_grxpf,
8104 rx_stat_xoffstateentered);
8105 UPDATE_STAT64(rx_stat_grxpf,
8107 UPDATE_STAT64(tx_stat_gtxpf,
8108 tx_stat_outxoffsent);
8109 UPDATE_STAT64(tx_stat_gtxpf,
8110 tx_stat_flowcontroldone);
8111 UPDATE_STAT64(tx_stat_gt64,
8112 tx_stat_etherstatspkts64octets);
8113 UPDATE_STAT64(tx_stat_gt127,
8114 tx_stat_etherstatspkts65octetsto127octets);
8115 UPDATE_STAT64(tx_stat_gt255,
8116 tx_stat_etherstatspkts128octetsto255octets);
8117 UPDATE_STAT64(tx_stat_gt511,
8118 tx_stat_etherstatspkts256octetsto511octets);
8119 UPDATE_STAT64(tx_stat_gt1023,
8120 tx_stat_etherstatspkts512octetsto1023octets);
8121 UPDATE_STAT64(tx_stat_gt1518,
8122 tx_stat_etherstatspkts1024octetsto1522octets);
8123 UPDATE_STAT64(tx_stat_gt2047,
8125 UPDATE_STAT64(tx_stat_gt4095,
8127 UPDATE_STAT64(tx_stat_gt9216,
8129 UPDATE_STAT64(tx_stat_gt16383,
8130 tx_stat_bmac_16383);
8131 UPDATE_STAT64(tx_stat_gterr,
8132 tx_stat_dot3statsinternalmactransmiterrors);
8133 UPDATE_STAT64(tx_stat_gtufl,
/* Mirror pause-frame counters into the driver's ethernet statistics. */
8136 estats->pause_frames_received_hi =
8137 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
8138 estats->pause_frames_received_lo =
8139 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
8140 estats->pause_frames_sent_hi =
8141 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
8142 estats->pause_frames_sent_lo =
8143 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
8145 DBEXIT(BXE_INSANE_STATS);
8149 * Update the Ethernet MAC (1Gb EMAC) statistics.
/*
 * bxe_stats_emac_update():
 * Folds freshly DMA'd EMAC hardware counters (mac_stats.emac_stats) into
 * the accumulated port statistics (via UPDATE_EXTEND_STAT) and derives
 * the combined pause-frame totals (XON + XOFF) in sc->eth_stats.
 *
 * NOTE(review): the local 'new' appears to be consumed implicitly by the
 * UPDATE_EXTEND_STAT macro -- confirm against the macro definition;
 * braces are missing from this listing.
 */
8155 bxe_stats_emac_update(struct bxe_softc *sc)
8157 struct emac_stats *new;
8158 struct host_port_stats *pstats;
8159 struct bxe_port_stats *estats;
8161 DBENTER(BXE_INSANE_STATS);
8163 new = BXE_SP(sc, mac_stats.emac_stats);
8164 pstats = BXE_SP(sc, port_stats);
8165 estats = &sc->eth_stats;
8167 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
8168 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
8169 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
8170 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
8171 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
8172 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
8173 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
8174 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
8175 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
8176 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
8177 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
8178 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
8179 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
8180 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
8181 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
8182 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
8183 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
8184 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
8185 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
8186 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
8187 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
8188 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
8189 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
8190 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
8191 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
8192 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
8193 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
8194 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
8195 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
8196 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
8197 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause received = XON received + XOFF received (64-bit add). */
8199 estats->pause_frames_received_hi =
8200 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
8201 estats->pause_frames_received_lo =
8202 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
8203 ADD_64(estats->pause_frames_received_hi,
8204 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
8205 estats->pause_frames_received_lo,
8206 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause sent = XON sent + XOFF sent (64-bit add). */
8208 estats->pause_frames_sent_hi =
8209 pstats->mac_stx[1].tx_stat_outxonsent_hi;
8210 estats->pause_frames_sent_lo =
8211 pstats->mac_stx[1].tx_stat_outxonsent_lo;
8212 ADD_64(estats->pause_frames_sent_hi,
8213 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
8214 estats->pause_frames_sent_lo,
8215 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
8217 DBEXIT(BXE_INSANE_STATS);
8222 * 0 = Success, !0 = Failure.
/*
 * bxe_stats_hw_update():
 * Updates the active MAC's statistics (BMAC or EMAC), then folds the NIG
 * counters (BRB discard/truncate, egress packet counts) into the port
 * statistics, snapshots them for the next delta, and mirrors selected
 * values into sc->eth_stats.  Returns 0 on success, non-zero otherwise.
 *
 * NOTE(review): listing gaps -- the return variable declaration/assignments,
 * the else branch around the "no MAC active" warning, the nig_timer_max
 * read and IS_E1HMF guard context, and closing braces are not fully
 * visible here.
 */
8225 bxe_stats_hw_update(struct bxe_softc *sc)
8227 struct nig_stats *new, *old;
8228 struct host_port_stats *pstats;
8229 struct bxe_port_stats *estats;
8230 struct regpair diff;
8231 uint32_t nig_timer_max;
8234 DBENTER(BXE_INSANE_STATS);
8237 new = BXE_SP(sc, nig_stats);
8238 old = &(sc->port.old_nig_stats);
8239 pstats = BXE_SP(sc, port_stats);
8240 estats = &sc->eth_stats;
8242 /* Update statistics for the active MAC. */
8243 if (sc->link_vars.mac_type == MAC_TYPE_BMAC)
8244 bxe_stats_bmac_update(sc);
8245 else if (sc->link_vars.mac_type == MAC_TYPE_EMAC)
8246 bxe_stats_emac_update(sc);
8248 DBPRINT(sc, BXE_WARN,
8249 "%s(): Statistics updated by DMAE but no MAC is active!\n",
8252 goto bxe_stats_hw_update_exit;
8255 /* Now update the hardware (NIG) statistics. */
8256 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
8257 new->brb_discard - old->brb_discard);
8258 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
8259 new->brb_truncate - old->brb_truncate);
8261 UPDATE_STAT64_NIG(egress_mac_pkt0,
8262 etherstatspkts1024octetsto1522octets);
8263 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
/* Snapshot the NIG counters so the next pass can compute deltas. */
8265 memcpy(old, new, sizeof(struct nig_stats));
8267 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
8268 sizeof(struct mac_stx));
8269 estats->brb_drop_hi = pstats->brb_drop_hi;
8270 estats->brb_drop_lo = pstats->brb_drop_lo;
8272 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
8276 SHMEM_RD(sc, port_mb[BP_PORT(sc)].stat_nig_timer);
8277 if (nig_timer_max != estats->nig_timer_max) {
8278 estats->nig_timer_max = nig_timer_max;
8279 DBPRINT(sc, BXE_WARN,
8280 "%s(): NIG timer reached max value (%u)!\n",
8281 __FUNCTION__, estats->nig_timer_max);
8285 bxe_stats_hw_update_exit:
8286 DBEXIT(BXE_INSANE_STATS);
8292 * 0 = Success, !0 = Failure.
/*
 * bxe_stats_storm_update():
 * Consumes the firmware (STORM) statistics snapshot in fw_stats.  For
 * each queue it validates the T/U/X-STORM per-client counters against
 * sc->stats_counter (bailing out if the firmware has not produced a new
 * snapshot), accumulates per-queue byte/packet counts into the per-queue
 * (qstats), per-function (fstats) and per-port (estats) structures, and
 * finally copies the function totals into sc->eth_stats.  Returns 0 on
 * success, non-zero if any STORM counter was stale.
 *
 * NOTE(review): listing gaps -- the return variable, the cl_id/fp
 * assignments at the top of the loop, IS_E1HMF guards, and several
 * braces are not visible here.
 */
8296 bxe_stats_storm_update(struct bxe_softc *sc)
8299 struct eth_stats_query *stats;
8300 struct bxe_port_stats *estats;
8301 struct host_func_stats *fstats;
8302 struct bxe_q_stats *qstats;
8303 struct tstorm_per_port_stats *tport;
8304 struct tstorm_per_client_stats *tclient;
8305 struct ustorm_per_client_stats *uclient;
8306 struct xstorm_per_client_stats *xclient;
8307 struct tstorm_per_client_stats *old_tclient;
8308 struct ustorm_per_client_stats *old_uclient;
8309 struct xstorm_per_client_stats *old_xclient;
8310 struct bxe_fastpath * fp;
8313 DBENTER(BXE_INSANE_STATS);
8317 stats = BXE_SP(sc, fw_stats);
8318 tport = &stats->tstorm_common.port_statistics;
8319 fstats = BXE_SP(sc, func_stats);
/* Re-seed the running function totals from the saved base snapshot. */
8321 memcpy(&(fstats->total_bytes_received_hi),
8322 &(BXE_SP(sc, func_stats_base)->total_bytes_received_hi),
8323 sizeof(struct host_func_stats) - 2 * sizeof(uint32_t));
8325 estats = &sc->eth_stats;
8326 estats->no_buff_discard_hi = 0;
8327 estats->no_buff_discard_lo = 0;
8328 estats->error_bytes_received_hi = 0;
8329 estats->error_bytes_received_lo = 0;
8330 estats->etherstatsoverrsizepkts_hi = 0;
8331 estats->etherstatsoverrsizepkts_lo = 0;
8333 for (i = 0; i < sc->num_queues; i++) {
8336 tclient = &stats->tstorm_common.client_statistics[cl_id];
8337 old_tclient = &fp->old_tclient;
8338 uclient = &stats->ustorm_common.client_statistics[cl_id];
8339 old_uclient = &fp->old_uclient;
8340 xclient = &stats->xstorm_common.client_statistics[cl_id];
8341 old_xclient = &fp->old_xclient;
8342 qstats = &fp->eth_q_stats;
8344 /* Are TSTORM statistics valid? */
8345 if ((uint16_t)(le16toh(tclient->stats_counter) + 1) !=
8346 sc->stats_counter) {
8347 DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by TSTORM "
8348 "(tstorm counter (%d) != stats_counter (%d))!\n",
8349 __FUNCTION__, tclient->stats_counter, sc->stats_counter);
8351 goto bxe_stats_storm_update_exit;
8354 /* Are USTORM statistics valid? */
8355 if ((uint16_t)(le16toh(uclient->stats_counter) + 1) !=
8356 sc->stats_counter) {
8357 DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by USTORM "
8358 "(ustorm counter (%d) != stats_counter (%d))!\n",
8359 __FUNCTION__, uclient->stats_counter, sc->stats_counter);
8361 goto bxe_stats_storm_update_exit;
8364 /* Are XSTORM statistics valid? */
8365 if ((uint16_t)(le16toh(xclient->stats_counter) + 1) !=
8366 sc->stats_counter) {
8367 DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by XSTORM "
8368 "(xstorm counter (%d) != stats_counter (%d))!\n",
8369 __FUNCTION__, xclient->stats_counter, sc->stats_counter);
8371 goto bxe_stats_storm_update_exit;
/* Total RX bytes = bcast + mcast + ucast, minus no-buffer drops. */
8374 qstats->total_bytes_received_hi =
8375 (tclient->rcv_broadcast_bytes.hi);
8376 qstats->total_bytes_received_lo =
8377 le32toh(tclient->rcv_broadcast_bytes.lo);
8379 ADD_64(qstats->total_bytes_received_hi,
8380 le32toh(tclient->rcv_multicast_bytes.hi),
8381 qstats->total_bytes_received_lo,
8382 le32toh(tclient->rcv_multicast_bytes.lo));
8384 ADD_64(qstats->total_bytes_received_hi,
8385 le32toh(tclient->rcv_unicast_bytes.hi),
8386 qstats->total_bytes_received_lo,
8387 le32toh(tclient->rcv_unicast_bytes.lo));
8389 SUB_64(qstats->total_bytes_received_hi,
8390 le32toh(uclient->bcast_no_buff_bytes.hi),
8391 qstats->total_bytes_received_lo,
8392 le32toh(uclient->bcast_no_buff_bytes.lo));
8394 SUB_64(qstats->total_bytes_received_hi,
8395 le32toh(uclient->mcast_no_buff_bytes.hi),
8396 qstats->total_bytes_received_lo,
8397 le32toh(uclient->mcast_no_buff_bytes.lo));
8399 SUB_64(qstats->total_bytes_received_hi,
8400 le32toh(uclient->ucast_no_buff_bytes.hi),
8401 qstats->total_bytes_received_lo,
8402 le32toh(uclient->ucast_no_buff_bytes.lo));
8404 qstats->valid_bytes_received_hi =
8405 qstats->total_bytes_received_hi;
8406 qstats->valid_bytes_received_lo =
8407 qstats->total_bytes_received_lo;
8409 qstats->error_bytes_received_hi =
8410 le32toh(tclient->rcv_error_bytes.hi);
8411 qstats->error_bytes_received_lo =
8412 le32toh(tclient->rcv_error_bytes.lo);
8414 ADD_64(qstats->total_bytes_received_hi,
8415 qstats->error_bytes_received_hi,
8416 qstats->total_bytes_received_lo,
8417 qstats->error_bytes_received_lo);
8419 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
8420 total_unicast_packets_received);
8421 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
8422 total_multicast_packets_received);
8423 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
8424 total_broadcast_packets_received);
8425 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
8426 etherstatsoverrsizepkts);
8427 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
8429 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
8430 total_unicast_packets_received);
8431 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
8432 total_multicast_packets_received);
8433 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
8434 total_broadcast_packets_received);
8435 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
8436 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
8437 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
/* Total TX bytes = ucast + mcast + bcast bytes sent. */
8439 qstats->total_bytes_transmitted_hi =
8440 le32toh(xclient->unicast_bytes_sent.hi);
8441 qstats->total_bytes_transmitted_lo =
8442 le32toh(xclient->unicast_bytes_sent.lo);
8444 ADD_64(qstats->total_bytes_transmitted_hi,
8445 le32toh(xclient->multicast_bytes_sent.hi),
8446 qstats->total_bytes_transmitted_lo,
8447 le32toh(xclient->multicast_bytes_sent.lo));
8449 ADD_64(qstats->total_bytes_transmitted_hi,
8450 le32toh(xclient->broadcast_bytes_sent.hi),
8451 qstats->total_bytes_transmitted_lo,
8452 le32toh(xclient->broadcast_bytes_sent.lo));
8454 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
8455 total_unicast_packets_transmitted);
8457 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
8458 total_multicast_packets_transmitted);
8460 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
8461 total_broadcast_packets_transmitted);
8463 old_tclient->checksum_discard = tclient->checksum_discard;
8464 old_tclient->ttl0_discard = tclient->ttl0_discard;
/* Roll the per-queue totals up into the per-function totals. */
8466 ADD_64(fstats->total_bytes_received_hi,
8467 qstats->total_bytes_received_hi,
8468 fstats->total_bytes_received_lo,
8469 qstats->total_bytes_received_lo);
8470 ADD_64(fstats->total_bytes_transmitted_hi,
8471 qstats->total_bytes_transmitted_hi,
8472 fstats->total_bytes_transmitted_lo,
8473 qstats->total_bytes_transmitted_lo);
8474 ADD_64(fstats->total_unicast_packets_received_hi,
8475 qstats->total_unicast_packets_received_hi,
8476 fstats->total_unicast_packets_received_lo,
8477 qstats->total_unicast_packets_received_lo);
8478 ADD_64(fstats->total_multicast_packets_received_hi,
8479 qstats->total_multicast_packets_received_hi,
8480 fstats->total_multicast_packets_received_lo,
8481 qstats->total_multicast_packets_received_lo);
8482 ADD_64(fstats->total_broadcast_packets_received_hi,
8483 qstats->total_broadcast_packets_received_hi,
8484 fstats->total_broadcast_packets_received_lo,
8485 qstats->total_broadcast_packets_received_lo);
8486 ADD_64(fstats->total_unicast_packets_transmitted_hi,
8487 qstats->total_unicast_packets_transmitted_hi,
8488 fstats->total_unicast_packets_transmitted_lo,
8489 qstats->total_unicast_packets_transmitted_lo);
8490 ADD_64(fstats->total_multicast_packets_transmitted_hi,
8491 qstats->total_multicast_packets_transmitted_hi,
8492 fstats->total_multicast_packets_transmitted_lo,
8493 qstats->total_multicast_packets_transmitted_lo);
8494 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
8495 qstats->total_broadcast_packets_transmitted_hi,
8496 fstats->total_broadcast_packets_transmitted_lo,
8497 qstats->total_broadcast_packets_transmitted_lo);
8498 ADD_64(fstats->valid_bytes_received_hi,
8499 qstats->valid_bytes_received_hi,
8500 fstats->valid_bytes_received_lo,
8501 qstats->valid_bytes_received_lo);
8503 ADD_64(estats->error_bytes_received_hi,
8504 qstats->error_bytes_received_hi,
8505 estats->error_bytes_received_lo,
8506 qstats->error_bytes_received_lo);
8507 ADD_64(estats->etherstatsoverrsizepkts_hi,
8508 qstats->etherstatsoverrsizepkts_hi,
8509 estats->etherstatsoverrsizepkts_lo,
8510 qstats->etherstatsoverrsizepkts_lo);
8511 ADD_64(estats->no_buff_discard_hi,
8512 qstats->no_buff_discard_hi,
8513 estats->no_buff_discard_lo,
8514 qstats->no_buff_discard_lo);
8517 ADD_64(fstats->total_bytes_received_hi,
8518 estats->rx_stat_ifhcinbadoctets_hi,
8519 fstats->total_bytes_received_lo,
8520 estats->rx_stat_ifhcinbadoctets_lo);
/* Publish the function totals into the driver's ethernet statistics. */
8522 memcpy(estats, &(fstats->total_bytes_received_hi),
8523 sizeof(struct host_func_stats) - 2 * sizeof(uint32_t));
8525 ADD_64(estats->etherstatsoverrsizepkts_hi,
8526 estats->rx_stat_dot3statsframestoolong_hi,
8527 estats->etherstatsoverrsizepkts_lo,
8528 estats->rx_stat_dot3statsframestoolong_lo);
8529 ADD_64(estats->error_bytes_received_hi,
8530 estats->rx_stat_ifhcinbadoctets_hi,
8531 estats->error_bytes_received_lo,
8532 estats->rx_stat_ifhcinbadoctets_lo);
8535 estats->mac_filter_discard =
8536 le32toh(tport->mac_filter_discard);
8537 estats->xxoverflow_discard =
8538 le32toh(tport->xxoverflow_discard);
8539 estats->brb_truncate_discard =
8540 le32toh(tport->brb_truncate_discard);
8541 estats->mac_discard = le32toh(tport->mac_discard);
8544 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
8546 sc->stats_pending = 0;
8548 bxe_stats_storm_update_exit:
8550 DBEXIT(BXE_INSANE_STATS);
8555 * Copy the controller maintained statistics over to the OS.
/*
 * bxe_stats_net_update():
 * Translates the driver's accumulated ethernet statistics into the
 * ifnet interface counters (collisions, input/output errors, input/
 * output packets).
 *
 * NOTE(review): 'ifp' is used below but its declaration/assignment
 * (presumably sc's ifnet pointer) is on lines not visible in this
 * listing; the if_ierrors/if_oerrors/if_ipackets/if_opackets
 * assignment lines are also partially missing.
 */
8561 bxe_stats_net_update(struct bxe_softc *sc)
8563 struct tstorm_per_client_stats *old_tclient;
8564 struct bxe_port_stats *estats;
8567 DBENTER(BXE_INSANE_STATS);
8569 old_tclient = &sc->fp[0].old_tclient;
8570 estats = &sc->eth_stats;
8574 * Update the OS interface statistics from
8575 * the hardware statistics.
8578 ifp->if_collisions =
8579 (u_long) estats->tx_stat_dot3statssinglecollisionframes_lo +
8580 (u_long) estats->tx_stat_dot3statsmultiplecollisionframes_lo +
8581 (u_long) estats->tx_stat_dot3statslatecollisions_lo +
8582 (u_long) estats->tx_stat_dot3statsexcessivecollisions_lo;
8585 (u_long) old_tclient->checksum_discard +
8586 (u_long) estats->no_buff_discard_lo +
8587 (u_long) estats->mac_discard +
8588 (u_long) estats->rx_stat_etherstatsundersizepkts_lo +
8589 (u_long) estats->brb_drop_lo +
8590 (u_long) estats->brb_truncate_discard +
8591 (u_long) estats->rx_stat_dot3statsfcserrors_lo +
8592 (u_long) estats->rx_stat_dot3statsalignmenterrors_lo +
8593 (u_long) estats->xxoverflow_discard;
8596 (u_long) estats->tx_stat_dot3statslatecollisions_lo +
8597 (u_long) estats->tx_stat_dot3statsexcessivecollisions_lo +
8598 (u_long) estats->tx_stat_dot3statsinternalmactransmiterrors_lo;
8601 bxe_hilo(&estats->total_unicast_packets_received_hi) +
8602 bxe_hilo(&estats->total_multicast_packets_received_hi) +
8603 bxe_hilo(&estats->total_broadcast_packets_received_hi);
8606 bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
8607 bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
8608 bxe_hilo(&estats->total_broadcast_packets_transmitted_hi);
8610 DBEXIT(BXE_INSANE_STATS);
/*
 * bxe_stats_update():
 * Periodic statistics state-machine action: once the previous DMAE
 * completion is confirmed (*stats_comp == DMAE_COMP_VAL), pull the
 * hardware statistics (PMF only) and the STORM statistics, push the
 * results to the OS counters, track how long a firmware update has been
 * pending (giving up noisily after 3 tries), and post the next hardware
 * and STORM requests.
 *
 * NOTE(review): the declaration/initialization of 'update' and the
 * "if (update)" guard around bxe_stats_net_update() are on lines not
 * visible in this listing.
 */
8619 bxe_stats_update(struct bxe_softc *sc)
8621 uint32_t *stats_comp;
8624 DBENTER(BXE_INSANE_STATS);
8626 stats_comp = BXE_SP(sc, stats_comp);
8629 /* Make sure the statistics DMAE update has completed. */
8630 if (*stats_comp != DMAE_COMP_VAL)
8631 goto bxe_stats_update_exit;
8633 /* Check for any hardware statistics updates. */
8634 if (sc->port.pmf == 1)
8635 update = (bxe_stats_hw_update(sc) == 0);
8637 /* Check for any STORM statistics updates. */
8638 update |= (bxe_stats_storm_update(sc) == 0);
8640 /* If we got updated hardware statistics then update the OS. */
8642 bxe_stats_net_update(sc);
8644 /* Check if any statistics updates are pending. */
8645 if (sc->stats_pending) {
8646 /* The update hasn't completed, keep waiting. */
8647 sc->stats_pending++;
8649 /* Have we been waiting for too long? */
8650 if (sc->stats_pending >= 3) {
8652 "%s(%d): Failed to get statistics after "
8653 "3 tries!\n", __FILE__, __LINE__);
8655 goto bxe_stats_update_exit;
8660 /* Kickoff the next statistics request. */
8661 bxe_stats_hw_post(sc);
8662 bxe_stats_storm_post(sc);
8664 bxe_stats_update_exit:
8665 DBEXIT(BXE_INSANE_STATS);
/*
 * bxe_stats_port_stop():
 * Builds the final DMAE command(s) that flush the driver's port and
 * function statistics back to the chip when statistics collection is
 * being shut down on the PMF instance.  When both a port and a function
 * statistics block exist, the port write chains (via the DMAE loader
 * "go" register) into the function write, which completes to stats_comp.
 *
 * NOTE(review): listing gaps -- the condition selecting between the
 * chained and stand-alone completion variants (apparently keyed on
 * sc->func_stx), the func_stx if-block header, and closing braces are
 * not visible here.
 */
8674 bxe_stats_port_stop(struct bxe_softc *sc)
8676 struct dmae_command *dmae;
8677 uint32_t opcode, *stats_comp;
8680 DBENTER(BXE_VERBOSE_STATS);
8682 stats_comp = BXE_SP(sc, stats_comp);
8683 loader_idx = PMF_DMAE_C(sc);
8684 sc->executer_idx = 0;
8686 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
8688 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
8690 DMAE_CMD_ENDIANITY_B_DW_SWAP |
8692 DMAE_CMD_ENDIANITY_DW_SWAP |
8694 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
8695 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
8697 if (sc->port.port_stx) {
8698 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
8701 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
8703 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
8705 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
8706 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
8707 dmae->dst_addr_lo = sc->port.port_stx >> 2;
8708 dmae->dst_addr_hi = 0;
8709 dmae->len = sizeof(struct host_port_stats) >> 2;
8712 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
8713 dmae->comp_addr_hi = 0;
8716 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc,
8718 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc,
8720 dmae->comp_val = DMAE_COMP_VAL;
8727 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
8728 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
8729 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
8730 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
8731 dmae->dst_addr_lo = sc->func_stx >> 2;
8732 dmae->dst_addr_hi = 0;
8733 dmae->len = sizeof(struct host_func_stats) >> 2;
8734 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
8735 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
8736 dmae->comp_val = DMAE_COMP_VAL;
8741 DBEXIT(BXE_VERBOSE_STATS);
/*
 * bxe_stats_stop():
 * Statistics state-machine action for the STOP event: collect one last
 * round of hardware (PMF only) and STORM statistics, push the results to
 * the OS, flush the statistics back to the chip (PMF only), and post the
 * final hardware request.
 *
 * NOTE(review): the declaration of 'update', the wait-for-completion
 * call, and the "if (update)" guard are on lines not visible in this
 * listing.
 */
8749 bxe_stats_stop(struct bxe_softc *sc)
8753 DBENTER(BXE_VERBOSE_STATS);
8757 /* Wait for any pending completions. */
8760 if (sc->port.pmf == 1)
8761 update = (bxe_stats_hw_update(sc) == 0);
8763 update |= (bxe_stats_storm_update(sc) == 0);
8766 bxe_stats_net_update(sc);
8768 if (sc->port.pmf == 1)
8769 bxe_stats_port_stop(sc);
8771 bxe_stats_hw_post(sc);
8775 DBEXIT(BXE_VERBOSE_STATS);
8779 * A dummy function to fill in the statistics state transition table.
/* No-op state-machine action; logs entry/exit in debug builds only. */
8785 bxe_stats_do_nothing(struct bxe_softc *sc)
8787 DBENTER(BXE_VERBOSE_STATS);
8788 DBEXIT(BXE_VERBOSE_STATS);
/*
 * Statistics state machine: indexed by [current state][event], each entry
 * names the action to run and the state to enter afterwards.  Rows are
 * STATS_STATE_DISABLED and STATS_STATE_ENABLED; columns are the
 * PMF / LINK_UP / UPDATE / STOP events (see the inline labels below).
 */
8791 static const struct {
8792 void (*action)(struct bxe_softc *sc);
8793 enum bxe_stats_state next_state;
8794 } bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
8797 /* DISABLED PMF */ {bxe_stats_pmf_update, STATS_STATE_DISABLED},
8798 /* LINK_UP */ {bxe_stats_start, STATS_STATE_ENABLED},
8799 /* UPDATE */ {bxe_stats_do_nothing, STATS_STATE_DISABLED},
8800 /* STOP */ {bxe_stats_do_nothing, STATS_STATE_DISABLED}
8804 /* ENABLED PMF */ {bxe_stats_pmf_start, STATS_STATE_ENABLED},
8805 /* LINK_UP */ {bxe_stats_restart, STATS_STATE_ENABLED},
8806 /* UPDATE */ {bxe_stats_update, STATS_STATE_ENABLED},
8807 /* STOP */ {bxe_stats_stop, STATS_STATE_DISABLED}
8812 * Move to the next state of the statistics state machine.
/*
 * Dispatch one event through the statistics state machine: run the
 * action registered for (current state, event) in bxe_stats_stm[][]
 * and advance sc->stats_state to the table's next_state.
 * UPDATE events are too frequent to trace, hence the filtering below.
 */
8818 bxe_stats_handle(struct bxe_softc *sc, enum bxe_stats_event event)
8820 enum bxe_stats_state state;
8822 DBENTER(BXE_EXTREME_STATS);
8824 state = sc->stats_state;
/* Skip tracing for high-frequency UPDATE events. */
8827 if (event != STATS_EVENT_UPDATE)
8828 DBPRINT(sc, BXE_VERBOSE_STATS,
8829 "%s(): Current state = %d, event = %d.\n", __FUNCTION__,
/* Perform the action and take the state transition. */
8833 bxe_stats_stm[state][event].action(sc);
8834 sc->stats_state = bxe_stats_stm[state][event].next_state;
8837 if (event != STATS_EVENT_UPDATE)
8838 DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): New state = %d.\n",
8839 __FUNCTION__, sc->stats_state);
8842 DBEXIT(BXE_EXTREME_STATS);
8846 * bxe_chktso_window()
8847 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
8848 * Check that (13 total bds - 3bds) = 10 bd window >= MSS.
8849 * The window: 3 bds are = 1 (for headers BD) + 2 (for PBD and last BD)
8850 * The headers come in a separate bd in FreeBSD. So 13-3=10.
8853 * 0 if OK to send, 1 if packet needs further defragmentation.
/*
 * NOTE(review): lines are elided in this excerpt — the initialization
 * of wnd_size/wnd_sum/defrag and the function tail (including the
 * final return) are not visible here.
 */
8856 bxe_chktso_window(struct bxe_softc* sc, int nsegs, bus_dma_segment_t *segs,
8859 uint32_t num_wnds, wnd_size, wnd_sum;
8860 int32_t frag_idx, wnd_idx;
8861 unsigned short lso_mss;
8867 num_wnds = nsegs - wnd_size;
8868 lso_mss = htole16(m0->m_pkthdr.tso_segsz);
8871 * Total Header lengths Eth+IP+TCP in 1st FreeBSD mbuf so
8872 * calculate the first window sum of data skip the first
8873 * assuming it is the header in FreeBSD.
/* Sum the first window; segment 0 (headers) is intentionally skipped. */
8875 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++)
8876 wnd_sum += htole16(segs[frag_idx].ds_len);
8878 /* Check the first 10 bd window size */
8879 if (wnd_sum < lso_mss)
8880 return (defrag = 1);
8882 /* Run through the windows */
/* Slide the window one segment at a time, maintaining a running sum. */
8883 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
8884 /* Subtract the 1st mbuf->m_len of the last wndw(-header). */
8885 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
8886 /* Add the next mbuf len to the len of our new window. */
8887 wnd_sum += htole16(segs[frag_idx].ds_len);
8888 if (wnd_sum < lso_mss) {
8899 * Encapsulates an mbuf cluster into the tx_bd chain structure and
8900 * makes the memory visible to the controller.
8902 * If an mbuf is submitted to this routine and cannot be given to the
8903 * controller (e.g. it has too many fragments) then the function may free
8904 * the mbuf and return to the caller.
8907 * 0 = Success, !0 = Failure
8908 * Note the side effect that an mbuf may be freed if it causes a problem.
/*
 * NOTE(review): many lines are elided throughout this excerpt; local
 * declarations (m0, map, flags, mac_type, tmp_uh, tmp_csum, ...),
 * several closing braces, and some error-path statements are not
 * visible here. Comments below describe only the visible code.
 */
8911 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
8913 bus_dma_segment_t segs[32];
8916 struct eth_tx_parse_bd *tx_parse_bd;
8917 struct eth_tx_bd *tx_data_bd;
8918 struct eth_tx_bd *tx_total_pkt_size_bd;
8919 struct eth_tx_start_bd *tx_start_bd;
8920 uint16_t etype, sw_tx_bd_prod, sw_pkt_prod, total_pkt_size;
8921 // uint16_t bd_index, pkt_index;
8923 int i, defragged, e_hlen, error, nsegs, rc, nbds, vlan_off, ovlan;
8924 struct bxe_softc *sc;
8927 DBENTER(BXE_VERBOSE_SEND);
8929 DBRUN(M_ASSERTPKTHDR(*m_head));
8932 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
8936 tx_total_pkt_size_bd = NULL;
8938 /* Get the H/W pointer (0 to 65535) for packets and BD's. */
8939 sw_pkt_prod = fp->tx_pkt_prod;
8940 sw_tx_bd_prod = fp->tx_bd_prod;
8942 /* Create the S/W index (0 to MAX_TX_BD) for packets and BD's. */
8943 // pkt_index = TX_BD(sw_pkt_prod);
8944 // bd_index = TX_BD(sw_tx_bd_prod);
8946 mac_type = UNICAST_ADDRESS;
8948 /* Map the mbuf into the next open DMAable memory. */
8949 map = fp->tx_mbuf_map[TX_BD(sw_pkt_prod)];
8950 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, map, m0,
8951 segs, &nsegs, BUS_DMA_NOWAIT);
8953 /* Handle any mapping errors. */
8954 if(__predict_false(error != 0)){
8955 fp->tx_dma_mapping_failure++;
8956 if (error == ENOMEM) {
8957 /* Resource issue, try again later. */
8959 } else if (error == EFBIG) {
8960 /* Possibly recoverable with defragmentation. */
8961 fp->mbuf_defrag_attempts++;
8962 m0 = m_defrag(*m_head, M_DONTWAIT);
8964 fp->mbuf_defrag_failures++;
8967 /* Defrag successful, try mapping again.*/
8969 error = bus_dmamap_load_mbuf_sg(
8970 fp->tx_mbuf_tag, map, m0,
8971 segs, &nsegs, BUS_DMA_NOWAIT);
8973 fp->tx_dma_mapping_failure++;
8978 /* Unknown, unrecoverable mapping error. */
8979 DBPRINT(sc, BXE_WARN_SEND,
8980 "%s(): Unknown TX mapping error! "
8981 "rc = %d.\n", __FUNCTION__, error);
8982 DBRUN(bxe_dump_mbuf(sc, m0));
8986 goto bxe_tx_encap_continue;
8989 /* Make sure there's enough room in the send queue. */
/* +2 accounts for the Start BD and Parsing BD beyond the data segments. */
8990 if (__predict_false((nsegs + 2) >
8991 (USABLE_TX_BD - fp->tx_bd_used))) {
8992 /* Recoverable, try again later. */
8993 fp->tx_hw_queue_full++;
8994 bus_dmamap_unload(fp->tx_mbuf_tag, map);
8996 goto bxe_tx_encap_continue;
8999 /* Capture the current H/W TX chain high watermark. */
9000 if (__predict_false(fp->tx_hw_max_queue_depth <
9002 fp->tx_hw_max_queue_depth = fp->tx_bd_used;
9004 /* Now make sure it fits in the packet window. */
/* 12 = 13 BD window minus the header BD (see bxe_chktso_window()). */
9005 if (__predict_false(nsegs > 12)) {
9007 * The mbuf may be too big for the controller
9008 * to handle. If the frame is a TSO frame
9009 * we'll need to do an additional check.
9011 if(m0->m_pkthdr.csum_flags & CSUM_TSO){
9012 if (bxe_chktso_window(sc,nsegs,segs,m0) == 0)
9014 goto bxe_tx_encap_continue;
9016 fp->tx_window_violation_tso++;
9018 fp->tx_window_violation_std++;
9020 /* No sense trying to defrag again, we'll drop the frame. */
9025 bxe_tx_encap_continue:
9026 /* Check for errors */
9029 /* Recoverable try again later */
9031 fp->tx_soft_errors++;
9032 fp->tx_mbuf_alloc--;
9036 goto bxe_tx_encap_exit;
9039 /* Save the mbuf and mapping. */
9040 fp->tx_mbuf_ptr[TX_BD(sw_pkt_prod)] = m0;
9041 fp->tx_mbuf_map[TX_BD(sw_pkt_prod)] = map;
9043 /* Set flag according to packet type (UNICAST_ADDRESS is default). */
9044 if (m0->m_flags & M_BCAST)
9045 mac_type = BROADCAST_ADDRESS;
9046 else if (m0->m_flags & M_MCAST)
9047 mac_type = MULTICAST_ADDRESS;
9049 /* Prepare the first transmit (Start) BD for the mbuf. */
9050 tx_start_bd = &fp->tx_chain[TX_BD(sw_tx_bd_prod)].start_bd;
9052 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
9053 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
9054 tx_start_bd->nbytes = htole16(segs[0].ds_len);
9055 total_pkt_size += tx_start_bd->nbytes;
9056 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9057 tx_start_bd->general_data =
9058 (mac_type << ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
9060 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
9062 /* All frames have at least Start BD + Parsing BD. */
9064 tx_start_bd->nbd = htole16(nbds);
9066 if (m0->m_flags & M_VLANTAG) {
9067 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9068 tx_start_bd->vlan = htole16(m0->m_pkthdr.ether_vtag);
9071 * In cases where the VLAN tag is not used the firmware
9072 * expects to see a packet counter in the VLAN tag field
9073 * Failure to do so will cause an assertion which will
9074 * stop the controller.
9076 tx_start_bd->vlan = htole16(fp->tx_pkt_prod);
9079 * Add a parsing BD from the chain. The parsing BD is always added,
9080 * however, it is only used for TSO & chksum.
9082 sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9083 tx_parse_bd = (struct eth_tx_parse_bd *)
9084 &fp->tx_chain[TX_BD(sw_tx_bd_prod)].parse_bd;
9085 memset(tx_parse_bd, 0, sizeof(struct eth_tx_parse_bd));
9087 /* Gather all info about the packet and add to tx_parse_bd */
9088 if (m0->m_pkthdr.csum_flags) {
9089 struct ether_vlan_header *eh;
9090 struct ip *ip = NULL;
9091 struct tcphdr *th = NULL;
9093 struct udphdr *uh = NULL;
9095 /* Map Ethernet header to find type & header length. */
9096 eh = mtod(m0, struct ether_vlan_header *);
9098 /* Handle VLAN encapsulation if present. */
9099 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
9100 etype = ntohs(eh->evl_proto);
9101 e_hlen = ETHER_HDR_LEN + vlan_off;
9103 etype = ntohs(eh->evl_encap_proto);
9104 e_hlen = ETHER_HDR_LEN;
9107 /* Set the Ethernet header length in 16 bit words. */
9108 tx_parse_bd->global_data = (e_hlen + ovlan) >> 1;
9109 tx_parse_bd->global_data |= ((m0->m_flags & M_VLANTAG) <<
9110 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT);
9114 /* If mbuf len < 20bytes, IP header is in next mbuf. */
9115 if (m0->m_len < sizeof(struct ip))
9116 ip = (struct ip *) m0->m_next->m_data;
9118 ip = (struct ip *) (m0->m_data + e_hlen);
9120 /* Calculate IP header length (16 bit words). */
9121 tx_parse_bd->ip_hlen = (ip->ip_hl << 1);
9123 /* Calculate enet + IP header length (16 bit words). */
9124 tx_parse_bd->total_hlen = tx_parse_bd->ip_hlen +
9127 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
9128 fp->tx_offload_frames_csum_ip++;
9129 flags |= ETH_TX_BD_FLAGS_IP_CSUM;
9132 /* Handle any checksums requested by the stack. */
9133 if ((m0->m_pkthdr.csum_flags & CSUM_TCP)||
9134 (m0->m_pkthdr.csum_flags & CSUM_TSO)){
9136 /* Get the TCP header. */
9137 th = (struct tcphdr *)((caddr_t)ip +
9140 /* Add the TCP checksum offload flag. */
9141 flags |= ETH_TX_BD_FLAGS_L4_CSUM;
9142 fp->tx_offload_frames_csum_tcp++;
9144 /* Update the enet + IP + TCP header length. */
9145 tx_parse_bd->total_hlen +=
9146 (uint16_t)(th->th_off << 1);
9148 /* Get the pseudo header checksum. */
9149 tx_parse_bd->tcp_pseudo_csum =
9152 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
9154 * The hardware doesn't actually support UDP
9155 * checksum offload but we can fake it by
9156 * doing TCP checksum offload and factoring
9157 * out the extra bytes that are different
9158 * between the TCP header and the UDP header.
9160 * Calculation will begin 10 bytes before the
9161 * actual start of the UDP header. To work
9162 * around this we need to calculate the
9163 * checksum of the 10 bytes before the UDP
9164 * header and factor that out of the UDP
9165 * pseudo header checksum before asking the
9166 * H/W to calculate the full UDP checksum.
9171 /* This value is 10. */
9172 uint8_t fix = (uint8_t) (offsetof(struct tcphdr, th_sum) -
9173 (int) offsetof(struct udphdr, uh_sum));
9176 * Add the TCP checksum offload flag for
9179 flags |= ETH_TX_BD_FLAGS_L4_CSUM;
9180 fp->tx_offload_frames_csum_udp++;
9181 tx_parse_bd->global_data |=
9182 ETH_TX_PARSE_BD_UDP_CS_FLG;
9184 /* Get a pointer to the UDP header. */
9185 uh = (struct udphdr *)((caddr_t)ip +
9188 /* Set pointer 10 bytes before UDP header. */
9189 tmp_uh = (uint32_t *)((uint8_t *)uh -
9193 * Calculate a pseudo header checksum over
9194 * the 10 bytes before the UDP header.
9196 tmp_csum = in_pseudo(ntohl(*tmp_uh),
9197 ntohl(*(tmp_uh + 1)),
9198 ntohl((*(tmp_uh + 2)) & 0x0000FFFF));
9200 /* Update the enet + IP + UDP header length. */
9201 tx_parse_bd->total_hlen +=
9202 (sizeof(struct udphdr) >> 1);
9203 tx_parse_bd->tcp_pseudo_csum =
9204 ~in_addword(uh->uh_sum, ~tmp_csum);
9207 /* Update the offload flags. */
9208 tx_start_bd->bd_flags.as_bitfield |= flags;
9211 case ETHERTYPE_IPV6:
9212 fp->tx_unsupported_tso_request_ipv6++;
9213 /* ToDo: Add IPv6 support. */
9217 fp->tx_unsupported_tso_request_not_tcp++;
9218 /* ToDo - How to handle this error? */
9221 /* Setup the Parsing BD with TSO specific info */
9222 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
9223 uint16_t hdr_len = tx_parse_bd->total_hlen << 1;
9225 tx_start_bd->bd_flags.as_bitfield |=
9226 ETH_TX_BD_FLAGS_SW_LSO;
9227 fp->tx_offload_frames_tso++;
9229 /* ToDo: Does this really help? */
9230 if (__predict_false(tx_start_bd->nbytes > hdr_len)) {
9231 fp->tx_header_splits++;
9233 * Split the first BD into 2 BDs to make the
9234 * firmwares job easy...
9237 DBPRINT(sc, BXE_EXTREME_SEND,
9238 "%s(): TSO split headr size is %d (%x:%x) nbds %d\n",
9239 __FUNCTION__, tx_start_bd->nbytes,
9240 tx_start_bd->addr_hi,
9241 tx_start_bd->addr_lo, nbds);
9243 sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9245 /* New transmit BD (after the tx_parse_bd). */
9247 &fp->tx_chain[TX_BD(sw_tx_bd_prod)].reg_bd;
9248 tx_data_bd->addr_hi =
9249 htole32(U64_HI(segs[0].ds_addr + hdr_len));
9250 tx_data_bd->addr_lo =
9251 htole32(U64_LO(segs[0].ds_addr + hdr_len));
9252 tx_data_bd->nbytes =
9253 htole16(segs[0].ds_len) - hdr_len;
9254 if (tx_total_pkt_size_bd == NULL)
9255 tx_total_pkt_size_bd = tx_data_bd;
9259 * The controller needs the following info for TSO:
9260 * MSS, tcp_send_seq, ip_id, and tcp_pseudo_csum.
9262 tx_parse_bd->lso_mss = htole16(m0->m_pkthdr.tso_segsz);
9263 tx_parse_bd->tcp_send_seq = ntohl(th->th_seq);
9264 tx_parse_bd->tcp_flags = th->th_flags;
9265 tx_parse_bd->ip_id = ntohs(ip->ip_id);
9267 tx_parse_bd->tcp_pseudo_csum =
9268 ntohs(in_pseudo(ip->ip_src.s_addr,
9269 ip->ip_dst.s_addr, htons(IPPROTO_TCP)));
9271 tx_parse_bd->global_data |=
9272 ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9276 /* Prepare remaining BDs. Start_tx_bd contains first seg (frag). */
9277 for (i = 1; i < nsegs ; i++) {
9278 sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9279 tx_data_bd = &fp->tx_chain[TX_BD(sw_tx_bd_prod)].reg_bd;
9280 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
9281 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
9282 tx_data_bd->nbytes = htole16(segs[i].ds_len);
9283 if (tx_total_pkt_size_bd == NULL)
9284 tx_total_pkt_size_bd = tx_data_bd;
9285 total_pkt_size += tx_data_bd->nbytes;
9288 if(tx_total_pkt_size_bd != NULL)
9289 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
9291 /* Update TX BD producer index value for next TX */
9292 sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9294 /* Update the used TX BD counter. */
9295 fp->tx_bd_used += nbds;
9298 * If the chain of tx_bd's describing this frame
9299 * is adjacent to or spans an eth_tx_next_bd element
9300 * then we need to increment the nbds value.
9302 if(TX_IDX(sw_tx_bd_prod) < nbds)
9305 /* Don't allow reordering of writes for nbd and packets. */
9307 fp->tx_db.data.prod += nbds;
9309 /* Producer points to the next free tx_bd at this point. */
9311 fp->tx_bd_prod = sw_tx_bd_prod;
/* Ring the doorbell to tell the controller new BDs are ready. */
9313 DOORBELL(sc, fp->index, fp->tx_db.raw);
9317 /* Prevent speculative reads from getting ahead of the status block. */
9318 bus_space_barrier(sc->bxe_btag, sc->bxe_bhandle,
9319 0, 0, BUS_SPACE_BARRIER_READ);
9321 /* Prevent speculative reads from getting ahead of the doorbell. */
9322 bus_space_barrier(sc->bxe_db_btag, sc->bxe_db_bhandle,
9323 0, 0, BUS_SPACE_BARRIER_READ);
9326 DBEXIT(BXE_VERBOSE_SEND);
9332 * Legacy (non-RSS) dispatch routine.
/*
 * if_start entry point: bail out when the interface is not running,
 * is flow-controlled (OACTIVE), or the link is down; otherwise hand
 * the work to bxe_tx_start_locked() on the selected fastpath queue.
 * NOTE(review): fp selection and the FP lock acquire/release are in
 * lines elided from this excerpt.
 */
9338 bxe_tx_start(struct ifnet *ifp)
9340 struct bxe_softc *sc;
9341 struct bxe_fastpath *fp;
9344 DBENTER(BXE_EXTREME_SEND);
9346 /* Exit if the transmit queue is full or link down. */
9347 if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
9348 IFF_DRV_RUNNING) || !sc->link_vars.link_up) {
9349 DBPRINT(sc, BXE_WARN,
9350 "%s(): No link or TX queue full, ignoring "
9351 "transmit request.\n", __FUNCTION__);
9352 goto bxe_tx_start_exit;
9355 /* Set the TX queue for the frame. */
9359 bxe_tx_start_locked(ifp, fp);
9363 DBEXIT(BXE_EXTREME_SEND);
9368 * Legacy (non-RSS) transmit routine.
/*
 * Drain the interface send queue into the TX BD chain, with the
 * fastpath lock held. On encapsulation failure the mbuf (if still
 * valid) is prepended back onto the send queue and OACTIVE is set
 * so the stack stops handing us frames until completions free room.
 */
9374 bxe_tx_start_locked(struct ifnet *ifp, struct bxe_fastpath *fp)
9376 struct bxe_softc *sc;
9377 struct mbuf *m = NULL;
9381 DBENTER(BXE_EXTREME_SEND);
9383 BXE_FP_LOCK_ASSERT(fp);
9385 /* Keep adding entries while there are frames to send. */
9386 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
9388 /* Check for any frames to send. */
9389 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
9390 if (__predict_false(m == NULL))
9393 /* The transmit mbuf now belongs to us, keep track of it. */
9394 fp->tx_mbuf_alloc++;
9397 * Pack the data into the transmit ring. If we
9398 * don't have room, place the mbuf back at the
9399 * head of the TX queue, set the OACTIVE flag,
9400 * and wait for the NIC to drain the chain.
9402 if (__predict_false(bxe_tx_encap(fp, &m))) {
9403 fp->tx_encap_failures++;
9404 /* Very Bad Frames(tm) may have been dropped. */
9407 * Mark the TX queue as full and return
9410 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
9411 IFQ_DRV_PREPEND(&ifp->if_snd, m);
9412 fp->tx_mbuf_alloc--;
9413 fp->tx_queue_xoff++;
9418 /* Stop looking for more work. */
9422 /* The transmit frame was enqueued successfully. */
9425 /* Send a copy of the frame to any BPF listeners. */
9429 /* No TX packets were dequeued. */
9431 /* Reset the TX watchdog timeout timer. */
9432 fp->watchdog_timer = BXE_TX_TIMEOUT;
9434 DBEXIT(BXE_EXTREME_SEND);
9437 #if __FreeBSD_version >= 800000
9439 * Multiqueue (RSS) dispatch routine.
9442 * 0 if transmit succeeds, !0 otherwise.
/*
 * if_transmit entry point: pick a fastpath TX queue (by flow ID when
 * present, otherwise a default index set in elided code), and either
 * transmit via bxe_tx_mq_start_locked() or, when the interface is not
 * ready / link is down, stash the mbuf in the queue's buf_ring.
 */
9445 bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m)
9447 struct bxe_softc *sc;
9448 struct bxe_fastpath *fp;
9452 DBENTER(BXE_EXTREME_SEND);
9456 /* If using flow ID, assign the TX queue based on the flow ID. */
9457 if ((m->m_flags & M_FLOWID) != 0)
9458 fp_index = m->m_pkthdr.flowid % sc->num_queues;
9460 /* Select the fastpath TX queue for the frame. */
9461 fp = &sc->fp[fp_index];
9463 /* Skip H/W enqueue if transmit queue is full or link down. */
9464 if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
9465 IFF_DRV_RUNNING) || !sc->link_vars.link_up) {
9466 /* Stash the mbuf if we can. */
9467 rc = drbr_enqueue(ifp, fp->br, m);
9468 goto bxe_tx_mq_start_exit;
9472 rc = bxe_tx_mq_start_locked(ifp, fp, m);
9475 bxe_tx_mq_start_exit:
9476 DBEXIT(BXE_EXTREME_SEND);
9482 * Multiqueue (TSS) transmit routine. This routine is responsible
9483 * for adding a frame to the hardware's transmit queue.
9486 * 0 if transmit succeeds, !0 otherwise.
/*
 * With the fastpath lock held, push the new frame plus anything
 * pending in the buf_ring into the TX BD chain. Ordering rule: if
 * the ring already holds frames, the new mbuf is enqueued behind
 * them before dequeuing, so packets are never reordered on the
 * success path. NOTE(review): the branch condition at the first
 * dequeue and several closing braces are in elided lines.
 */
9489 bxe_tx_mq_start_locked(struct ifnet *ifp,
9490 struct bxe_fastpath *fp, struct mbuf *m)
9492 struct bxe_softc *sc;
9494 int depth, rc, tx_count;
9497 DBENTER(BXE_EXTREME_SEND);
9501 /* Fetch the depth of the driver queue. */
9502 depth = drbr_inuse(ifp, fp->br);
9503 if (depth > fp->tx_max_drbr_queue_depth)
9504 fp->tx_max_drbr_queue_depth = depth;
9506 BXE_FP_LOCK_ASSERT(fp);
9509 /* No new work, check for pending frames. */
9510 next = drbr_dequeue(ifp, fp->br);
9511 } else if (drbr_needs_enqueue(ifp, fp->br)) {
9512 /* Both new and pending work, maintain packet order. */
9513 rc = drbr_enqueue(ifp, fp->br, m);
9515 fp->tx_soft_errors++;
9516 goto bxe_tx_mq_start_locked_exit;
9518 next = drbr_dequeue(ifp, fp->br);
9520 /* New work only, nothing pending. */
9523 /* Keep adding entries while there are frames to send. */
9524 while (next != NULL) {
9526 /* The transmit mbuf now belongs to us, keep track of it. */
9527 fp->tx_mbuf_alloc++;
9530 * Pack the data into the transmit ring. If we
9531 * don't have room, place the mbuf back at the
9532 * head of the TX queue, set the OACTIVE flag,
9533 * and wait for the NIC to drain the chain.
9535 rc = bxe_tx_encap(fp, &next);
9536 if (__predict_false(rc != 0)) {
9537 fp->tx_encap_failures++;
9538 /* Very Bad Frames(tm) may have been dropped. */
9541 * Mark the TX queue as full and save
9544 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
9545 fp->tx_frame_deferred++;
9547 /* This may reorder frame. */
9548 rc = drbr_enqueue(ifp, fp->br, next);
9549 fp->tx_mbuf_alloc--;
9552 /* Stop looking for more work. */
9556 /* The transmit frame was enqueued successfully. */
9559 /* Send a copy of the frame to any BPF listeners. */
9560 BPF_MTAP(ifp, next);
9562 /* Handle any completions if we're running low. */
9563 if (fp->tx_bd_used >= BXE_TX_CLEANUP_THRESHOLD)
9566 /* Close TX since there's so little room left. */
9567 if (fp->tx_bd_used >= BXE_TX_CLEANUP_THRESHOLD) {
9568 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
9572 next = drbr_dequeue(ifp, fp->br);
9575 /* No TX packets were dequeued. */
9577 /* Reset the TX watchdog timeout timer. */
9578 fp->watchdog_timer = BXE_TX_TIMEOUT;
9580 bxe_tx_mq_start_locked_exit:
9581 DBEXIT(BXE_EXTREME_SEND);
/*
 * if_qflush handler: walk every fastpath queue and drain its buf_ring,
 * dequeuing (and, per the visible loop, discarding) any mbufs still
 * staged for transmit. Called on interface teardown.
 * NOTE(review): the per-mbuf disposal (presumably m_freem) is in a
 * line elided from this excerpt — confirm against the full source.
 */
9587 bxe_mq_flush(struct ifnet *ifp)
9589 struct bxe_softc *sc;
9590 struct bxe_fastpath *fp;
9596 DBENTER(BXE_VERBOSE_UNLOAD);
9598 for (i = 0; i < sc->num_queues; i++) {
9601 if (fp->br != NULL) {
9602 DBPRINT(sc, BXE_VERBOSE_UNLOAD,
9603 "%s(): Clearing fp[%02d]...\n",
9604 __FUNCTION__, fp->index);
9607 while ((m = buf_ring_dequeue_sc(fp->br)) != NULL)
9615 DBEXIT(BXE_VERBOSE_UNLOAD);
9617 #endif /* FreeBSD_version >= 800000 */
9621 * Handles any IOCTL calls from the operating system.
9624 * 0 for success, positive value for failure.
/*
 * ioctl dispatcher for the interface: handles MTU changes, up/down
 * transitions, multicast list updates, media queries, and capability
 * toggles; everything else falls through to ether_ioctl(). Capability
 * changes that need a data-path rebuild set `reinit` so the controller
 * is stopped and restarted at the bottom.
 * NOTE(review): the switch statement itself, case labels, lock
 * acquisitions and some break statements are in elided lines.
 */
9627 bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
9629 struct bxe_softc *sc;
9631 int error, mask, reinit;
9634 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_MISC);
9636 ifr = (struct ifreq *)data;
/* SIOCSIFMTU: validate and apply a new MTU. */
9643 DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Received SIOCSIFMTU\n",
9646 /* Check that the MTU setting is supported. */
9647 if ((ifr->ifr_mtu < BXE_MIN_MTU) ||
9648 (ifr->ifr_mtu > BXE_JUMBO_MTU)) {
9654 ifp->if_mtu = ifr->ifr_mtu;
9655 BXE_CORE_UNLOCK(sc);
9660 /* Toggle the interface state up or down. */
9661 DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Received SIOCSIFFLAGS\n",
9665 /* Check if the interface is up. */
9666 if (ifp->if_flags & IFF_UP) {
9667 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
9668 /* Set promiscuous/multicast flags. */
9669 bxe_set_rx_mode(sc);
9672 bxe_init_locked(sc, LOAD_NORMAL);
9675 /* Bring down the interface. */
9676 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
9677 bxe_stop_locked(sc, UNLOAD_NORMAL);
9679 BXE_CORE_UNLOCK(sc);
9684 /* Add/Delete multicast addresses. */
9685 DBPRINT(sc, BXE_VERBOSE_MISC,
9686 "%s(): Received SIOCADDMULTI/SIOCDELMULTI\n", __FUNCTION__);
9689 /* Check if the interface is up. */
9690 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
9691 /* Set receive mode flags. */
9692 bxe_set_rx_mode(sc);
9693 BXE_CORE_UNLOCK(sc);
9698 /* Set/Get Interface media */
9699 DBPRINT(sc, BXE_VERBOSE_MISC,
9700 "%s(): Received SIOCSIFMEDIA/SIOCGIFMEDIA\n", __FUNCTION__);
9702 error = ifmedia_ioctl(ifp, ifr, &sc->bxe_ifmedia, command);
9705 /* Set interface capability */
9707 /* Find out which capabilities have changed. */
9708 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
9709 DBPRINT(sc, BXE_VERBOSE_MISC,
9710 "%s(): Received SIOCSIFCAP (mask = 0x%08X)\n", __FUNCTION__,
9715 /* Toggle the LRO capabilities enable flag. */
9716 if (mask & IFCAP_LRO) {
9717 ifp->if_capenable ^= IFCAP_LRO;
9718 sc->bxe_flags ^= BXE_TPA_ENABLE_FLAG;
9719 DBPRINT(sc, BXE_INFO_MISC,
9720 "%s(): Toggling LRO (bxe_flags = "
9721 "0x%08X).\n", __FUNCTION__, sc->bxe_flags);
9723 /* LRO requires different buffer setup. */
9727 /* Toggle the TX checksum capabilities enable flag. */
9728 if (mask & IFCAP_TXCSUM) {
9729 DBPRINT(sc, BXE_VERBOSE_MISC,
9730 "%s(): Toggling IFCAP_TXCSUM.\n", __FUNCTION__);
9732 ifp->if_capenable ^= IFCAP_TXCSUM;
9734 if (IFCAP_TXCSUM & ifp->if_capenable)
9735 ifp->if_hwassist = BXE_IF_HWASSIST;
9737 ifp->if_hwassist = 0;
9740 /* Toggle the RX checksum capabilities enable flag. */
9741 if (mask & IFCAP_RXCSUM) {
9742 DBPRINT(sc, BXE_VERBOSE_MISC,
9743 "%s(): Toggling IFCAP_RXCSUM.\n", __FUNCTION__);
9745 ifp->if_capenable ^= IFCAP_RXCSUM;
9747 if (IFCAP_RXCSUM & ifp->if_capenable)
9748 ifp->if_hwassist = BXE_IF_HWASSIST;
9750 ifp->if_hwassist = 0;
9753 /* Toggle VLAN_MTU capabilities enable flag. */
9754 if (mask & IFCAP_VLAN_MTU) {
9755 /* ToDo: Is this really true? */
9756 BXE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
9757 __FILE__, __LINE__);
9761 /* Toggle VLANHWTAG capabilities enabled flag. */
9762 if (mask & IFCAP_VLAN_HWTAGGING) {
9763 /* ToDo: Is this really true? */
9765 "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
9766 __FILE__, __LINE__);
9770 /* Toggle TSO4 capabilities enabled flag. */
9771 if (mask & IFCAP_TSO4) {
9772 DBPRINT(sc, BXE_VERBOSE_MISC,
9773 "%s(): Toggling IFCAP_TSO4.\n", __FUNCTION__);
9775 ifp->if_capenable ^= IFCAP_TSO4;
9778 /* Toggle TSO6 capabilities enabled flag. */
9779 if (mask & IFCAP_TSO6) {
9780 /* ToDo: Add TSO6 support. */
9782 "%s(%d): Changing TSO6 not supported!\n",
9783 __FILE__, __LINE__);
9785 BXE_CORE_UNLOCK(sc);
9788 * ToDo: Look into supporting:
9793 * WOL[_UCAST|_MCAST|_MAGIC]
9798 /* We don't know how to handle the IOCTL, pass it on. */
9799 error = ether_ioctl(ifp, command, data);
9803 /* Restart the controller with the new capabilities. */
9804 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && (reinit != 0)) {
9806 bxe_stop_locked(sc, UNLOAD_NORMAL);
9807 bxe_init_locked(sc, LOAD_NORMAL);
9808 BXE_CORE_UNLOCK(sc);
9811 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_MISC);
9817 * Gets the current value of the RX Completion Consumer index
9818 * from the fastpath status block, updates it as necessary if
9819 * it is pointing to a "Next Page" entry, and returns it to the
9823 * The adjusted value of *fp->rx_cons_sb.
9825 static __inline uint16_t
9826 bxe_rx_cq_cons(struct bxe_fastpath *fp)
9828 volatile uint16_t rx_cq_cons_sb = 0;
/* Read the hardware-maintained consumer index (little-endian in DMA). */
9831 rx_cq_cons_sb = (volatile uint16_t) le16toh(*fp->rx_cq_cons_sb);
9834 * It is valid for the hardware's copy of the completion
9835 * consumer index to be pointing at a "Next Page" entry in
9836 * the completion chain but the driver prefers to assume
9837 * that it is pointing at the next available CQE so we
9838 * need to adjust the value accordingly.
9840 if ((rx_cq_cons_sb & USABLE_RCQ_ENTRIES_PER_PAGE) ==
9841 USABLE_RCQ_ENTRIES_PER_PAGE)
9844 return (rx_cq_cons_sb);
/*
 * Returns non-zero when there is TX completion work pending: either
 * the hardware's packet consumer (status block copy) or the driver's
 * own consumer still trails the producer index.
 */
9848 bxe_has_tx_work(struct bxe_fastpath *fp)
9852 return (((fp->tx_pkt_prod != le16toh(*fp->tx_pkt_cons_sb)) || \
9853 (fp->tx_pkt_prod != fp->tx_pkt_cons)));
9857 * Checks if there are any received frames to process on the
9861 * 0 = No received frames pending, !0 = Received frames
/*
 * Compares the (next-page-adjusted) hardware completion-queue consumer
 * index against the driver's cached consumer index.
 */
9865 bxe_has_rx_work(struct bxe_fastpath *fp)
9869 return (bxe_rx_cq_cons(fp) != fp->rx_cq_cons);
9873 * Slowpath task entry point.
/*
 * Taskqueue handler for slowpath events. Reads the default status
 * block to classify the interrupt source (bit 0 = hardware attention,
 * bit 1 = CSTORM/statistics event), handles each, then re-arms all
 * five storm index groups via bxe_ack_sb().
 * NOTE(review): the attention handling body and the IGU enable flags
 * passed to the bxe_ack_sb() calls are in elided lines.
 */
9879 bxe_task_sp(void *xsc, int pending)
9881 struct bxe_softc *sc;
9886 DBPRINT(sc, BXE_EXTREME_INTR, "%s(): pending = %d.\n", __FUNCTION__,
9889 /* Check for the source of the interrupt. */
9890 sp_status = bxe_update_dsb_idx(sc);
9892 /* Handle any hardware attentions. */
9893 if (sp_status & 0x1) {
9898 /* CSTORM event asserted (query_stats, port delete ramrod, etc.). */
9899 if (sp_status & 0x2) {
9900 sc->stats_pending = 0;
9904 /* Check for other weirdness. */
9905 if (sp_status != 0) {
9906 DBPRINT(sc, BXE_WARN, "%s(): Unexpected slowpath interrupt "
9907 "(sp_status = 0x%04X)!\n", __FUNCTION__, sp_status);
9910 /* Acknowledge the xSTORM tags and enable slowpath interrupts. */
9911 bxe_ack_sb(sc, DEF_SB_ID, ATTENTION_ID, le16toh(sc->def_att_idx),
9913 bxe_ack_sb(sc, DEF_SB_ID, USTORM_ID, le16toh(sc->def_u_idx),
9915 bxe_ack_sb(sc, DEF_SB_ID, CSTORM_ID, le16toh(sc->def_c_idx),
9917 bxe_ack_sb(sc, DEF_SB_ID, XSTORM_ID, le16toh(sc->def_x_idx),
9919 bxe_ack_sb(sc, DEF_SB_ID, TSTORM_ID, le16toh(sc->def_t_idx),
9925 * Legacy interrupt entry point.
9927 * Verifies that the controller generated the interrupt and
9928 * then calls a separate routine to handle the various
9929 * interrupt causes: link, RX, and TX.
/*
 * INTx (shared line) interrupt handler: bxe_ack_int() returns a bitmap
 * identifying the source; fastpath work is either queued to the fp
 * taskqueue or run inline, and slowpath work likewise. Any bits still
 * set afterwards are logged as unexpected.
 * NOTE(review): fp selection, the taskqueue-vs-inline decision, and
 * the status-bit clearing statements are in elided lines.
 */
9935 bxe_intr_legacy(void *xsc)
9937 struct bxe_softc *sc;
9938 struct bxe_fastpath *fp;
9939 uint32_t mask, fp_status;
9944 /* Don't handle any interrupts if we're not ready. */
9945 if (__predict_false(sc->intr_sem != 0))
9946 goto bxe_intr_legacy_exit;
9948 /* Bail out if the interrupt wasn't generated by our hardware. */
9949 fp_status = bxe_ack_int(sc);
9951 goto bxe_intr_legacy_exit;
9953 /* Handle the fastpath interrupt. */
9955 * sb_id = 0 for ustorm, 1 for cstorm.
9956 * The bits returned from ack_int() are 0-15,
9957 * bit 0=attention status block
9958 * bit 1=fast path status block
9959 * A mask of 0x2 or more = tx/rx event
9960 * A mask of 1 = slow path event
9963 mask = (0x2 << fp->sb_id);
9964 DBPRINT(sc, BXE_INSANE_INTR, "%s(): fp_status = 0x%08X, mask = "
9965 "0x%08X\n", __FUNCTION__, fp_status, mask);
9967 /* CSTORM event means fastpath completion. */
9968 if (fp_status & mask) {
9969 /* This interrupt must be ours, disable further interrupts. */
9970 bxe_ack_sb(sc, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9972 taskqueue_enqueue(fp->tq, &fp->task);
9974 bxe_task_fp((void *)fp, 0);
9976 /* Clear this event from the status flags. */
9980 /* Handle all slow path interrupts and attentions */
9981 if (fp_status & 0x1) {
9982 /* Acknowledge and disable further slowpath interrupts. */
9983 bxe_ack_sb(sc, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
9985 /* Schedule the slowpath task. */
9986 taskqueue_enqueue(sc->tq, &sc->task);
9988 bxe_task_sp(xsc, 0);
9990 /* Clear this event from the status flags. */
9996 DBPRINT(sc, BXE_WARN,
9997 "%s(): Unexpected fastpath status (fp_status = 0x%08X)!\n",
9998 __FUNCTION__, fp_status);
10002 DBEXIT(BXE_EXTREME_INTR);
10004 bxe_intr_legacy_exit:
10009 * Slowpath interrupt entry point.
10011 * Acknowledge the interrupt and schedule a slowpath task.
/*
 * MSI/MSI-X slowpath vector handler: mask further slowpath interrupts
 * via bxe_ack_sb() and then either defer to the slowpath taskqueue or
 * run bxe_task_sp() inline (the selection between the two paths is in
 * elided preprocessor/conditional lines).
 */
10017 bxe_intr_sp(void *xsc)
10019 struct bxe_softc *sc;
10023 DBPRINT(sc, BXE_INSANE_INTR, "%s(%d): Slowpath interrupt.\n",
10024 __FUNCTION__, curcpu);
10026 /* Don't handle any interrupts if we're not ready. */
10027 if (__predict_false(sc->intr_sem != 0))
10028 goto bxe_intr_sp_exit;
10030 /* Acknowledge and disable further slowpath interrupts. */
10031 bxe_ack_sb(sc, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
10034 /* Schedule the slowpath task. */
10035 taskqueue_enqueue(sc->tq, &sc->task);
10037 bxe_task_sp(xsc, 0);
10045 * Fastpath interrupt entry point.
10047 * Acknowledge the interrupt and schedule a fastpath task.
/*
 * MSI/MSI-X fastpath vector handler (one per queue): mask further
 * interrupts for this status block, then either defer to the per-queue
 * taskqueue or run bxe_task_fp() inline (the selection between the two
 * paths is in elided preprocessor/conditional lines).
 */
10053 bxe_intr_fp (void *xfp)
10055 struct bxe_fastpath *fp;
10056 struct bxe_softc *sc;
10061 DBPRINT(sc, BXE_INSANE_INTR,
10062 "%s(%d): fp[%02d].sb_id = %d interrupt.\n",
10063 __FUNCTION__, curcpu, fp->index, fp->sb_id);
10065 /* Don't handle any interrupts if we're not ready. */
10066 if (__predict_false(sc->intr_sem != 0))
10067 goto bxe_intr_fp_exit;
10069 /* Disable further interrupts. */
10070 bxe_ack_sb(sc, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
10072 taskqueue_enqueue(fp->tq, &fp->task);
10074 bxe_task_fp (xfp, 0);
10082 * Fastpath task entry point.
10084 * Handle any pending transmit or receive events.
/*
 * Per-queue taskqueue handler: refresh the fastpath status block
 * indices, service TX completions and RX frames (the service-call
 * bodies are in elided lines), then acknowledge both storm indices —
 * the final CSTORM ack re-enables interrupts for this queue.
 */
10090 bxe_task_fp (void *xfp, int pending)
10092 struct bxe_fastpath *fp;
10093 struct bxe_softc *sc;
10098 DBPRINT(sc, BXE_EXTREME_INTR, "%s(%d): Fastpath task on fp[%02d]"
10099 ".sb_id = %d\n", __FUNCTION__, curcpu, fp->index, fp->sb_id);
10101 /* Update the fast path indices */
10102 bxe_update_fpsb_idx(fp);
10104 /* Service any completed TX frames. */
10105 if (bxe_has_tx_work(fp)) {
10111 /* Service any completed RX frames. */
10115 /* Acknowledge the fastpath status block indices. */
10116 bxe_ack_sb(sc, fp->sb_id, USTORM_ID, fp->fp_u_idx, IGU_INT_NOP, 1);
10117 bxe_ack_sb(sc, fp->sb_id, CSTORM_ID, fp->fp_c_idx, IGU_INT_ENABLE, 1);
10121 * Clears the fastpath (per-queue) status block.
/*
 * Zero both the USTORM and CSTORM host status block areas for the
 * given sb_id in the controller's CSEM fast memory, using
 * bxe_init_fill() (sizes are in 32-bit words, hence the "/ 4").
 */
10127 bxe_zero_sb(struct bxe_softc *sc, int sb_id)
10131 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10132 port = BP_PORT(sc);
10135 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10136 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
10137 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
10138 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10139 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
10140 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
10142 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10146 * Initialize the fastpath (per queue) status block.
/*
 * Program the controller with the DMA address of this queue's host
 * status block: for each of the USTORM and CSTORM sections, write the
 * 64-bit bus address (low then high word) and owning function into
 * CSTORM internal memory, initially mask every index (HC_DISABLE = 1),
 * then enable interrupts for the block via bxe_ack_sb().
 * NOTE(review): the U64_HI() writes and the offsetof() field names
 * are in elided line fragments.
 */
10152 bxe_init_sb(struct bxe_softc *sc, struct host_status_block *sb,
10153 bus_addr_t mapping, int sb_id)
10156 int func, index, port;
10158 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10160 port = BP_PORT(sc);
10161 func = BP_FUNC(sc);
10163 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR),
10164 "%s(): Initializing sb_id = %d on port %d, function %d.\n",
10165 __FUNCTION__, sb_id, port, func);
10167 /* Setup the USTORM status block. */
10168 section = ((uint64_t)mapping) + offsetof(struct host_status_block,
10170 sb->u_status_block.status_block_id = sb_id;
10172 REG_WR(sc, BAR_CSTORM_INTMEM +
10173 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
10174 REG_WR(sc, BAR_CSTORM_INTMEM +
10175 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
10177 REG_WR8(sc, BAR_CSTORM_INTMEM + FP_USB_FUNC_OFF +
10178 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
/* Start with every USTORM index disabled (masked). */
10180 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
10181 REG_WR16(sc, BAR_CSTORM_INTMEM +
10182 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 0x1);
10184 /* Setup the CSTORM status block. */
10185 section = ((uint64_t)mapping) + offsetof(struct host_status_block,
10187 sb->c_status_block.status_block_id = sb_id;
10189 /* Write the status block address to CSTORM. Order is important! */
10190 REG_WR(sc, BAR_CSTORM_INTMEM +
10191 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
10192 REG_WR(sc, BAR_CSTORM_INTMEM +
10193 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
10195 REG_WR8(sc, BAR_CSTORM_INTMEM + FP_CSB_FUNC_OFF +
10196 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
/* Start with every CSTORM index disabled (masked). */
10198 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
10199 REG_WR16(sc, BAR_CSTORM_INTMEM +
10200 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 0x1);
10202 /* Enable interrupts. */
10203 bxe_ack_sb(sc, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
10205 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
/*
 * bxe_zero_def_sb():
 *
 * Zero-fill all four STORM-resident copies (TSTORM, USTORM, CSTORM,
 * XSTORM) of this function's default status block.
 */
10209 * Clears the default status block.
10215 bxe_zero_def_sb(struct bxe_softc *sc)
10219 func = BP_FUNC(sc);
10221 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10222 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR),
10223 "%s(): Clearing default status block on function %d.\n",
10224 __FUNCTION__, func);
10226 /* Fill the STORM's copy of the default status block with 0. */
10227 bxe_init_fill(sc, TSEM_REG_FAST_MEMORY +
10228 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
10229 sizeof(struct tstorm_def_status_block) / 4);
10230 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10231 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
10232 sizeof(struct cstorm_def_status_block_u) / 4);
10233 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10234 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
10235 sizeof(struct cstorm_def_status_block_c) / 4);
10236 bxe_init_fill(sc, XSEM_REG_FAST_MEMORY +
10237 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
10238 sizeof(struct xstorm_def_status_block) / 4);
10240 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
/*
 * bxe_init_def_sb():
 *
 * Program the default (slowpath/attention) status block: capture the
 * attention routing groups, point the HC at the attention section, then
 * write each per-STORM section's DMA address, owning function, and
 * coalescing-disable flags, and finally ack/enable the IGU.
 *
 * def_sb  - host virtual address of the default status block.
 * mapping - bus (DMA) address of the same block.
 * sb_id   - status block identifier (typically DEF_SB_ID).
 */
10244 * Initialize default status block.
10250 bxe_init_def_sb(struct bxe_softc *sc, struct host_def_status_block *def_sb,
10251 bus_addr_t mapping, int sb_id)
10254 int func, index, port, reg_offset, val;
10256 port = BP_PORT(sc);
10257 func = BP_FUNC(sc);
10259 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10260 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR),
10261 "%s(): Initializing default status block on port %d, function %d.\n",
10262 __FUNCTION__, port, func);
10264 /* Setup the default status block (DSB). */
10265 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10266 atten_status_block);
10267 def_sb->atten_status_block.status_block_id = sb_id;
/* Reset cached attention state/index before the hardware can raise any. */
10268 sc->attn_state = 0;
10269 sc->def_att_idx = 0;
10272 * Read routing configuration for attn signal
10273 * output of groups. Currently, only groups
10274 * 0 through 3 are wired.
10276 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
10277 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
/* Each group occupies four consecutive 32-bit AEU enable registers. */
10279 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
10280 sc->attn_group[index].sig[0] = REG_RD(sc, reg_offset +
10282 sc->attn_group[index].sig[1] = REG_RD(sc, reg_offset +
10283 0x10 * index + 0x4);
10284 sc->attn_group[index].sig[2] = REG_RD(sc, reg_offset +
10285 0x10 * index + 0x8);
10286 sc->attn_group[index].sig[3] = REG_RD(sc, reg_offset +
10287 0x10 * index + 0xc);
10289 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
10291 "%s(): attn_group[%d] = 0x%08X 0x%08X 0x%08x 0X%08x\n",
10292 __FUNCTION__, index, sc->attn_group[index].sig[0],
10293 sc->attn_group[index].sig[1], sc->attn_group[index].sig[2],
10294 sc->attn_group[index].sig[3]);
/* Tell the HC where the attention section lives (lo/hi halves). */
10297 reg_offset = port ? HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L;
10299 REG_WR(sc, reg_offset, U64_LO(section));
10300 REG_WR(sc, reg_offset + 4, U64_HI(section));
10302 reg_offset = port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0;
10304 val = REG_RD(sc, reg_offset);
10306 REG_WR(sc, reg_offset, val);
/* USTORM section of the default status block. */
10309 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10310 u_def_status_block);
10311 def_sb->u_def_status_block.status_block_id = sb_id;
10314 REG_WR(sc, BAR_CSTORM_INTMEM +
10315 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
10316 REG_WR(sc, BAR_CSTORM_INTMEM +
10317 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4), U64_HI(section));
10318 REG_WR8(sc, BAR_CSTORM_INTMEM + DEF_USB_FUNC_OFF +
10319 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
10321 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
10322 REG_WR16(sc, BAR_CSTORM_INTMEM +
10323 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
/* CSTORM section of the default status block. */
10326 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10327 c_def_status_block);
10328 def_sb->c_def_status_block.status_block_id = sb_id;
10331 REG_WR(sc, BAR_CSTORM_INTMEM +
10332 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
10333 REG_WR(sc, BAR_CSTORM_INTMEM +
10334 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4), U64_HI(section));
10335 REG_WR8(sc, BAR_CSTORM_INTMEM + DEF_CSB_FUNC_OFF +
10336 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
10338 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
10339 REG_WR16(sc, BAR_CSTORM_INTMEM +
10340 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
/* TSTORM section of the default status block. */
10343 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10344 t_def_status_block);
10345 def_sb->t_def_status_block.status_block_id = sb_id;
10348 REG_WR(sc, BAR_TSTORM_INTMEM +
10349 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
10350 REG_WR(sc, BAR_TSTORM_INTMEM +
10351 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section));
10352 REG_WR8(sc, BAR_TSTORM_INTMEM + DEF_TSB_FUNC_OFF +
10353 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
10355 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
10356 REG_WR16(sc, BAR_TSTORM_INTMEM +
10357 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* XSTORM section of the default status block. */
10360 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10361 x_def_status_block);
10362 def_sb->x_def_status_block.status_block_id = sb_id;
10365 REG_WR(sc, BAR_XSTORM_INTMEM +
10366 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
10367 REG_WR(sc, BAR_XSTORM_INTMEM +
10368 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section));
10369 REG_WR8(sc, BAR_XSTORM_INTMEM + DEF_XSB_FUNC_OFF +
10370 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
10372 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
10373 REG_WR16(sc, BAR_XSTORM_INTMEM +
10374 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* No statistics or MAC updates are outstanding yet. */
10376 sc->stats_pending = 0;
10377 sc->set_mac_pending = 0;
10379 bxe_ack_sb(sc, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
10381 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
/*
 * bxe_update_coalesce():
 *
 * Push the driver's rx_ticks/tx_ticks coalescing settings into each
 * fastpath queue's status block. A timeout of zero (ticks below the
 * BXE_BTR * 4 granularity) disables coalescing for that index.
 */
10385 * Update interrupt coalescing parameters.
10391 bxe_update_coalesce(struct bxe_softc *sc)
10393 int i, port, sb_id;
10395 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10397 port = BP_PORT(sc);
10398 /* Cycle through each fastpath queue and set the coalescing values. */
10399 for (i = 0; i < sc->num_queues; i++) {
10400 sb_id = sc->fp[i].sb_id;
10402 /* Receive interrupt coalescing is done on USTORM. */
10403 REG_WR8(sc, BAR_CSTORM_INTMEM +
10404 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
10405 U_SB_ETH_RX_CQ_INDEX), sc->rx_ticks / (BXE_BTR * 4));
/* Disable HC on the RX index when the timeout rounds to zero. */
10407 REG_WR16(sc, BAR_CSTORM_INTMEM +
10408 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
10409 U_SB_ETH_RX_CQ_INDEX),
10410 (sc->rx_ticks / (BXE_BTR * 4)) ? 0 : 1);
10412 /* Transmit interrupt coalescing is done on CSTORM. */
10413 REG_WR8(sc, BAR_CSTORM_INTMEM +
10414 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
10415 C_SB_ETH_TX_CQ_INDEX), sc->tx_ticks / (BXE_BTR * 4));
10416 REG_WR16(sc, BAR_CSTORM_INTMEM +
10417 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
10418 C_SB_ETH_TX_CQ_INDEX),
10419 (sc->tx_ticks / (BXE_BTR * 4)) ? 0 : 1);
10422 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_alloc_tpa_mbuf():
 *
 * Allocate a jumbo-cluster mbuf, DMA-map it through the spare TPA map,
 * then swap it into the TPA pool slot for 'queue'. The old mapping (if
 * any) is synced/unloaded first; the spare map scheme means a failure
 * leaves the existing pool entry untouched.
 */
10426 * Allocate an mbuf and assign it to the TPA pool.
10429 * 0 = Success, !0 = Failure
10432 * fp->tpa_mbuf_ptr[queue]
10433 * fp->tpa_mbuf_map[queue]
10434 * fp->tpa_mbuf_segs[queue]
10437 bxe_alloc_tpa_mbuf(struct bxe_fastpath *fp, int queue)
10439 struct bxe_softc *sc;
10440 bus_dma_segment_t segs[1];
10446 DBENTER(BXE_INSANE_TPA);
10449 DBRUNIF((fp->disable_tpa == TRUE),
10450 BXE_PRINTF("%s(): fp[%02d] TPA disabled!\n",
10451 __FUNCTION__, fp->index));
10454 /* Simulate an mbuf allocation failure. */
10455 if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
10456 sc->debug_sim_mbuf_alloc_failed++;
10457 fp->mbuf_tpa_alloc_failed++;
10459 goto bxe_alloc_tpa_mbuf_exit;
10463 /* Allocate the new TPA mbuf. */
10464 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
10465 if (__predict_false(m == NULL)) {
10466 fp->mbuf_tpa_alloc_failed++;
10468 goto bxe_alloc_tpa_mbuf_exit;
10471 DBRUN(fp->tpa_mbuf_alloc++);
10473 /* Initialize the mbuf buffer length. */
10474 m->m_pkthdr.len = m->m_len = sc->mbuf_alloc_size;
10477 /* Simulate an mbuf mapping failure. */
10478 if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
10479 sc->debug_sim_mbuf_map_failed++;
10480 fp->mbuf_tpa_mapping_failed++;
10482 DBRUN(fp->tpa_mbuf_alloc--);
10484 goto bxe_alloc_tpa_mbuf_exit;
10488 /* Map the TPA mbuf into non-paged pool. */
10489 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
10490 fp->tpa_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
10491 if (__predict_false(rc != 0)) {
10492 fp->mbuf_tpa_mapping_failed++;
10494 DBRUN(fp->tpa_mbuf_alloc--);
10495 goto bxe_alloc_tpa_mbuf_exit;
10498 /* All mbufs must map to a single segment. */
10499 KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
10500 __FUNCTION__, nsegs));
10502 /* Release any existing TPA mbuf mapping. */
10503 if (fp->tpa_mbuf_map[queue] != NULL) {
10504 bus_dmamap_sync(fp->rx_mbuf_tag,
10505 fp->tpa_mbuf_map[queue], BUS_DMASYNC_POSTREAD);
10506 bus_dmamap_unload(fp->rx_mbuf_tag,
10507 fp->tpa_mbuf_map[queue]);
10510 /* Save the mbuf and mapping info for the TPA mbuf. */
/* Swap slot map with the spare so a future failure can't orphan a slot. */
10511 map = fp->tpa_mbuf_map[queue];
10512 fp->tpa_mbuf_map[queue] = fp->tpa_mbuf_spare_map;
10513 fp->tpa_mbuf_spare_map = map;
10514 bus_dmamap_sync(fp->rx_mbuf_tag,
10515 fp->tpa_mbuf_map[queue], BUS_DMASYNC_PREREAD);
10516 fp->tpa_mbuf_ptr[queue] = m;
10517 fp->tpa_mbuf_segs[queue] = segs[0];
10519 bxe_alloc_tpa_mbuf_exit:
10520 DBEXIT(BXE_INSANE_TPA);
/*
 * bxe_fill_tpa_pool():
 *
 * Populate every TPA aggregation queue of a fastpath with an mbuf.
 * If TPA is not enabled, or any allocation fails, fp->disable_tpa is
 * set TRUE so the queue falls back to non-TPA receive.
 */
10525 * Allocate mbufs for a fastpath TPA pool.
10528 * 0 = Success, !0 = Failure.
10535 bxe_fill_tpa_pool(struct bxe_fastpath *fp)
10537 struct bxe_softc *sc;
10538 int max_agg_queues, queue, rc;
10541 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10544 if (!TPA_ENABLED(sc)) {
10545 fp->disable_tpa = TRUE;
10546 goto bxe_fill_tpa_pool_exit;
/* Aggregation queue count depends on the controller generation. */
10549 max_agg_queues = CHIP_IS_E1(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
10550 ETH_MAX_AGGREGATION_QUEUES_E1H;
10552 /* Assume the fill operation worked. */
10553 fp->disable_tpa = FALSE;
10555 /* Fill the TPA pool. */
10556 for (queue = 0; queue < max_agg_queues; queue++) {
10557 rc = bxe_alloc_tpa_mbuf(fp, queue);
10560 "%s(%d): fp[%02d] TPA disabled!\n",
10561 __FILE__, __LINE__, fp->index);
10562 fp->disable_tpa = TRUE;
10565 fp->tpa_state[queue] = BXE_TPA_STATE_STOP;
10568 bxe_fill_tpa_pool_exit:
10569 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_free_tpa_pool():
 *
 * Tear down a fastpath's TPA pool: sync/unload every DMA map and free
 * every mbuf still held. Safe to call when the pool was never filled
 * (bails out early if the RX mbuf tag was never created).
 */
10574 * Free all mbufs from a fastpath TPA pool.
10580 * fp->tpa_mbuf_ptr[]
10581 * fp->tpa_mbuf_map[]
10582 * fp->tpa_mbuf_alloc
10585 bxe_free_tpa_pool(struct bxe_fastpath *fp)
10587 struct bxe_softc *sc;
10588 int i, max_agg_queues;
10591 DBENTER(BXE_INSANE_LOAD | BXE_INSANE_UNLOAD | BXE_INSANE_TPA);
10593 if (fp->rx_mbuf_tag == NULL)
10594 goto bxe_free_tpa_pool_exit;
10596 max_agg_queues = CHIP_IS_E1H(sc) ?
10597 ETH_MAX_AGGREGATION_QUEUES_E1H :
10598 ETH_MAX_AGGREGATION_QUEUES_E1;
10600 /* Release all mbufs and all DMA maps in the TPA pool. */
10601 for (i = 0; i < max_agg_queues; i++) {
10602 if (fp->tpa_mbuf_map[i] != NULL) {
10603 bus_dmamap_sync(fp->rx_mbuf_tag, fp->tpa_mbuf_map[i],
10604 BUS_DMASYNC_POSTREAD);
10605 bus_dmamap_unload(fp->rx_mbuf_tag, fp->tpa_mbuf_map[i]);
10608 if (fp->tpa_mbuf_ptr[i] != NULL) {
10609 m_freem(fp->tpa_mbuf_ptr[i]);
10610 DBRUN(fp->tpa_mbuf_alloc--);
10611 fp->tpa_mbuf_ptr[i] = NULL;
10615 bxe_free_tpa_pool_exit:
10616 DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_UNLOAD | BXE_INSANE_TPA);
/*
 * bxe_alloc_rx_sge_mbuf():
 *
 * Allocate a page-sized mbuf, DMA-map it through the spare SGE map, swap
 * it into SGE slot 'index', and publish its bus address into the SGE
 * ring entry (little-endian hi/lo halves). On failure the existing slot
 * contents are left intact.
 */
10620 * Allocate an mbuf and assign it to the receive scatter gather chain.
10621 * The caller must take care to save a copy of the existing mbuf in the
10625 * 0 = Success, !0= Failure.
10628 * fp->sg_chain[index]
10629 * fp->rx_sge_buf_ptr[index]
10630 * fp->rx_sge_buf_map[index]
10631 * fp->rx_sge_spare_map
10634 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index)
10636 struct bxe_softc *sc;
10637 struct eth_rx_sge *sge;
10638 bus_dma_segment_t segs[1];
10644 DBENTER(BXE_INSANE_TPA);
10648 /* Simulate an mbuf allocation failure. */
10649 if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
10650 sc->debug_sim_mbuf_alloc_failed++;
10651 fp->mbuf_sge_alloc_failed++;
10653 goto bxe_alloc_rx_sge_mbuf_exit;
10657 /* Allocate a new SGE mbuf. */
10658 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
10659 if (__predict_false(m == NULL)) {
10660 fp->mbuf_sge_alloc_failed++;
10662 goto bxe_alloc_rx_sge_mbuf_exit;
10665 DBRUN(fp->sge_mbuf_alloc++);
10667 /* Initialize the mbuf buffer length. */
10668 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
10671 /* Simulate an mbuf mapping failure. */
10672 if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
10673 sc->debug_sim_mbuf_map_failed++;
10674 fp->mbuf_sge_mapping_failed++;
10676 DBRUN(fp->sge_mbuf_alloc--);
10678 goto bxe_alloc_rx_sge_mbuf_exit;
10682 /* Map the SGE mbuf into non-paged pool. */
10683 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_buf_tag,
10684 fp->rx_sge_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
10685 if (__predict_false(rc != 0)) {
10686 fp->mbuf_sge_mapping_failed++;
10688 DBRUN(fp->sge_mbuf_alloc--);
10689 goto bxe_alloc_rx_sge_mbuf_exit;
10692 /* All mbufs must map to a single segment. */
10693 KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
10694 __FUNCTION__, nsegs));
10696 /* Unload any existing SGE mbuf mapping. */
10697 if (fp->rx_sge_buf_map[index] != NULL) {
10698 bus_dmamap_sync(fp->rx_sge_buf_tag,
10699 fp->rx_sge_buf_map[index], BUS_DMASYNC_POSTREAD);
10700 bus_dmamap_unload(fp->rx_sge_buf_tag,
10701 fp->rx_sge_buf_map[index]);
10704 /* Add the new SGE mbuf to the SGE ring. */
/* Swap slot map with the spare; keeps maps balanced across retries. */
10705 map = fp->rx_sge_buf_map[index];
10706 fp->rx_sge_buf_map[index] = fp->rx_sge_spare_map;
10707 fp->rx_sge_spare_map = map;
10708 bus_dmamap_sync(fp->rx_sge_buf_tag,
10709 fp->rx_sge_buf_map[index], BUS_DMASYNC_PREREAD);
10710 fp->rx_sge_buf_ptr[index] = m;
10711 sge = &fp->sg_chain[index];
10712 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
10713 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
10715 bxe_alloc_rx_sge_mbuf_exit:
10716 DBEXIT(BXE_INSANE_TPA);
/*
 * bxe_fill_sg_chain():
 *
 * Fill the RX scatter-gather (SGE) ring with mbufs and record the
 * resulting producer index. Disables TPA for this fastpath if TPA is
 * globally off or an allocation fails.
 */
10721 * Allocate mbufs for a SGE chain.
10724 * 0 = Success, !0 = Failure.
10731 bxe_fill_sg_chain(struct bxe_fastpath *fp)
10733 struct bxe_softc *sc;
10739 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10742 if (!TPA_ENABLED(sc)) {
10743 fp->disable_tpa = TRUE;
10744 goto bxe_fill_sg_chain_exit;
10747 /* Assume the fill operation works. */
10748 fp->disable_tpa = FALSE;
10750 /* Fill the RX SGE chain. */
10752 for (i = 0; i < USABLE_RX_SGE; i++) {
10753 rc = bxe_alloc_rx_sge_mbuf(fp, index);
10756 "%s(%d): fp[%02d] SGE memory allocation failure!\n",
10757 __FILE__, __LINE__, fp->index);
10759 fp->disable_tpa = TRUE;
/* NEXT_SGE_IDX skips the page-boundary entries in the ring. */
10762 index = NEXT_SGE_IDX(index);
10765 /* Update the driver's copy of the RX SGE producer index. */
10766 fp->rx_sge_prod = index;
10768 bxe_fill_sg_chain_exit:
10769 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_free_sg_chain():
 *
 * Release every mbuf and DMA map held by the RX scatter-gather chain.
 * A no-op if the SGE buffer tag was never created.
 */
10774 * Free all elements from the receive scatter gather chain.
10780 * fp->rx_sge_buf_ptr[]
10781 * fp->rx_sge_buf_map[]
10782 * fp->sge_mbuf_alloc
10785 bxe_free_sg_chain(struct bxe_fastpath *fp)
10787 struct bxe_softc *sc;
10791 DBENTER(BXE_INSANE_TPA);
10793 if (fp->rx_sge_buf_tag == NULL)
10794 goto bxe_free_sg_chain_exit;
10796 /* Free all mbufs and unload all maps. */
10797 for (i = 0; i < TOTAL_RX_SGE; i++) {
10798 /* Free the map and the mbuf if they're allocated. */
10799 if (fp->rx_sge_buf_map[i] != NULL) {
10800 bus_dmamap_sync(fp->rx_sge_buf_tag,
10801 fp->rx_sge_buf_map[i], BUS_DMASYNC_POSTREAD);
10802 bus_dmamap_unload(fp->rx_sge_buf_tag,
10803 fp->rx_sge_buf_map[i]);
10806 if (fp->rx_sge_buf_ptr[i] != NULL) {
10807 m_freem(fp->rx_sge_buf_ptr[i]);
10808 DBRUN(fp->sge_mbuf_alloc--);
10809 fp->rx_sge_buf_ptr[i] = NULL;
10813 bxe_free_sg_chain_exit:
10814 DBEXIT(BXE_INSANE_TPA);
/*
 * bxe_alloc_rx_bd_mbuf():
 *
 * Allocate a cluster mbuf, DMA-map it through the spare RX map, swap it
 * into RX slot 'index', and write its bus address into the RX BD
 * (little-endian hi/lo halves). On failure the existing slot contents
 * are left intact.
 */
10818 * Allocate an mbuf, if necessary, and add it to the receive chain.
10821 * 0 = Success, !0 = Failure.
10824 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t index)
10826 struct bxe_softc *sc;
10827 struct eth_rx_bd *rx_bd;
10828 bus_dma_segment_t segs[1];
10834 DBENTER(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
10838 /* Simulate an mbuf allocation failure. */
10839 if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
10840 sc->debug_sim_mbuf_alloc_failed++;
10841 fp->mbuf_rx_bd_alloc_failed++;
10843 goto bxe_alloc_rx_bd_mbuf_exit;
10847 /* Allocate the new RX BD mbuf. */
10848 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
10849 if (__predict_false(m == NULL)) {
10850 fp->mbuf_rx_bd_alloc_failed++;
10852 goto bxe_alloc_rx_bd_mbuf_exit;
10855 DBRUN(fp->rx_mbuf_alloc++);
10857 /* Initialize the mbuf buffer length. */
10858 m->m_pkthdr.len = m->m_len = sc->mbuf_alloc_size;
10861 /* Simulate an mbuf mapping failure. */
10862 if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
10863 sc->debug_sim_mbuf_map_failed++;
10864 fp->mbuf_rx_bd_mapping_failed++;
10866 DBRUN(fp->rx_mbuf_alloc--);
10868 goto bxe_alloc_rx_bd_mbuf_exit;
/* NOTE(review): comment says "TPA mbuf" but this maps the RX BD mbuf. */
10872 /* Map the TPA mbuf into non-paged pool. */
10873 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
10874 fp->rx_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
10875 if (__predict_false(rc != 0)) {
10876 fp->mbuf_rx_bd_mapping_failed++;
10878 DBRUN(fp->rx_mbuf_alloc--);
10879 goto bxe_alloc_rx_bd_mbuf_exit;
10882 /* All mbufs must map to a single segment. */
10883 KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
10884 __FUNCTION__, nsegs));
10886 /* Release any existing RX BD mbuf mapping. */
10887 if (fp->rx_mbuf_map[index] != NULL) {
10888 bus_dmamap_sync(fp->rx_mbuf_tag,
10889 fp->rx_mbuf_map[index], BUS_DMASYNC_POSTREAD);
10890 bus_dmamap_unload(fp->rx_mbuf_tag,
10891 fp->rx_mbuf_map[index]);
10894 /* Save the mbuf and mapping info. */
10895 map = fp->rx_mbuf_map[index];
10896 fp->rx_mbuf_map[index] = fp->rx_mbuf_spare_map;
10897 fp->rx_mbuf_spare_map = map;
10898 bus_dmamap_sync(fp->rx_mbuf_tag,
10899 fp->rx_mbuf_map[index], BUS_DMASYNC_PREREAD);
10900 fp->rx_mbuf_ptr[index] = m;
10901 rx_bd = &fp->rx_chain[index];
10902 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
10903 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
10905 bxe_alloc_rx_bd_mbuf_exit:
10906 DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
/*
 * bxe_fill_rx_bd_chain():
 *
 * Populate every usable RX BD with an mbuf and record the resulting
 * producer index in fp->rx_bd_prod.
 */
10913 * Allocate mbufs for a receive chain.
10916 * 0 = Success, !0 = Failure.
10922 bxe_fill_rx_bd_chain(struct bxe_fastpath *fp)
10924 struct bxe_softc *sc;
10929 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10932 /* Allocate buffers for all the RX BDs in RX BD Chain. */
10933 for (i = 0; i < USABLE_RX_BD; i++) {
10934 rc = bxe_alloc_rx_bd_mbuf(fp, index);
10937 "%s(%d): Memory allocation failure! Cannot fill fp[%02d] RX chain.\n",
10938 __FILE__, __LINE__, fp->index);
/* NEXT_RX_BD skips the page-boundary entries in the ring. */
10942 index = NEXT_RX_BD(index);
10945 fp->rx_bd_prod = index;
10946 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_free_rx_bd_chain():
 *
 * Release every mbuf and DMA map held by the RX BD chain. A no-op if
 * the RX mbuf tag was never created.
 */
10951 * Free all buffers from the receive chain.
10957 * fp->rx_mbuf_ptr[]
10958 * fp->rx_mbuf_map[]
10959 * fp->rx_mbuf_alloc
10962 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
10964 struct bxe_softc *sc;
10968 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10970 if (fp->rx_mbuf_tag == NULL)
10971 goto bxe_free_rx_bd_chain_exit;
10973 /* Free all mbufs and unload all maps. */
10974 for (i = 0; i < TOTAL_RX_BD; i++) {
10975 if (fp->rx_mbuf_map[i] != NULL) {
10976 bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_mbuf_map[i],
10977 BUS_DMASYNC_POSTREAD);
10978 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_map[i]);
10981 if (fp->rx_mbuf_ptr[i] != NULL) {
10982 m_freem(fp->rx_mbuf_ptr[i]);
10983 DBRUN(fp->rx_mbuf_alloc--);
10984 fp->rx_mbuf_ptr[i] = NULL;
10988 bxe_free_rx_bd_chain_exit:
10989 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_mutexes_alloc():
 *
 * Initialize all driver mutexes: the core/slowpath/DMAE/PHY/firmware-
 * mailbox/print locks plus one mutex per fastpath queue.
 */
10993 * Setup mutexes used by the driver.
10999 bxe_mutexes_alloc(struct bxe_softc *sc)
11001 struct bxe_fastpath *fp;
11004 DBENTER(BXE_VERBOSE_LOAD);
11006 BXE_CORE_LOCK_INIT(sc, device_get_nameunit(sc->dev));
11007 BXE_SP_LOCK_INIT(sc, "bxe_sp_lock");
11008 BXE_DMAE_LOCK_INIT(sc, "bxe_dmae_lock");
11009 BXE_PHY_LOCK_INIT(sc, "bxe_phy_lock");
11010 BXE_FWMB_LOCK_INIT(sc, "bxe_fwmb_lock");
11011 BXE_PRINT_LOCK_INIT(sc, "bxe_print_lock");
11013 /* Allocate one mutex for each fastpath structure. */
11014 for (i = 0; i < sc->num_queues; i++ ) {
11017 /* Allocate per fastpath mutexes. */
/* Lock name encodes unit and queue, e.g. "bxe0:fp[01]". */
11018 snprintf(fp->mtx_name, sizeof(fp->mtx_name), "%s:fp[%02d]",
11019 device_get_nameunit(sc->dev), fp->index);
11020 mtx_init(&fp->mtx, fp->mtx_name, NULL, MTX_DEF);
11023 DBEXIT(BXE_VERBOSE_LOAD);
/*
 * bxe_mutexes_free():
 *
 * Destroy all mutexes created by bxe_mutexes_alloc(), fastpath locks
 * first, then the global driver locks in reverse-init order.
 */
11027 * Free mutexes used by the driver.
11033 bxe_mutexes_free(struct bxe_softc *sc)
11035 struct bxe_fastpath *fp;
11038 DBENTER(BXE_VERBOSE_UNLOAD);
11040 for (i = 0; i < sc->num_queues; i++ ) {
11043 /* Release per fastpath mutexes. */
/* Guarded so a partial attach/teardown doesn't destroy a dead mutex. */
11044 if (mtx_initialized(&fp->mtx))
11045 mtx_destroy(&fp->mtx);
11048 BXE_PRINT_LOCK_DESTROY(sc);
11049 BXE_FWMB_LOCK_DESTROY(sc);
11050 BXE_PHY_LOCK_DESTROY(sc);
11051 BXE_DMAE_LOCK_DESTROY(sc);
11052 BXE_SP_LOCK_DESTROY(sc);
11053 BXE_CORE_LOCK_DESTROY(sc);
11055 DBEXIT(BXE_VERBOSE_UNLOAD);
/*
 * bxe_clear_rx_chains():
 *
 * For each fastpath queue, release the RX BD chain, TPA pool, and SGE
 * chain, then (debug builds) report any mbufs whose allocation counters
 * did not return to zero — i.e. leaks.
 */
11060 * Free memory and clear the RX data structures.
11066 bxe_clear_rx_chains(struct bxe_softc *sc)
11068 struct bxe_fastpath *fp;
11071 DBENTER(BXE_VERBOSE_RESET);
11073 for (i = 0; i < sc->num_queues; i++) {
11076 /* Free all RX buffers. */
11077 bxe_free_rx_bd_chain(fp);
11078 bxe_free_tpa_pool(fp);
11079 bxe_free_sg_chain(fp);
11081 /* Check if any mbufs lost in the process. */
11082 DBRUNIF((fp->tpa_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11083 "%s(): Memory leak! Lost %d mbufs from fp[%02d] TPA pool!\n",
11084 __FUNCTION__, fp->tpa_mbuf_alloc, fp->index));
11085 DBRUNIF((fp->sge_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11086 "%s(): Memory leak! Lost %d mbufs from fp[%02d] SGE chain!\n",
11087 __FUNCTION__, fp->sge_mbuf_alloc, fp->index));
11088 DBRUNIF((fp->rx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11089 "%s(): Memory leak! Lost %d mbufs from fp[%02d] RX chain!\n",
11090 __FUNCTION__, fp->rx_mbuf_alloc, fp->index));
11093 DBEXIT(BXE_VERBOSE_RESET);
/*
 * bxe_init_rx_chains():
 *
 * Per queue: reset the RX BD/CQ producer-consumer indices, hook the
 * status-block consumer pointers, fill the RX BD chain, TPA pool, and
 * SGE chain, sync the rings for DMA, publish the initial producers to
 * the controller, and program the CQ base address. On any failure all
 * RX resources are torn down via bxe_clear_rx_chains().
 */
11097 * Initialize the receive rings.
11103 bxe_init_rx_chains(struct bxe_softc *sc)
11105 struct bxe_fastpath *fp;
11108 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11110 func = BP_FUNC(sc);
11112 /* Allocate memory for RX and CQ chains. */
11113 for (i = 0; i < sc->num_queues; i++) {
11115 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
11116 "%s(): Initializing fp[%02d] RX chain.\n", __FUNCTION__, i);
11118 fp->rx_bd_cons = fp->rx_bd_prod = 0;
11119 fp->rx_cq_cons = fp->rx_cq_prod = 0;
11121 /* Pointer to status block's CQ consumer index. */
11122 fp->rx_cq_cons_sb = &fp->status_block->
11123 u_status_block.index_values[HC_INDEX_U_ETH_RX_CQ_CONS];
11125 /* Pointer to status block's receive consumer index. */
11126 fp->rx_bd_cons_sb = &fp->status_block->
11127 u_status_block.index_values[HC_INDEX_U_ETH_RX_BD_CONS];
11129 fp->rx_cq_prod = TOTAL_RCQ_ENTRIES;
11130 fp->rx_pkts = fp->rx_tpa_pkts = fp->rx_soft_errors = 0;
11132 /* Allocate memory for the receive chain. */
11133 rc = bxe_fill_rx_bd_chain(fp);
11135 goto bxe_init_rx_chains_exit;
11137 /* Allocate memory for TPA pool. */
11138 rc = bxe_fill_tpa_pool(fp);
11140 goto bxe_init_rx_chains_exit;
11142 /* Allocate memory for scatter-gather chain. */
11143 rc = bxe_fill_sg_chain(fp);
11145 goto bxe_init_rx_chains_exit;
11147 /* Prepare the receive BD and CQ buffers for DMA access. */
11148 bus_dmamap_sync(fp->rx_dma.tag, fp->rx_dma.map,
11149 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
11151 bus_dmamap_sync(fp->rcq_dma.tag, fp->rcq_dma.map,
11152 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
11155 * Tell the controller that we have rx_bd's and CQE's
11156 * available. Warning! this will generate an interrupt
11157 * (to the TSTORM). This must only be done when the
11158 * controller is initialized.
11160 bxe_update_rx_prod(sc, fp, fp->rx_bd_prod,
11161 fp->rx_cq_prod, fp->rx_sge_prod);
11163 /* ToDo - Move to dma_alloc(). */
11165 * Tell controller where the receive CQ
11166 * chains start in physical memory.
11169 REG_WR(sc, BAR_USTORM_INTMEM +
11170 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
11171 U64_LO(fp->rcq_dma.paddr));
11172 REG_WR(sc, BAR_USTORM_INTMEM +
11173 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
11174 U64_HI(fp->rcq_dma.paddr));
11178 bxe_init_rx_chains_exit:
11179 /* Release memory if an error occurred. */
11181 bxe_clear_rx_chains(sc);
11183 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_clear_tx_chains():
 *
 * For each fastpath queue, sync/unload and free every TX mbuf still on
 * the chain, then (debug builds) report any outstanding allocations as
 * leaks.
 */
11188 * Free memory and clear the TX data structures.
11194 bxe_clear_tx_chains(struct bxe_softc *sc)
11196 struct bxe_fastpath *fp;
11199 DBENTER(BXE_VERBOSE_RESET);
11201 for (i = 0; i < sc->num_queues; i++) {
11204 /* Free all mbufs and unload all maps. */
11205 if (fp->tx_mbuf_tag) {
11206 for (j = 0; j < TOTAL_TX_BD; j++) {
11207 if (fp->tx_mbuf_ptr[j] != NULL) {
/* POSTWRITE: device has finished reading this TX buffer. */
11208 bus_dmamap_sync(fp->tx_mbuf_tag,
11209 fp->tx_mbuf_map[j],
11210 BUS_DMASYNC_POSTWRITE);
11211 bus_dmamap_unload(fp->tx_mbuf_tag,
11212 fp->tx_mbuf_map[j]);
11213 m_freem(fp->tx_mbuf_ptr[j]);
11214 fp->tx_mbuf_alloc--;
11215 fp->tx_mbuf_ptr[j] = NULL;
11220 /* Check if we lost any mbufs in the process. */
11221 DBRUNIF((fp->tx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11222 "%s(): Memory leak! Lost %d mbufs from fp[%02d] TX chain!\n",
11223 __FUNCTION__, fp->tx_mbuf_alloc, fp->index));
11226 DBEXIT(BXE_VERBOSE_RESET);
/*
 * bxe_init_tx_chains():
 *
 * Per queue: reset the TX doorbell, producer/consumer indices, and all
 * soft TX statistics counters; hook the status-block TX consumer
 * pointer; and NULL out every TX mbuf slot.
 */
11230 * Initialize the transmit chain.
11236 bxe_init_tx_chains(struct bxe_softc *sc)
11238 struct bxe_fastpath *fp;
11241 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11243 for (i = 0; i < sc->num_queues; i++) {
11246 /* Initialize transmit doorbell. */
11247 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
11248 fp->tx_db.data.zero_fill1 = 0;
11249 fp->tx_db.data.prod = 0;
11251 /* Initialize tranmsit producer/consumer indices. */
11252 fp->tx_pkt_prod = fp->tx_pkt_cons = 0;
11253 fp->tx_bd_prod = fp->tx_bd_cons = 0;
11254 fp->tx_bd_used = 0;
11256 /* Pointer to TX packet consumer in status block. */
11257 fp->tx_pkt_cons_sb =
11258 &fp->status_block->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX];
11260 /* Soft TX counters. */
11262 fp->tx_soft_errors = 0;
11263 fp->tx_offload_frames_csum_ip = 0;
11264 fp->tx_offload_frames_csum_tcp = 0;
11265 fp->tx_offload_frames_csum_udp = 0;
11266 fp->tx_offload_frames_tso = 0;
11267 fp->tx_header_splits = 0;
11268 fp->tx_encap_failures = 0;
11269 fp->tx_hw_queue_full = 0;
11270 fp->tx_hw_max_queue_depth = 0;
11271 fp->tx_dma_mapping_failure = 0;
11272 fp->tx_max_drbr_queue_depth = 0;
11273 fp->tx_window_violation_std = 0;
11274 fp->tx_window_violation_tso = 0;
11275 fp->tx_unsupported_tso_request_ipv6 = 0;
11276 fp->tx_unsupported_tso_request_not_tcp = 0;
11277 fp->tx_chain_lost_mbuf = 0;
11278 fp->tx_frame_deferred = 0;
11279 fp->tx_queue_xoff = 0;
11281 /* Clear all TX mbuf pointers. */
11282 for (j = 0; j < TOTAL_TX_BD; j++) {
11283 fp->tx_mbuf_ptr[j] = NULL;
11287 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_sp_ring():
 *
 * Zero the slowpath memory, reset the slowpath queue (SPQ) bookkeeping
 * (credits, producer index, producer/last BD pointers), and program the
 * SPQ's physical base address into XSTORM.
 */
11291 * Initialize the slowpath ring.
11297 bxe_init_sp_ring(struct bxe_softc *sc)
11301 func = BP_FUNC(sc);
11303 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11305 bzero((char *)sc->slowpath, BXE_SLOWPATH_SZ);
11307 /* When the producer equals the consumer the chain is empty. */
11308 sc->spq_left = MAX_SPQ_PENDING;
11309 sc->spq_prod_idx = 0;
11310 sc->dsb_sp_prod = BXE_SP_DSB_INDEX;
11311 sc->spq_prod_bd = sc->spq;
11312 sc->spq_last_bd = sc->spq_prod_bd + MAX_SP_DESC_CNT;
11314 /* Tell the controller the address of the slowpath ring. */
11315 REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
11316 U64_LO(sc->spq_dma.paddr));
11317 REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
11318 U64_HI(sc->spq_dma.paddr));
11319 REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
11322 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_context():
 *
 * Build the per-queue Ethernet connection context handed to the STORM
 * processors: USTORM receive configuration (client/status-block ids,
 * buffer sizes, RX chain base, optional TPA/SGE setup), XSTORM transmit
 * configuration (TX chain base, statistics), and CSTORM status-block
 * binding.
 */
11326 * Initialize STORM processor context.
11332 bxe_init_context(struct bxe_softc *sc)
11334 struct eth_context *context;
11335 struct bxe_fastpath *fp;
11340 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11342 for (i = 0; i < sc->num_queues; i++) {
11343 context = BXE_SP(sc, context[i].eth);
11348 /* Update the USTORM context. */
11349 context->ustorm_st_context.common.sb_index_numbers =
11350 BXE_RX_SB_INDEX_NUM;
11351 context->ustorm_st_context.common.clientId = cl_id;
11352 context->ustorm_st_context.common.status_block_id = sb_id;
11353 /* Enable packet alignment/pad and statistics. */
11354 context->ustorm_st_context.common.flags =
11355 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
11356 if (sc->stats_enable == TRUE)
11357 context->ustorm_st_context.common.flags |=
11358 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
11359 context->ustorm_st_context.common.statistics_counter_id=cl_id;
11361 * Set packet alignment boundary.
11362 * (Must be >= 4 (i.e. 16 bytes).)
11364 context->ustorm_st_context.common.mc_alignment_log_size = 8;
11365 /* Set the size of the receive buffers. */
11366 context->ustorm_st_context.common.bd_buff_size =
11367 sc->mbuf_alloc_size;
11369 /* Set the address of the receive chain base page. */
11370 context->ustorm_st_context.common.bd_page_base_hi =
11371 U64_HI(fp->rx_dma.paddr);
11372 context->ustorm_st_context.common.bd_page_base_lo =
11373 U64_LO(fp->rx_dma.paddr);
/* TPA/SGE context only when TPA is on globally and for this queue. */
11375 if (TPA_ENABLED(sc) && (fp->disable_tpa == FALSE)) {
11376 /* Enable TPA and SGE chain support. */
11377 context->ustorm_st_context.common.flags |=
11378 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
11380 /* Set the size of the SGE buffer. */
11381 context->ustorm_st_context.common.sge_buff_size =
11382 (uint16_t) (SGE_PAGE_SIZE * PAGES_PER_SGE);
11384 /* Set the address of the SGE chain base page. */
11385 context->ustorm_st_context.common.sge_page_base_hi =
11386 U64_HI(fp->sg_dma.paddr);
11387 context->ustorm_st_context.common.sge_page_base_lo =
11388 U64_LO(fp->sg_dma.paddr);
11390 DBPRINT(sc, BXE_VERBOSE_TPA, "%s(): MTU = %d\n",
11391 __FUNCTION__, (int) sc->bxe_ifp->if_mtu);
11393 /* Describe MTU to SGE alignment. */
11394 context->ustorm_st_context.common.max_sges_for_packet =
11395 SGE_PAGE_ALIGN(sc->bxe_ifp->if_mtu) >>
/* Round SGE count up to a PAGES_PER_SGE multiple, then rescale. */
11397 context->ustorm_st_context.common.max_sges_for_packet =
11398 ((context->ustorm_st_context.common.
11399 max_sges_for_packet + PAGES_PER_SGE - 1) &
11400 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11402 DBPRINT(sc, BXE_VERBOSE_TPA,
11403 "%s(): max_sges_for_packet = %d\n", __FUNCTION__,
11404 context->ustorm_st_context.common.max_sges_for_packet);
11407 /* Update USTORM context. */
11408 context->ustorm_ag_context.cdu_usage =
11409 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, i),
11410 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
11412 /* Update XSTORM context. */
11413 context->xstorm_ag_context.cdu_reserved =
11414 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, i),
11415 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
11417 /* Set the address of the transmit chain base page. */
11418 context->xstorm_st_context.tx_bd_page_base_hi =
11419 U64_HI(fp->tx_dma.paddr);
11420 context->xstorm_st_context.tx_bd_page_base_lo =
11421 U64_LO(fp->tx_dma.paddr);
11423 /* Enable XSTORM statistics. */
11424 context->xstorm_st_context.statistics_data = (cl_id |
11425 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
11427 /* Update CSTORM status block configuration. */
11428 context->cstorm_st_context.sb_index_number =
11429 C_SB_ETH_TX_CQ_INDEX;
11430 context->cstorm_st_context.status_block_id = sb_id;
11433 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Initialize the RSS indirection table.
 *
 * Programs each slot of the TSTORM indirection table with a client ID,
 * spreading RSS hash buckets round-robin across the active receive
 * queues.  Does nothing when RSS is disabled.
 */
bxe_init_ind_table(struct bxe_softc *sc)

	func = BP_FUNC(sc);

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	/* Nothing to program when RSS is not in use. */
	if (sc->multi_mode == ETH_RSS_MODE_DISABLED)

	/* Initialize the indirection table. */
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(sc, BAR_TSTORM_INTMEM +
		    TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
		    sc->fp->cl_id + (i % sc->num_queues));

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Set client configuration.
 *
 * Builds a tstorm_eth_client_config (MTU, statistics counter, VLAN
 * stripping flags) and writes it — two 32-bit words — into TSTORM
 * internal memory for every receive queue's client ID.
 */
bxe_set_client_config(struct bxe_softc *sc)
	struct tstorm_eth_client_config tstorm_client = {0};

	port = BP_PORT(sc);

	DBENTER(BXE_VERBOSE_MISC);

	/* Firmware needs the interface MTU for buffer sizing. */
	tstorm_client.mtu = sc->bxe_ifp->if_mtu; /* ETHERMTU */
	tstorm_client.config_flags =
	    (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
	    TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);

	/* Unconditionally enable VLAN tag stripping. */
	tstorm_client.config_flags |=
	    TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
	DBPRINT(sc, BXE_VERBOSE, "%s(): VLAN tag stripping enabled.\n",

	/* Initialize the receive mode for each receive queue. */
	for (i = 0; i < sc->num_queues; i++) {
		tstorm_client.statistics_counter_id = sc->fp[i].cl_id;

		/* The config struct spans two 32-bit words; write both. */
		REG_WR(sc, BAR_TSTORM_INTMEM +
		    TSTORM_CLIENT_CONFIG_OFFSET(port, sc->fp[i].cl_id),
		    ((uint32_t *) &tstorm_client)[0]);
		REG_WR(sc, BAR_TSTORM_INTMEM +
		    TSTORM_CLIENT_CONFIG_OFFSET(port, sc->fp[i].cl_id) + 4,
		    ((uint32_t *) &tstorm_client)[1]);

	DBEXIT(BXE_VERBOSE_MISC);
/*
 * Set receive mode.
 *
 * Programs the MAC according to the type of unicast/broadcast/multicast
 * packets it should receive, by writing a per-function MAC filter
 * configuration into TSTORM internal memory and a management-traffic
 * mask into the NIG LLH block.
 */
bxe_set_storm_rx_mode(struct bxe_softc *sc)
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int func, i , port;

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	mode = sc->rx_mode;
	/* Each accept/drop field below is a per-function bitmask. */
	mask = 1 << BP_L_ID(sc);
	func = BP_FUNC(sc);
	port = BP_PORT(sc);

	/* All but management unicast packets should pass to the host as well */
	llh_mask = NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
	    NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
	    NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
	    NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	/* Set the individual accept/drop flags based on the receive mode. */
	case BXE_RX_MODE_NONE:
		/* Drop everything. */
		DBPRINT(sc, BXE_VERBOSE,
		    "%s(): Setting RX_MODE_NONE for function %d.\n",
		    __FUNCTION__, func);
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
	case BXE_RX_MODE_NORMAL:
		/* Accept all broadcast frames. */
		DBPRINT(sc, BXE_VERBOSE,
		    "%s(): Setting RX_MODE_NORMAL for function %d.\n",
		    __FUNCTION__, func);
		tstorm_mac_filter.bcast_accept_all = mask;
	case BXE_RX_MODE_ALLMULTI:
		/* Accept all broadcast and multicast frames. */
		DBPRINT(sc, BXE_VERBOSE,
		    "%s(): Setting RX_MODE_ALLMULTI for function %d.\n",
		    __FUNCTION__, func);
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
	case BXE_RX_MODE_PROMISC:
		/* Accept all frames (promiscuous mode). */
		DBPRINT(sc, BXE_VERBOSE,
		    "%s(): Setting RX_MODE_PROMISC for function %d.\n",
		    __FUNCTION__, func);
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* Promiscuous: also pass management unicast to the host. */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
	    "%s(%d): Tried to set unknown receive mode (0x%08X)!\n",
	    __FILE__, __LINE__, mode);

	/* Program the NIG LLH mask for the port in use. */
	REG_WR(sc, port ? NIG_REG_LLH1_BRB1_DRV_MASK :
	    NIG_REG_LLH0_BRB1_DRV_MASK, llh_mask);

	/* Write the RX mode filter to the TSTORM. */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config) / 4; i++)
		REG_WR(sc, BAR_TSTORM_INTMEM +
		    TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + (i * 4),
		    ((uint32_t *) &tstorm_mac_filter)[i]);

	/* Per-client config only matters once we can actually receive. */
	if (mode != BXE_RX_MODE_NONE)
		bxe_set_client_config(sc);

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Initialize common internal resources. (Applies to both ports and
 * all functions; performed once per device.)
 */
bxe_init_internal_common(struct bxe_softc *sc)

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	/*
	 * Zero this manually as its initialization is currently not
	 * handled through block initialization.
	 */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(sc, BAR_USTORM_INTMEM + USTORM_AGG_DATA_OFFSET + i * 4,

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11621 * Initialize port specific internal resources.
11627 bxe_init_internal_port(struct bxe_softc *sc)
11629 int port = BP_PORT(sc);
11631 port = BP_PORT(sc);
11633 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11634 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
11635 "%s(): Port %d internal initialization.\n", __FUNCTION__, port);
11638 * Each SDM timer tick is 4us. Configure host coalescing
11639 * basic timer resolution (BTR) to 12us (3 * 4us).
11641 REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BXE_BTR);
11642 REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BXE_BTR);
11643 REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BXE_BTR);
11644 REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BXE_BTR);
11646 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Initialize function specific internal resources.
 *
 * Programs per-function firmware state: RSS/TPA configuration, initial
 * (closed) receive mode, per-client statistics reset, statistics query
 * addresses, E1H multi-function settings, completion queue mapping,
 * TPA aggregation limits, lossless flow control thresholds, and the
 * per-port congestion management (cmng) block.
 */
bxe_init_internal_func(struct bxe_softc *sc)
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
	struct bxe_fastpath *fp;
	struct eth_rx_cqe_next_page *nextpg;
	uint32_t offset, size;
	uint16_t max_agg_size;
	int func, i, j, port;

	port = BP_PORT(sc);
	func = BP_FUNC(sc);

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
	DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
	    "%s(): Port %d, function %d internal initialization.\n",
	    __FUNCTION__, port, func);

	/*
	 * Configure which fields the controller looks at when
	 * distributing incoming frames for RSS/multi-queue operation.
	 */
	if (sc->num_queues > 1) {
		tstorm_config.config_flags = MULTI_FLAGS(sc);
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (TPA_ENABLED(sc))
		tstorm_config.config_flags |=
		    TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

		tstorm_config.config_flags |=
		    TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(sc);

	REG_WR(sc, BAR_TSTORM_INTMEM +
	    TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	    (*(uint32_t *)&tstorm_config));

	/* Don't receive anything until the link is up. */
	sc->rx_mode = BXE_RX_MODE_NONE;
	sc->rx_mode_cl_mask = (1 << BP_L_ID(sc));
	bxe_set_storm_rx_mode(sc);

	for (i = 0; i < sc->num_queues; i++) {
		cl_id = sc->fp[i].cl_id;
		/* Reset XSTORM per client statistics. */
		size = sizeof(struct xstorm_per_client_stats) / 4;
		offset = BAR_XSTORM_INTMEM +
		    XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0; j < size; j++)
			REG_WR(sc, offset +(j * 4), 0);

		/* Reset TSTORM per client statistics. */
		size = sizeof(struct tstorm_per_client_stats) / 4;
		offset = BAR_TSTORM_INTMEM +
		    TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0; j < size; j++)
			REG_WR(sc, offset + (j * 4), 0);

		/* Reset USTORM per client statistics. */
		size = sizeof(struct ustorm_per_client_stats) / 4;
		offset = BAR_USTORM_INTMEM +
		    USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0; j < size; j++)
			REG_WR(sc, offset + (j * 4), 0);

	/* Initialize statistics related context. */
	stats_flags.collect_eth = 1;

	/* Each storm gets the two 32-bit words of the flags structure. */
	REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	    ((uint32_t *)&stats_flags)[0]);
	REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	    ((uint32_t *)&stats_flags)[1]);

	REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	    ((uint32_t *)&stats_flags)[0]);
	REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	    ((uint32_t *)&stats_flags)[1]);

	REG_WR(sc, BAR_USTORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	    ((uint32_t *)&stats_flags)[0]);
	REG_WR(sc, BAR_USTORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	    ((uint32_t *)&stats_flags)[1]);

	REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	    ((uint32_t *)&stats_flags)[0]);
	REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	    ((uint32_t *)&stats_flags)[1]);

	/* Tell each storm where to DMA its statistics (fw_stats buffer). */
	REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	    U64_LO(BXE_SP_MAPPING(sc, fw_stats)));
	REG_WR(sc, BAR_XSTORM_INTMEM +
	    XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	    U64_HI(BXE_SP_MAPPING(sc, fw_stats)));

	REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	    U64_LO(BXE_SP_MAPPING(sc, fw_stats)));
	REG_WR(sc, BAR_TSTORM_INTMEM +
	    TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	    U64_HI(BXE_SP_MAPPING(sc, fw_stats)));

	REG_WR(sc, BAR_USTORM_INTMEM + USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	    U64_LO(BXE_SP_MAPPING(sc, fw_stats)));
	REG_WR(sc, BAR_USTORM_INTMEM +
	    USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	    U64_HI(BXE_SP_MAPPING(sc, fw_stats)));

	/* Additional initialization for 57711/57711E. */
	if (CHIP_IS_E1H(sc)) {
		REG_WR8(sc, BAR_XSTORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
		REG_WR8(sc, BAR_TSTORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
		REG_WR8(sc, BAR_CSTORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
		REG_WR8(sc, BAR_USTORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,

		/* Set the outer VLAN tag. */
		REG_WR16(sc, BAR_XSTORM_INTMEM + XSTORM_E1HOV_OFFSET(func),

	/* Init completion queue mapping and TPA aggregation size. */
	max_agg_size = min((uint32_t)(sc->mbuf_alloc_size +
	    (8 * BCM_PAGE_SIZE * PAGES_PER_SGE)), (uint32_t)0xffff);

	DBPRINT(sc, BXE_VERBOSE_TPA, "%s(): max_agg_size = 0x%08X\n",
	    __FUNCTION__, max_agg_size);

	for (i = 0; i < sc->num_queues; i++) {
		/* The next-page descriptor lives past the usable entries. */
		nextpg = (struct eth_rx_cqe_next_page *)
		    &fp->rcq_chain[USABLE_RCQ_ENTRIES_PER_PAGE];

		/* Program the completion queue address. */
		REG_WR(sc, BAR_USTORM_INTMEM +
		    USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		    U64_LO(fp->rcq_dma.paddr));
		REG_WR(sc, BAR_USTORM_INTMEM +
		    USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		    U64_HI(fp->rcq_dma.paddr));

		/* Program the first CQ next page address. */
		REG_WR(sc, BAR_USTORM_INTMEM +
		    USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		REG_WR(sc, BAR_USTORM_INTMEM +
		    USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,

		/* Set the maximum TPA aggregation size. */
		REG_WR16(sc, BAR_USTORM_INTMEM +
		    USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),

	/* Configure lossless flow control. */
	if (CHIP_IS_E1H(sc)) {
		/* Thresholds are in ring entries; low = resume, high = pause. */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for (i = 0; i < sc->num_queues; i++) {
			/* SGE thresholds only matter when TPA is active. */
			if (fp->disable_tpa == FALSE) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;

			offset = BAR_USTORM_INTMEM +
			    USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, fp->cl_id);
			    sizeof(struct ustorm_eth_rx_pause_data_e1h) / 4;
				REG_WR(sc, offset + (j * 4),
				    ((uint32_t *)&rx_pause)[j]);

	memset(&(sc->cmng), 0, sizeof(struct cmng_struct_per_port));
	if (IS_E1HMF(sc)) {
		/*
		 * During init there is no active link.
		 * Until link is up, assume link rate @ 10Gbps
		 */
		bxe_read_mf_cfg(sc);

		DBPRINT(sc, BXE_VERBOSE_MISC,
		    "%s(): All MIN values are zeroes, "
		    "fairness will be disabled.\n", __FUNCTION__);

	/* Store it to internal memory */
	if (sc->port.pmf) {
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(sc, BAR_XSTORM_INTMEM +
			    XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			    ((uint32_t *)(&sc->cmng))[i]);

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Initialize internal resources.
 *
 * Dispatches to the common/port/function internal-init routines based
 * on the load code returned by the MCP (management firmware).
 * NOTE(review): the case bodies' break/fall-through behavior is not
 * visible here — in comparable drivers COMMON falls through to PORT and
 * FUNCTION; confirm against the full source before changing.
 */
bxe_init_internal(struct bxe_softc *sc, uint32_t load_code)

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bxe_init_internal_common(sc);
	case FW_MSG_CODE_DRV_LOAD_PORT:
		bxe_init_internal_port(sc);
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bxe_init_internal_func(sc);
	    "%s(%d): Unknown load_code (0x%08X) from MCP!\n",
	    __FILE__, __LINE__, load_code);

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Perform driver instance specific initialization.
 *
 * Sets up each fastpath (client/status-block IDs, status blocks),
 * the default status block, coalescing, RX/TX/slow-path chains, STORM
 * context, internal firmware state and the RSS indirection table.
 * Interrupts remain disabled until initialization completes.
 */
bxe_init_nic(struct bxe_softc *sc, uint32_t load_code)
	struct bxe_fastpath *fp;

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	/* Intialize fastpath structures and the status block. */
	for (i = 0; i < sc->num_queues; i++) {
		/* TPA is enabled per-queue later, if applicable. */
		fp->disable_tpa = TRUE;

		bzero((char *)fp->status_block, BXE_STATUS_BLK_SZ);

		/* Set a pointer back to the driver instance. */

		/* Set the fastpath starting state as closed. */
		fp->state = BXE_FP_STATE_CLOSED;

		/* Self-reference to this fastpath's instance. */

		/* Set the client ID beginning with the leading id. */
		fp->cl_id = BP_L_ID(sc) + i;

		/* Set the status block ID for this fastpath instance. */
		fp->sb_id = fp->cl_id;

		DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
		    "%s(): fp[%02d]: cl_id = %d, sb_id = %d\n",
		    __FUNCTION__, fp->index, fp->cl_id, fp->sb_id);

		/* Initialize the fastpath status block. */
		bxe_init_sb(sc, fp->status_block, fp->sb_dma.paddr,
		bxe_update_fpsb_idx(fp);

	bzero((char *)sc->def_sb, BXE_DEF_STATUS_BLK_SZ);

	/* Initialize the Default Status Block. */
	bxe_init_def_sb(sc, sc->def_sb, sc->def_sb_dma.paddr, DEF_SB_ID);
	bxe_update_dsb_idx(sc);

	/* Initialize the coalescence parameters. */
	bxe_update_coalesce(sc);

	/* Initialize receive chains. */
	rc = bxe_init_rx_chains(sc);
		goto bxe_init_nic_exit;

	/* Initialize the Transmit BD Chain. */
	bxe_init_tx_chains(sc);

	/* Initialize the Slow Path Chain. */
	bxe_init_sp_ring(sc);

	/* Initialize STORM processor context/configuration. */
	bxe_init_context(sc);

	/* Initialize the Context. */
	bxe_init_internal(sc, load_code);

	/* Enable indirection table for multi-queue operation. */
	bxe_init_ind_table(sc);

	/* Disable the interrupts from device until init is complete.*/
	bxe_int_disable(sc);

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Send a loopback packet through the Network Interface Glue (NIG) block.
 *
 * Writes a minimal debug frame (SOP word group, then EOP word group with
 * a non-IP protocol) into the NIG debug packet register, either via DMAE
 * or via indirect register writes depending on BXE_USE_DMAE.
 */
bxe_lb_pckt(struct bxe_softc *sc)
#ifdef BXE_USE_DMAE
	uint32_t wb_write[3];

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	/* Ethernet source and destination addresses. */
#ifdef BXE_USE_DMAE
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
	/* Non-DMAE path: same three words, written indirectly. */
	REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
	REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
	REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);

	/* NON-IP protocol. */
#ifdef BXE_USE_DMAE
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP */
	REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
	REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
	REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
	REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Perform an internal memory test.
 *
 * Some internal memories are not accessible through the PCIe interface so
 * we send some debug packets for the test.  Runs a 1-packet and then a
 * 10-packet NIG loopback pass, verifying packet counts in the NIG and
 * PRS blocks, then resets/reinitializes BRB/PRS and re-enables the
 * parser neighbor block inputs.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */
bxe_int_mem_test(struct bxe_softc *sc)

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	/* Perform a single debug packet test. */

	/* Disable inputs of parser neighbor blocks. */
	REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(sc, CFC_REG_DEBUG0, 0x1);
	REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request. */
	REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* Send an Ethernet packet. */

	/* Wait until NIG register shows 1 packet of size 0x10. */
		bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *BXE_SP(sc, wb_data[0]);

		DBPRINT(sc, BXE_FATAL,
		    "%s(): NIG loopback test 1 timeout (val = 0x%08X)!\n",
		    __FUNCTION__, val);
		goto bxe_int_mem_test_exit;

	/* Wait until PRS register shows 1 packet */
		val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);

		DBPRINT(sc, BXE_FATAL,
		    "%s(): PRS loopback test 1 timeout (val = 0x%08X)!\n",
		    __FUNCTION__, val);
		goto bxe_int_mem_test_exit;

	/* Reset and init BRB, PRS. */
	REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
	REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
	bxe_init_block(sc, BRB1_BLOCK, COMMON_STAGE);
	bxe_init_block(sc, PRS_BLOCK, COMMON_STAGE);

	/* Perform the test again, this time with 10 packets. */

	/* Disable inputs of parser neighbor blocks. */
	REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(sc, CFC_REG_DEBUG0, 0x1);
	REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request. */
	REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* Send 10 Ethernet packets. */
	for (i = 0; i < 10; i++)

	/* Wait until NIG shows 10 + 1 packets of size 11 * 0x10 = 0xb0. */
		bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *BXE_SP(sc, wb_data[0]);

		DBPRINT(sc, BXE_FATAL,
		    "%s(): NIG loopback test 2 timeout (val = 0x%08X)!\n",
		    __FUNCTION__, val);
		goto bxe_int_mem_test_exit;

	/* Wait until PRS register shows 2 packets. */
	val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
		DBPRINT(sc, BXE_FATAL,
		    "%s(): PRS loopback test 2 timeout (val = 0x%x)!\n",
		    __FUNCTION__, val);
		goto bxe_int_mem_test_exit;

	/* Write 1 to parser credits for CFC search request. */
	REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets. */

	/* Wait until NIG register shows 1 packet of size 0x10. */
	val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
		DBPRINT(sc, BXE_FATAL,
		    "%s(): PRS loopback test 3 timeout (val = 0x%08X)!\n",
		    __FUNCTION__, val);
		goto bxe_int_mem_test_exit;

	/* Clear NIG end-of-packet FIFO. */
	for (i = 0; i < 11; i++)
		REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);

	val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
		DBPRINT(sc, BXE_INFO, "%s(): Unable to clear NIG!\n",
		goto bxe_int_mem_test_exit;

	/* Reset and init BRB, PRS, NIG. */
	REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	bxe_init_block(sc, BRB1_BLOCK, COMMON_STAGE);
	bxe_init_block(sc, PRS_BLOCK, COMMON_STAGE);

	/* Set NIC mode. */
	REG_WR(sc, PRS_REG_NIC_MODE, 1);

	/* Enable inputs of parser neighbor blocks. */
	REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(sc, CFC_REG_DEBUG0, 0x0);
	REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);

bxe_int_mem_test_exit:
	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * Enable attentions from various blocks.
 *
 * Clears (unmasks) the interrupt/attention mask registers of most
 * hardware blocks.  PXP2 and PBF keep selected bits masked
 * (0x480000 / 0X18); these values come from the hardware reference —
 * TODO confirm which attention bits they correspond to.
 */
bxe_enable_blocks_attention(struct bxe_softc *sc)

	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);

	REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(sc, QM_REG_QM_INT_MASK, 0);
	REG_WR(sc, TM_REG_TM_INT_MASK, 0);
	REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
	REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
	REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);

	/* Some PXP2 attention sources remain masked. */
	REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, 0x480000);

	REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
	REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);

	/* Some PBF attention sources remain masked. */
	REG_WR(sc, PBF_REG_PBF_INT_MASK, 0X18);

	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * PCI read/write arbiter configuration.
 *
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * Since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */
#define NUM_WR_Q 13	/* Number of write arbiter virtual queues. */
#define NUM_RD_Q 29	/* Number of read arbiter virtual queues. */
#define MAX_RD_ORD 3	/* Largest supported max-read-request-size order. */
#define MAX_WR_ORD 2	/* Largest supported max-payload-size order. */
/* Configuration for one arbiter queue. */

/*
 * Derived configuration for each read queue for each max request size.
 * Rows are read virtual queues (1..NUM_RD_Q); columns index the read
 * order (0..MAX_RD_ORD).  Each triple supplies the {l, add, ubound}
 * values programmed by bxe_init_pxp_arb() — hardware-derived constants,
 * not intended for hand tuning.
 */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
	{ {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
	{ {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
/*
 * Derived configuration for each write queue for each max request size.
 * Rows are write virtual queues (1..NUM_WR_Q); columns index the write
 * order (0..MAX_WR_ORD).  Same {l, add, ubound} layout as read_arb_data.
 */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
	{ {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
/*
 * Register addresses for read queues.  Entry i holds the {l, add, ubound}
 * register addresses for read queue i+1; the last read queue (NUM_RD_Q)
 * is handled specially in bxe_init_pxp_arb() via PXP2_REG_PSWRQ_BW_WR.
 */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
	    PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
	    PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
	    PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
	    PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
	    PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
	    PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
	    PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
	    PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
	    PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
	    PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
	    PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
	    PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
	    PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
	    PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
	    PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
	    PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
	    PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
	    PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
	    PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
	    PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
	    PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
	    PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
	    PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
	    PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
	    PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
	    PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
	    PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
	    PXP2_REG_PSWRQ_BW_UB28}
/*
 * Register addresses for write queues.  Entry i holds the {l, add, ubound}
 * register addresses for write queue i+1; the last write queue (NUM_WR_Q)
 * is handled specially in bxe_init_pxp_arb() via PXP2_REG_PSWRQ_BW_RD.
 */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
	    PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
	    PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
	    PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
	    PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
	    PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
	    PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
	    PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
	    PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
	    PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
	    PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
	    PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
	    PXP2_REG_RQ_BW_WR_UBOUND30}
/*
 * Program the PXP2 read/write arbiter for the given read order
 * (max read request size) and write order (max payload size).
 *
 * Clamps both orders to the table limits, writes the per-queue
 * {l, add, ubound} weights from the derived tables, then programs the
 * combined last-queue values, the MBS registers and (on E1H) the
 * per-block MPS registers.
 */
bxe_init_pxp_arb(struct bxe_softc *sc, int r_order, int w_order)

	/* Clamp orders to the largest value the tables were derived for. */
	if (r_order > MAX_RD_ORD) {
		DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
		    "%s(): Read order of %d order adjusted to %d\n",
		    __FUNCTION__, r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	if (w_order > MAX_WR_ORD) {
		DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
		    "%s(): Write order of %d order adjusted to %d\n",
		    __FUNCTION__, w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;

	DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
	    "%s(): Read order %d, write order %d\n",
	    __FUNCTION__, r_order, w_order);

	/* Program each read queue from the derived table. */
	for (i = 0; i < NUM_RD_Q - 1; i++) {
		REG_WR(sc, read_arb_addr[i].l,
		    read_arb_data[i][r_order].l);
		REG_WR(sc, read_arb_addr[i].add,
		    read_arb_data[i][r_order].add);
		REG_WR(sc, read_arb_addr[i].ubound,
		    read_arb_data[i][r_order].ubound);

	/*
	 * Program each write queue.  Queues L29/L30 get plain writes;
	 * the shared PSWRQ registers are read-modify-written because the
	 * write fields sit at shifted positions alongside the read fields.
	 */
	for (i = 0; i < NUM_WR_Q - 1; i++) {
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
			REG_WR(sc, write_arb_addr[i].l,
			    write_arb_data[i][w_order].l);
			REG_WR(sc, write_arb_addr[i].add,
			    write_arb_data[i][w_order].add);
			REG_WR(sc, write_arb_addr[i].ubound,
			    write_arb_data[i][w_order].ubound);
			val = REG_RD(sc, write_arb_addr[i].l);
			REG_WR(sc, write_arb_addr[i].l, val |
			    (write_arb_data[i][w_order].l << 10));
			val = REG_RD(sc, write_arb_addr[i].add);
			REG_WR(sc, write_arb_addr[i].add, val |
			    (write_arb_data[i][w_order].add << 10));
			val = REG_RD(sc, write_arb_addr[i].ubound);
			REG_WR(sc, write_arb_addr[i].ubound, val |
			    (write_arb_data[i][w_order].ubound << 7));

	/*
	 * Last write queue is packed into PXP2_REG_PSWRQ_BW_RD and last
	 * read queue into PXP2_REG_PSWRQ_BW_WR.  NOTE(review): the
	 * RD/WR register naming looks crossed but mirrors the reference
	 * driver — confirm against the register map before changing.
	 */
	val = write_arb_data[NUM_WR_Q - 1][w_order].add;
	val += write_arb_data[NUM_WR_Q - 1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q - 1][w_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);

	val = read_arb_data[NUM_RD_Q - 1][r_order].add;
	val += read_arb_data[NUM_RD_Q - 1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q - 1][r_order].l << 17;
	REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);

	/* Max block size for reads and writes. */
	REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);

	if (r_order == MAX_RD_ORD)
		REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (CHIP_IS_E1H(sc)) {
		/* MPS w_order optimal TH presently TH
		 */
		val = ((w_order == 0) ? 2 : 3);
		REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
		REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
		REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
		REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
12527 bxe_init_pxp(struct bxe_softc *sc)
12530 int r_order, w_order;
/*
 * Derive PXP read/write arbiter orders from the PCIe Device Control
 * register and hand them to bxe_init_pxp_arb().
 * NOTE(review): gaps in the embedded line numbers (e.g. 12539) show lines
 * elided from this extract (declarations, else-branch braces, etc.).
 */
12532 devctl = pci_read_config(sc->dev,
12533 sc->pcie_cap + PCI_EXP_DEVCTL, 2);
12534 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12535 "%s(): Read 0x%x from devctl\n", __FUNCTION__, devctl);
/* Max Payload Size field of DEVCTL selects the write order. */
12536 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
/* Max Read Request Size field selects the read order unless mrrs is forced. */
12537 if (sc->mrrs == -1)
12538 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
12540 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12541 "%s(): Force MRRS read order to %d\n",
12542 __FUNCTION__, sc->mrrs);
12543 r_order = sc->mrrs;
12546 bxe_init_pxp_arb(sc, r_order, w_order);
12550 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
12552 uint32_t phy_type, val;
12553 int is_required, port;
/*
 * Enable fan-failure attention via SPIO 5 when the board configuration
 * in shared memory says it is required (directly, or by external PHY type).
 * NOTE(review): embedded line-number gaps indicate elided lines here.
 */
12559 val = SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
12560 SHARED_HW_CFG_FAN_FAILURE_MASK;
12562 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
12566 * The fan failure mechanism is usually related to the PHY type since
12567 * the power consumption of the board is affected by the PHY. Currently,
12568 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
12570 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
/* Check the external PHY type of every port. */
12571 for (port = PORT_0; port < PORT_MAX; port++) {
12572 phy_type = SHMEM_RD(sc,
12573 dev_info.port_hw_config[port].external_phy_config) &
12574 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
12576 ((phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
12577 (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
12578 (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
/* Nothing to do when no fan-failure support is required. */
12581 if (is_required == 0)
12584 /* Fan failure is indicated by SPIO 5. */
12585 bxe_set_spio(sc, MISC_REGISTERS_SPIO_5, MISC_REGISTERS_SPIO_INPUT_HI_Z);
12587 /* Set to active low mode. */
12588 val = REG_RD(sc, MISC_REG_SPIO_INT);
12589 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
12590 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
12591 REG_WR(sc, MISC_REG_SPIO_INT, val);
12593 /* Enable interrupt to signal the IGU. */
12594 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
12595 val |= (1 << MISC_REGISTERS_SPIO_5);
12596 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
12600 * Common initialization.
12603 * 0 = Success, !0 = Failure.
12606 bxe_init_common(struct bxe_softc *sc)
/*
 * One-time, device-wide (non-port, non-function) hardware initialization:
 * resets the chip, brings up each hardware block at COMMON_STAGE, and runs
 * the PXP/CFC/CDU bring-up checks.  Jumps to bxe_init_common_exit on the
 * first failure.
 * NOTE(review): embedded line-number gaps show elided lines (declarations,
 * if-conditions, braces) throughout this extract.
 */
12612 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12614 /* Reset all blocks within the chip except the BMAC. */
12615 bxe_reset_common(sc);
12617 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
12618 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
12621 bxe_init_block(sc, MISC_BLOCK, COMMON_STAGE);
12622 if (CHIP_IS_E1H(sc))
12623 REG_WR(sc, MISC_REG_E1HMF_MODE, IS_E1HMF(sc));
/* Pulse LCPLL control to latch the setting. */
12625 REG_WR(sc, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
12627 REG_WR(sc, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
12629 bxe_init_block(sc, PXP_BLOCK, COMMON_STAGE);
12630 if (CHIP_IS_E1(sc)) {
12632 * Enable HW interrupt from PXP on USDM overflow
12633 * bit 16 on INT_MASK_0.
12635 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
12638 bxe_init_block(sc, PXP2_BLOCK, COMMON_STAGE);
/* Host-endianness dependent PXP2 queue/read swap configuration. */
12641 #ifdef __BIG_ENDIAN
12642 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
12643 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
12644 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
12645 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
12646 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
12647 /* Make sure this value is 0. */
12648 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
12650 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
12651 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
12652 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
12653 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
12656 REG_WR(sc, PXP2_REG_RQ_CDU_P_SIZE, 2);
12658 /* Let the HW do its magic ... */
12660 /* Finish the PXP initialization. */
12661 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
12663 BXE_PRINTF("%s(%d): PXP2 CFG failed!\n", __FILE__, __LINE__);
12665 goto bxe_init_common_exit;
12668 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
12670 BXE_PRINTF("%s(%d): PXP2 RD_INIT failed!\n", __FILE__,
12673 goto bxe_init_common_exit;
12676 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
12677 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
12679 bxe_init_block(sc, DMAE_BLOCK, COMMON_STAGE);
/* DMAE is usable from here on. */
12681 sc->dmae_ready = 1;
12682 bxe_init_fill(sc, TSEM_REG_PRAM, 0, 8);
12684 bxe_init_block(sc, TCM_BLOCK, COMMON_STAGE);
12685 bxe_init_block(sc, UCM_BLOCK, COMMON_STAGE);
12686 bxe_init_block(sc, CCM_BLOCK, COMMON_STAGE);
12687 bxe_init_block(sc, XCM_BLOCK, COMMON_STAGE);
12689 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
12690 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
12691 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
12692 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
12694 bxe_init_block(sc, QM_BLOCK, COMMON_STAGE);
12696 /* Soft reset pulse. */
12697 REG_WR(sc, QM_REG_SOFT_RESET, 1);
12698 REG_WR(sc, QM_REG_SOFT_RESET, 0);
12700 bxe_init_block(sc, DQ_BLOCK, COMMON_STAGE);
12701 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
12703 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
12705 bxe_init_block(sc, BRB1_BLOCK, COMMON_STAGE);
12706 bxe_init_block(sc, PRS_BLOCK, COMMON_STAGE);
12707 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
12709 if (CHIP_IS_E1H(sc))
12710 REG_WR(sc, PRS_REG_E1HOV_MODE, IS_E1HMF(sc));
12712 bxe_init_block(sc, TSDM_BLOCK, COMMON_STAGE);
12713 bxe_init_block(sc, CSDM_BLOCK, COMMON_STAGE);
12714 bxe_init_block(sc, USDM_BLOCK, COMMON_STAGE);
12715 bxe_init_block(sc, XSDM_BLOCK, COMMON_STAGE);
12716 /* Clear STORM processor memory. */
12717 bxe_init_fill(sc, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12718 bxe_init_fill(sc, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12719 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12720 bxe_init_fill(sc, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12722 bxe_init_block(sc, TSEM_BLOCK, COMMON_STAGE);
12723 bxe_init_block(sc, USEM_BLOCK, COMMON_STAGE);
12724 bxe_init_block(sc, CSEM_BLOCK, COMMON_STAGE);
12725 bxe_init_block(sc, XSEM_BLOCK, COMMON_STAGE);
12727 /* Sync semi rtc. */
12728 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000);
12729 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000);
12731 bxe_init_block(sc, UPB_BLOCK, COMMON_STAGE);
12732 bxe_init_block(sc, XPB_BLOCK, COMMON_STAGE);
12733 bxe_init_block(sc, PBF_BLOCK, COMMON_STAGE);
12735 REG_WR(sc, SRC_REG_SOFT_RST, 1);
12736 /* Setup RSS/multi-queue hashing keys. */
12737 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
12738 REG_WR(sc, i, 0xc0cac01a);
12740 bxe_init_block(sc, SRCH_BLOCK, COMMON_STAGE);
12742 REG_WR(sc, SRC_REG_SOFT_RST, 0);
12744 /* Make sure the cdu_context structure has the right size. */
12745 if (sizeof(union cdu_context) != 1024) {
12746 BXE_PRINTF("%s(%d): Invalid size for context (%ld != 1024)!\n",
12747 __FILE__, __LINE__, (long)sizeof(union cdu_context));
12749 goto bxe_init_common_exit;
12752 bxe_init_block(sc, CDU_BLOCK, COMMON_STAGE);
12755 * val = (num_context_in_page << 24) +
12756 * (context_waste_size << 12) +
12757 * context_line_size.
12760 val = (4 << 24) + (0 << 12) + 1024;
12761 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
12763 bxe_init_block(sc, CFC_BLOCK, COMMON_STAGE);
12764 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
12765 /* Enable context validation interrupt from CFC. */
12766 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
12768 /* Set the thresholds to prevent CFC/CDU race. */
12769 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
12771 bxe_init_block(sc, HC_BLOCK, COMMON_STAGE);
12772 bxe_init_block(sc, MISC_AEU_BLOCK, COMMON_STAGE);
12774 bxe_init_block(sc, PXPCS_BLOCK, COMMON_STAGE);
12775 /* Clear PCIe block debug status bits. */
12776 REG_WR(sc, 0x2814, 0xffffffff);
12777 REG_WR(sc, 0x3820, 0xffffffff);
12779 bxe_init_block(sc, EMAC0_BLOCK, COMMON_STAGE);
12780 bxe_init_block(sc, EMAC1_BLOCK, COMMON_STAGE);
12781 bxe_init_block(sc, DBU_BLOCK, COMMON_STAGE);
12782 bxe_init_block(sc, DBG_BLOCK, COMMON_STAGE);
12784 bxe_init_block(sc, NIG_BLOCK, COMMON_STAGE);
12785 if (CHIP_IS_E1H(sc)) {
12786 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_E1HMF(sc));
12787 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_E1HOV(sc));
12790 /* Finish CFC initialization. */
12791 val = bxe_reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
12793 BXE_PRINTF("%s(%d): CFC LL_INIT failed!\n",
12794 __FILE__, __LINE__);
12796 goto bxe_init_common_exit;
12799 val = bxe_reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
12801 BXE_PRINTF("%s(%d): CFC AC_INIT failed!\n",
12802 __FILE__, __LINE__);
12804 goto bxe_init_common_exit;
12807 val = bxe_reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
12809 BXE_PRINTF("%s(%d): CFC CAM_INIT failed!\n",
12810 __FILE__, __LINE__);
12812 goto bxe_init_common_exit;
12815 REG_WR(sc, CFC_REG_DEBUG0, 0);
12817 /* Read NIG statistic and check for first load since powerup. */
12818 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
12819 val = *BXE_SP(sc, wb_data[0]);
12821 /* Do internal memory self test only after a full power cycle. */
12822 if ((CHIP_IS_E1(sc)) && (val == 0) && bxe_int_mem_test(sc)) {
12823 BXE_PRINTF("%s(%d): Internal memory self-test failed!\n",
12824 __FILE__, __LINE__);
12826 goto bxe_init_common_exit;
12829 /* Handle any board specific initialization. */
12830 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
12831 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
12832 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
12833 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12834 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
12841 bxe_setup_fan_failure_detection(sc);
12843 /* Clear PXP2 attentions. */
12844 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
12846 bxe_enable_blocks_attention(sc);
/* PHY common init runs only when the bootcode is present (guard elided). */
12849 bxe_acquire_phy_lock(sc);
12850 bxe_common_init_phy(sc, sc->common.shmem_base);
12851 bxe_release_phy_lock(sc);
12854 "%s(%d): Bootcode is missing - cannot initialize PHY!\n",
12855 __FILE__, __LINE__);
12857 bxe_init_common_exit:
12858 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12863 * Port initialization.
12866 * 0 = Success, !0 = Failure.
12869 bxe_init_port(struct bxe_softc *sc)
12871 uint32_t val, low, high;
12872 uint32_t swap_val, swap_override, aeu_gpio_mask, offset;
12874 int init_stage, port;
/*
 * Per-port hardware initialization: runs every block's PORT0/PORT1 stage,
 * programs BRB pause thresholds from the MTU, PBF credits, AEU attention
 * masks, and PHY-specific GPIO/SPIO attention wiring.
 * NOTE(review): embedded line-number gaps show elided lines in this extract.
 */
12876 port = BP_PORT(sc);
12877 init_stage = port ? PORT1_STAGE : PORT0_STAGE;
12879 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12881 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12882 "%s(): Initializing port %d.\n", __FUNCTION__, port);
12884 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
12886 bxe_init_block(sc, PXP_BLOCK, init_stage);
12887 bxe_init_block(sc, PXP2_BLOCK, init_stage);
12889 bxe_init_block(sc, TCM_BLOCK, init_stage);
12890 bxe_init_block(sc, UCM_BLOCK, init_stage);
12891 bxe_init_block(sc, CCM_BLOCK, init_stage);
12892 bxe_init_block(sc, XCM_BLOCK, init_stage);
12894 bxe_init_block(sc, DQ_BLOCK, init_stage);
12896 bxe_init_block(sc, BRB1_BLOCK, init_stage);
12898 /* Determine the pause threshold for the BRB */
12900 low = (sc->bxe_flags & BXE_ONE_PORT_FLAG) ? 160 : 246;
12901 else if (sc->bxe_ifp->if_mtu > 4096) {
12902 if (sc->bxe_flags & BXE_ONE_PORT_FLAG)
12905 val = sc->bxe_ifp->if_mtu;
12906 /* (24*1024 + val*4)/256 */
12907 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
12910 low = (sc->bxe_flags & BXE_ONE_PORT_FLAG) ? 80 : 160;
12911 high = low + 56; /* 14 * 1024 / 256 */
12913 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low);
12914 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high);
12916 /* Port PRS comes here. */
12917 bxe_init_block(sc, PRS_BLOCK, init_stage);
12919 bxe_init_block(sc, TSDM_BLOCK, init_stage);
12920 bxe_init_block(sc, CSDM_BLOCK, init_stage);
12921 bxe_init_block(sc, USDM_BLOCK, init_stage);
12922 bxe_init_block(sc, XSDM_BLOCK, init_stage);
12924 bxe_init_block(sc, TSEM_BLOCK, init_stage);
12925 bxe_init_block(sc, USEM_BLOCK, init_stage);
12926 bxe_init_block(sc, CSEM_BLOCK, init_stage);
12927 bxe_init_block(sc, XSEM_BLOCK, init_stage);
12929 bxe_init_block(sc, UPB_BLOCK, init_stage);
12930 bxe_init_block(sc, XPB_BLOCK, init_stage);
12932 bxe_init_block(sc, PBF_BLOCK, init_stage);
12934 /* Configure PBF to work without pause for MTU = 9000. */
12935 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0);
12937 /* Update threshold. */
12938 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040/16));
12939 /* Update initial credit. */
12940 REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, (9040/16) + 553 - 22);
12942 /* Probe changes. */
12943 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1);
12945 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0);
12947 bxe_init_block(sc, CDU_BLOCK, init_stage);
12948 bxe_init_block(sc, CFC_BLOCK, init_stage);
12950 if (CHIP_IS_E1(sc)) {
12951 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
12952 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
12955 bxe_init_block(sc, HC_BLOCK, init_stage);
12957 bxe_init_block(sc, MISC_AEU_BLOCK, init_stage);
12959 * init aeu_mask_attn_func_0/1:
12960 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
12961 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
12962 * bits 4-7 are used for "per vn group attention"
12964 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4,
12965 (IS_E1HMF(sc) ? 0xF7 : 0x7));
12967 bxe_init_block(sc, PXPCS_BLOCK, init_stage);
12968 bxe_init_block(sc, EMAC0_BLOCK, init_stage);
12969 bxe_init_block(sc, EMAC1_BLOCK, init_stage);
12970 bxe_init_block(sc, DBU_BLOCK, init_stage);
12971 bxe_init_block(sc, DBG_BLOCK, init_stage);
12973 bxe_init_block(sc, NIG_BLOCK, init_stage);
12975 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
12977 if (CHIP_IS_E1H(sc)) {
12978 /* Enable outer VLAN support if required. */
12979 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4,
12980 (IS_E1HOV(sc) ? 0x1 : 0x2));
12983 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0);
12984 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0);
12985 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1);
12987 bxe_init_block(sc, MCP_BLOCK, init_stage);
12988 bxe_init_block(sc, DMAE_BLOCK, init_stage);
/* Wire PHY-specific attention sources into the AEU. */
12990 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
12991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12992 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_3,
12993 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
12996 * The GPIO should be swapped if the swap register is
12999 swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
13000 swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
13002 /* Select function upon port-swap configuration. */
13004 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
13005 aeu_gpio_mask = (swap_val && swap_override) ?
13006 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
13007 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
13009 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
13010 aeu_gpio_mask = (swap_val && swap_override) ?
13011 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
13012 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
13014 val = REG_RD(sc, offset);
13015 /* Add GPIO3 to group. */
13016 val |= aeu_gpio_mask;
13017 REG_WR(sc, offset, val);
13019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
13020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
13021 /* Add SPIO 5 to group 0. */
13022 reg_addr = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
13023 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
13024 val = REG_RD(sc, reg_addr);
13025 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
13026 REG_WR(sc, reg_addr, val);
13032 bxe__link_reset(sc);
13034 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* ILT (Internal Lookup Table) layout helpers: each function gets half of
 * the 768 ILT entries. */
13039 #define ILT_PER_FUNC (768/2)
13040 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
13042 * The phys address is shifted right 12 bits and has a 1=valid
13043 * bit added at the 53rd bit (bit 52); since this is a wide
13044 * register(TM) we split it into two 32 bit writes.
13046 #define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
13047 #define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
/* Encode a single-entry ((x,x)) or (first,last) ILT range as <last:10|first>. */
13048 #define PXP_ONE_ILT(x) (((x) << 10) | x)
13049 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
/* No ILT lines reserved for CNIC (iSCSI/FCoE offload) in this driver. */
13050 #define CNIC_ILT_LINES 0
13059 bxe_ilt_wr(struct bxe_softc *sc, uint32_t index, bus_addr_t addr)
/*
 * Write one ILT entry: the physical address 'addr', encoded by
 * ONCHIP_ADDR1/ONCHIP_ADDR2, is written as a wide (two 32-bit) register
 * at ILT slot 'index'.  E1H uses the B0 register block.
 */
13063 DBENTER(BXE_INSANE_LOAD | BXE_INSANE_RESET);
13065 if (CHIP_IS_E1H(sc))
13066 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8;
13068 reg = PXP2_REG_RQ_ONCHIP_AT + index * 8;
13070 bxe_wb_wr(sc, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
13072 DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_RESET);
13076 * Initialize a function.
13079 * 0 = Success, !0 = Failure.
13082 bxe_init_func(struct bxe_softc *sc)
13084 uint32_t addr, val;
/*
 * Per-PCI-function hardware initialization: MSI reconfigure capability,
 * this function's ILT window for the CDU context, E1H-only per-function
 * block stages and LLH settings, and per-function host coalescing.
 * NOTE(review): embedded line-number gaps show elided lines in this extract.
 */
13087 port = BP_PORT(sc);
13088 func = BP_FUNC(sc);
13090 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13092 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
13093 "%s(): Initializing port %d, function %d.\n", __FUNCTION__, port,
13096 /* Set MSI reconfigure capability. */
13097 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
13098 val = REG_RD(sc, addr);
13099 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
13100 REG_WR(sc, addr, val);
/* Point this function's ILT base at the slowpath context. */
13102 i = FUNC_ILT_BASE(func);
13104 bxe_ilt_wr(sc, i, BXE_SP_MAPPING(sc, context));
13106 if (CHIP_IS_E1H(sc)) {
13107 REG_WR(sc, PXP2_REG_RQ_CDU_FIRST_ILT, i);
13108 REG_WR(sc, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
13110 REG_WR(sc, PXP2_REG_PSWRQ_CDU0_L2P + func * 4,
13111 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
13113 if (CHIP_IS_E1H(sc)) {
13114 bxe_init_block(sc, MISC_BLOCK, FUNC0_STAGE + func);
13115 bxe_init_block(sc, TCM_BLOCK, FUNC0_STAGE + func);
13116 bxe_init_block(sc, UCM_BLOCK, FUNC0_STAGE + func);
13117 bxe_init_block(sc, CCM_BLOCK, FUNC0_STAGE + func);
13118 bxe_init_block(sc, XCM_BLOCK, FUNC0_STAGE + func);
13119 bxe_init_block(sc, TSEM_BLOCK, FUNC0_STAGE + func);
13120 bxe_init_block(sc, USEM_BLOCK, FUNC0_STAGE + func);
13121 bxe_init_block(sc, CSEM_BLOCK, FUNC0_STAGE + func);
13122 bxe_init_block(sc, XSEM_BLOCK, FUNC0_STAGE + func);
13124 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
13125 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, sc->e1hov);
13128 /* Host Coalescing initialization per function. */
13129 if (CHIP_IS_E1H(sc)) {
13130 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
13131 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
13132 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
13135 bxe_init_block(sc, HC_BLOCK, FUNC0_STAGE + func);
13137 /* Reset PCIe block debug values. */
13138 REG_WR(sc, 0x2114, 0xffffffff);
13139 REG_WR(sc, 0x2120, 0xffffffff);
13141 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13149 * 0 = Success, !0 = Failure.
13152 bxe_init_hw(struct bxe_softc *sc, uint32_t load_code)
/*
 * Top-level hardware init dispatcher.  The MCP load_code tells this driver
 * instance how much of the chip it owns: COMMON initializes everything
 * (then falls through to PORT and FUNCTION in the unelided original),
 * PORT and FUNCTION initialize progressively less.  DMAE is unavailable
 * until common init completes, hence dmae_ready handling per case.
 */
13157 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13159 sc->dmae_ready = 0;
13160 switch (load_code) {
13161 case FW_MSG_CODE_DRV_LOAD_COMMON:
13162 rc = bxe_init_common(sc);
13164 goto bxe_init_hw_exit;
13166 case FW_MSG_CODE_DRV_LOAD_PORT:
13167 sc->dmae_ready = 1;
13168 rc = bxe_init_port(sc);
13170 goto bxe_init_hw_exit;
13172 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
13173 sc->dmae_ready = 1;
13174 rc = bxe_init_func(sc);
13176 goto bxe_init_hw_exit;
13179 DBPRINT(sc, BXE_WARN,
13180 "%s(): Unknown load_code (0x%08X) from MCP!\n",
13181 __FUNCTION__, load_code);
13185 /* Fetch additional config data if the bootcode is running. */
13187 func = BP_FUNC(sc);
13188 /* Fetch the pulse sequence number. */
13189 sc->fw_drv_pulse_wr_seq = (SHMEM_RD(sc,
13190 func_mb[func].drv_pulse_mb) & DRV_PULSE_SEQ_MASK);
13193 /* Clear the default status block. */
13194 bxe_zero_def_sb(sc);
13195 for (i = 0; i < sc->num_queues; i++)
13196 bxe_zero_sb(sc, BP_L_ID(sc) + i);
13199 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13205 * Send a firmware command and wait for the response.
13207 * Post a command to shared memory for the bootcode running on the MCP and
13208 * stall until the bootcode responds or a timeout occurs.
13211 * 0 = Failure, otherwise firmware response code (FW_MSG_CODE_*).
13214 bxe_fw_command(struct bxe_softc *sc, uint32_t command)
13216 uint32_t cnt, rc, seq;
/*
 * Post 'command' (tagged with an incrementing sequence number) to the MCP
 * mailbox in shared memory and poll for the matching response.
 * Polls up to 400 iterations (~2 s at 10 ms per try, per the comments).
 * Returns the FW_MSG_CODE_* response on success; on sequence mismatch the
 * failure path (partially elided here) logs and dumps firmware state.
 */
13219 func = BP_FUNC(sc);
13220 seq = ++sc->fw_seq;
13224 DBRUNMSG(BXE_VERBOSE, bxe_decode_mb_msgs(sc, (command | seq), 0));
13228 /* Write the command to the shared memory mailbox. */
13229 SHMEM_WR(sc, func_mb[func].drv_mb_header, (command | seq));
13231 /* Wait up to 2 seconds for a response. */
13233 /* Wait 10ms for a response. */
13236 /* Pickup the response. */
13237 rc = SHMEM_RD(sc, func_mb[func].fw_mb_header);
13238 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 400));
13240 DBRUNMSG(BXE_VERBOSE, bxe_decode_mb_msgs(sc, 0, rc));
13242 /* Make sure we read the right response. */
13243 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK ))
13244 rc &= FW_MSG_CODE_MASK;
13246 BXE_PRINTF("%s(%d): Bootcode failed to respond!\n",
13247 __FILE__, __LINE__);
13248 DBRUN(bxe_dump_fw(sc));
13252 BXE_FWMB_UNLOCK(sc);
13257 * Allocate a block of memory and map it for DMA. No partial
13258 * completions allowed, release any resources acquired if we
13259 * can't acquire all resources.
13262 * 0 = Success, !0 = Failure
13273 bxe_dma_malloc(struct bxe_softc *sc, bus_size_t size,
13274 struct bxe_dma *dma, int mapflags, const char *msg)
/*
 * Allocate 'size' bytes of DMA-able memory into 'dma' (tag, map, vaddr,
 * paddr) under the parent tag.  All-or-nothing: on any step's failure the
 * goto chain unwinds exactly the resources acquired so far.  'msg' is a
 * human-readable label used only in diagnostics.
 */
13278 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* Debug guard: catch re-use of an already-populated bxe_dma. */
13280 DBRUNIF(dma->size > 0,
13281 BXE_PRINTF("%s(): Called for %s with size > 0 (%05d)!\n",
13282 __FUNCTION__, msg, (int) dma->size));
13284 rc = bus_dma_tag_create(
13285 sc->parent_tag, /* parent */
13286 BCM_PAGE_SIZE, /* alignment for segs */
13287 BXE_DMA_BOUNDARY, /* cannot cross */
13288 BUS_SPACE_MAXADDR, /* restricted low */
13289 BUS_SPACE_MAXADDR, /* restricted hi */
13290 NULL, NULL, /* filter f(), arg */
13291 size, /* max size for this tag */
13292 1, /* # of discontinuities */
13293 size, /* max seg size */
13294 BUS_DMA_ALLOCNOW, /* flags */
13295 NULL, NULL, /* lock f(), arg */
13299 BXE_PRINTF("%s(%d): bus_dma_tag_create() "
13300 "failed (rc = %d) for %s!\n",
13301 __FILE__, __LINE__, rc, msg);
13302 goto bxe_dma_malloc_fail_create;
13305 rc = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
13306 BUS_DMA_NOWAIT, &dma->map);
13308 BXE_PRINTF("%s(%d): bus_dmamem_alloc() "
13309 "failed (rc = %d) for %s!\n",
13310 __FILE__, __LINE__, rc, msg);
13311 goto bxe_dma_malloc_fail_alloc;
/* bxe_dma_map_addr() stores the single segment's address into dma->paddr. */
13314 rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
13315 bxe_dma_map_addr, &dma->paddr, mapflags | BUS_DMA_NOWAIT);
13317 BXE_PRINTF("%s(%d): bus_dmamap_load() "
13318 "failed (rc = %d) for %s!\n",
13319 __FILE__, __LINE__, rc, msg);
13320 goto bxe_dma_malloc_fail_load;
13325 DBPRINT(sc, BXE_VERBOSE, "%s(): size=%06d, vaddr=0x%p, "
13326 "paddr=0x%jX - %s\n", __FUNCTION__, (int) dma->size,
13327 dma->vaddr, (uintmax_t) dma->paddr, msg);
13329 goto bxe_dma_malloc_exit;
/* Unwind in reverse order of acquisition. */
13331 bxe_dma_malloc_fail_load:
13332 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
13334 bxe_dma_malloc_fail_alloc:
13335 bus_dma_tag_destroy(dma->tag);
13338 bxe_dma_malloc_fail_create:
13343 bxe_dma_malloc_exit:
13344 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13349 * Release a block of DMA memory associated tag/map.
13355 bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma)
/*
 * Release everything bxe_dma_malloc() acquired for 'dma': sync, unload the
 * map, free the memory, destroy the tag.  A zero dma->size means nothing
 * was allocated, so the whole teardown is skipped.
 */
13357 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_UNLOAD);
13359 if (dma->size > 0) {
13360 bus_dmamap_sync(dma->tag, dma->map,
13361 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
13362 bus_dmamap_unload(dma->tag, dma->map);
13363 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
13364 bus_dma_tag_destroy(dma->tag);
13368 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_UNLOAD);
13372 * Free any DMA memory owned by the driver.
13374 * Scans through each data structure that requires DMA memory and frees
13375 * the memory if allocated.
13381 bxe_host_structures_free(struct bxe_softc *sc)
13383 struct bxe_fastpath *fp;
13384 int i, j, max_agg_queues;
/*
 * Tear down all driver-owned DMA state: per-queue fastpath chains and
 * mbuf maps/tags, then the shared slowpath/status/statistics blocks.
 * Safe to call when nothing was allocated (bails if no parent tag).
 * NOTE(review): embedded line-number gaps show elided lines in this extract.
 */
13386 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
13387 max_agg_queues = CHIP_IS_E1H(sc) ?
13388 ETH_MAX_AGGREGATION_QUEUES_E1H :
13389 ETH_MAX_AGGREGATION_QUEUES_E1;
/* No parent tag means no DMA resources were ever created. */
13391 if (sc->parent_tag == NULL)
13392 goto bxe_host_structures_free_exit;
13394 for (i = 0; i < sc->num_queues; i++) {
13397 /* Trust no one! */
13401 /* Status block. */
13402 bxe_dma_free(sc, &fp->sb_dma);
13405 bxe_dma_free(sc, &fp->tx_dma);
13406 fp->tx_chain = NULL;
13409 bxe_dma_free(sc, &fp->rx_dma);
13410 fp->rx_chain = NULL;
13413 bxe_dma_free(sc, &fp->rcq_dma);
13414 fp->rcq_chain = NULL;
13417 bxe_dma_free(sc, &fp->sg_dma);
13418 fp->sg_chain = NULL;
13420 /* Unload and destroy the TX mbuf maps. */
13421 if (fp->tx_mbuf_tag != NULL) {
13422 for (j = 0; j < TOTAL_TX_BD; j++) {
13423 if (fp->tx_mbuf_map[j] != NULL) {
13426 fp->tx_mbuf_map[j]);
13427 bus_dmamap_destroy(
13429 fp->tx_mbuf_map[j]);
13433 bus_dma_tag_destroy(fp->tx_mbuf_tag);
13436 /* Unload and destroy the TPA pool mbuf maps. */
13437 if (fp->rx_mbuf_tag != NULL) {
13438 if (fp->tpa_mbuf_spare_map != NULL) {
13441 fp->tpa_mbuf_spare_map);
13442 bus_dmamap_destroy(
13444 fp->tpa_mbuf_spare_map);
13447 for (j = 0; j < max_agg_queues; j++) {
13448 if (fp->tpa_mbuf_map[j] != NULL) {
13451 fp->tpa_mbuf_map[j]);
13452 bus_dmamap_destroy(
13454 fp->tpa_mbuf_map[j]);
13459 /* Unload and destroy the SGE Buf maps. */
13460 if (fp->rx_sge_buf_tag != NULL) {
13461 if (fp->rx_sge_spare_map != NULL) {
13463 fp->rx_sge_buf_tag,
13464 fp->rx_sge_spare_map);
13465 bus_dmamap_destroy(
13466 fp->rx_sge_buf_tag,
13467 fp->rx_sge_spare_map);
13470 for (j = 0; j < TOTAL_RX_SGE; j++) {
13471 if (fp->rx_sge_buf_map[j] != NULL) {
13473 fp->rx_sge_buf_tag,
13474 fp->rx_sge_buf_map[j]);
13475 bus_dmamap_destroy(
13476 fp->rx_sge_buf_tag,
13477 fp->rx_sge_buf_map[j]);
13481 bus_dma_tag_destroy(fp->rx_sge_buf_tag);
13484 /* Unload and destroy the RX mbuf maps. */
13485 if (fp->rx_mbuf_tag != NULL) {
13486 if (fp->rx_mbuf_spare_map != NULL) {
13487 bus_dmamap_unload(fp->rx_mbuf_tag,
13488 fp->rx_mbuf_spare_map);
13489 bus_dmamap_destroy(fp->rx_mbuf_tag,
13490 fp->rx_mbuf_spare_map);
13493 for (j = 0; j < TOTAL_RX_BD; j++) {
13494 if (fp->rx_mbuf_map[j] != NULL) {
13497 fp->rx_mbuf_map[j]);
13498 bus_dmamap_destroy(
13500 fp->rx_mbuf_map[j]);
13504 bus_dma_tag_destroy(fp->rx_mbuf_tag);
13508 /* Destroy the default status block */
13509 bxe_dma_free(sc, &sc->def_sb_dma);
13512 /* Destroy the statistics block */
13513 bxe_dma_free(sc, &sc->stats_dma);
13516 /* Destroy the slowpath block. */
13517 bxe_dma_free(sc, &sc->slowpath_dma);
13518 sc->slowpath = NULL;
13520 /* Destroy the slowpath queue. */
13521 bxe_dma_free(sc, &sc->spq_dma);
13524 /* Destroy the gzip buffer and decompression stream state. */
13525 bxe_dma_free(sc, &sc->gz_dma);
13527 free(sc->strm, M_DEVBUF);
13530 bxe_host_structures_free_exit:
13531 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
13535 * Get DMA memory from the OS.
13537 * Validates that the OS has provided DMA buffers in response to a
13538 * bus_dmamap_load call and saves the physical address of those buffers.
13539 * When the callback is used the OS will return 0 for the mapping function
13540 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
13541 * failures back to the caller.
13547 bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
13549 bus_addr_t *busaddr;
/*
 * bus_dmamap_load() callback: stores the (single) segment's bus address
 * into the bus_addr_t that 'arg' points at (dma->paddr in bxe_dma_malloc).
 * On a mapping error it logs and returns without storing (early-return
 * lines elided from this extract).
 */
13552 /* Check for an error and signal the caller that an error occurred. */
13555 "bxe %s(%d): DMA mapping error (error = %d, nseg = %d)!\n",
13556 __FILE__, __LINE__, error, nseg);
13561 *busaddr = segs->ds_addr;
13565 * Allocate any non-paged DMA memory needed by the driver.
13568 * 0 = Success, !0 = Failure.
13571 bxe_host_structures_alloc(device_t dev)
13573 struct bxe_softc *sc;
13574 struct bxe_fastpath *fp;
13576 bus_addr_t busaddr;
13577 bus_size_t max_size, max_seg_size;
13578 int i, j, max_segments;
13580 sc = device_get_softc(dev);
13581 DBENTER(BXE_VERBOSE_RESET);
13583 int max_agg_queues = CHIP_IS_E1H(sc) ?
13584 ETH_MAX_AGGREGATION_QUEUES_E1H :
13585 ETH_MAX_AGGREGATION_QUEUES_E1;
13588 * Allocate the parent bus DMA tag appropriate for PCI.
13590 rc = bus_dma_tag_create(NULL, /* parent tag */
13591 1, /* alignment for segs */
13592 BXE_DMA_BOUNDARY, /* cannot cross */
13593 BUS_SPACE_MAXADDR, /* restricted low */
13594 BUS_SPACE_MAXADDR, /* restricted hi */
13595 NULL, /* filter f() */
13596 NULL, /* filter f() arg */
13597 MAXBSIZE, /* max map for this tag */
13598 BUS_SPACE_UNRESTRICTED, /* # of discontinuities */
13599 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
13601 NULL, /* lock f() */
13602 NULL, /* lock f() arg */
13603 &sc->parent_tag); /* dma tag */
13605 BXE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
13606 __FILE__, __LINE__);
13608 goto bxe_host_structures_alloc_exit;
13611 /* Allocate DMA memory for each fastpath structure. */
13612 for (i = 0; i < sc->num_queues; i++) {
13616 * Allocate status block*
13618 rc = bxe_dma_malloc(sc, BXE_STATUS_BLK_SZ,
13619 &fp->sb_dma, BUS_DMA_NOWAIT, "fp status block");
13620 /* ToDo: Only using 32 bytes out of 4KB allocation! */
13622 goto bxe_host_structures_alloc_exit;
13624 (struct host_status_block *) fp->sb_dma.vaddr;
13627 * Allocate TX chain.
13629 rc = bxe_dma_malloc(sc, BXE_TX_CHAIN_PAGE_SZ *
13630 NUM_TX_PAGES, &fp->tx_dma, BUS_DMA_NOWAIT,
13633 goto bxe_host_structures_alloc_exit;
13634 fp->tx_chain = (union eth_tx_bd_types *) fp->tx_dma.vaddr;
13636 /* Link the TX chain pages. */
13637 for (j = 1; j <= NUM_TX_PAGES; j++) {
13638 struct eth_tx_next_bd *tx_n_bd =
13639 &fp->tx_chain[TOTAL_TX_BD_PER_PAGE * j - 1].next_bd;
13641 busaddr = fp->tx_dma.paddr +
13642 BCM_PAGE_SIZE * (j % NUM_TX_PAGES);
13643 tx_n_bd->addr_hi = htole32(U64_HI(busaddr));
13644 tx_n_bd->addr_lo = htole32(U64_LO(busaddr));
13648 * Allocate RX chain.
13650 rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
13651 NUM_RX_PAGES, &fp->rx_dma, BUS_DMA_NOWAIT,
13654 goto bxe_host_structures_alloc_exit;
13655 fp->rx_chain = (struct eth_rx_bd *) fp->rx_dma.vaddr;
13657 /* Link the RX chain pages. */
13658 for (j = 1; j <= NUM_RX_PAGES; j++) {
13659 struct eth_rx_bd *rx_bd =
13660 &fp->rx_chain[TOTAL_RX_BD_PER_PAGE * j - 2];
13662 busaddr = fp->rx_dma.paddr +
13663 BCM_PAGE_SIZE * (j % NUM_RX_PAGES);
13664 rx_bd->addr_hi = htole32(U64_HI(busaddr));
13665 rx_bd->addr_lo = htole32(U64_LO(busaddr));
13669 * Allocate CQ chain.
13671 rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
13672 NUM_RCQ_PAGES, &fp->rcq_dma, BUS_DMA_NOWAIT,
13673 "rcq chain pages");
13675 goto bxe_host_structures_alloc_exit;
13676 fp->rcq_chain = (union eth_rx_cqe *) fp->rcq_dma.vaddr;
13678 /* Link the CQ chain pages. */
13679 for (j = 1; j <= NUM_RCQ_PAGES; j++) {
13680 struct eth_rx_cqe_next_page *nextpg =
13681 (struct eth_rx_cqe_next_page *)
13682 &fp->rcq_chain[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1];
13684 busaddr = fp->rcq_dma.paddr +
13685 BCM_PAGE_SIZE * (j % NUM_RCQ_PAGES);
13686 nextpg->addr_hi = htole32(U64_HI(busaddr));
13687 nextpg->addr_lo = htole32(U64_LO(busaddr));
13691 * Allocate SG chain.
13693 rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
13694 NUM_RX_SGE_PAGES, &fp->sg_dma, BUS_DMA_NOWAIT,
13697 goto bxe_host_structures_alloc_exit;
13698 fp->sg_chain = (struct eth_rx_sge *) fp->sg_dma.vaddr;
13700 /* Link the SG chain pages. */
13701 for (j = 1; j <= NUM_RX_SGE_PAGES; j++) {
13702 struct eth_rx_sge *nextpg =
13703 &fp->sg_chain[TOTAL_RX_SGE_PER_PAGE * j - 2];
13705 busaddr = fp->sg_dma.paddr +
13706 BCM_PAGE_SIZE * (j % NUM_RX_SGE_PAGES);
13707 nextpg->addr_hi = htole32(U64_HI(busaddr));
13708 nextpg->addr_lo = htole32(U64_LO(busaddr));
13712 * Check required size before mapping to conserve resources.
13714 if (sc->tso_enable == TRUE) {
13715 max_size = BXE_TSO_MAX_SIZE;
13716 max_segments = BXE_TSO_MAX_SEGMENTS;
13717 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
13719 max_size = MCLBYTES * BXE_MAX_SEGMENTS;
13720 max_segments = BXE_MAX_SEGMENTS;
13721 max_seg_size = MCLBYTES;
13724 /* Create a DMA tag for TX mbufs. */
13725 if (bus_dma_tag_create(sc->parent_tag,
13726 1, /* alignment for segs */
13727 BXE_DMA_BOUNDARY, /* cannot cross */
13728 BUS_SPACE_MAXADDR, /* restricted low */
13729 BUS_SPACE_MAXADDR, /* restricted hi */
13730 NULL, /* filter f() */
13731 NULL, /* filter f() arg */
13732 max_size, /* max map for this tag */
13733 max_segments, /* # of discontinuities */
13734 max_seg_size, /* max seg size */
13736 NULL, /* lock f() */
13737 NULL, /* lock f() arg */
13738 &fp->tx_mbuf_tag)) {
13740 "%s(%d): Could not allocate fp[%d] "
13741 "TX mbuf DMA tag!\n",
13742 __FILE__, __LINE__, i);
13744 goto bxe_host_structures_alloc_exit;
13747 /* Create DMA maps for each the TX mbuf cluster(ext buf). */
13748 for (j = 0; j < TOTAL_TX_BD; j++) {
13749 if (bus_dmamap_create(fp->tx_mbuf_tag,
13751 &fp->tx_mbuf_map[j])) {
13753 "%s(%d): Unable to create fp[%02d]."
13754 "tx_mbuf_map[%d] DMA map!\n",
13755 __FILE__, __LINE__, i, j);
13757 goto bxe_host_structures_alloc_exit;
13762 * Create a DMA tag for RX mbufs.
13764 if (bus_dma_tag_create(sc->parent_tag,
13765 1, /* alignment for segs */
13766 BXE_DMA_BOUNDARY, /* cannot cross */
13767 BUS_SPACE_MAXADDR, /* restricted low */
13768 BUS_SPACE_MAXADDR, /* restricted hi */
13769 NULL, /* filter f() */
13770 NULL, /* filter f() arg */
13771 MJUM9BYTES, /* max map for this tag */
13772 1, /* # of discontinuities */
13773 MJUM9BYTES, /* max seg size */
13775 NULL, /* lock f() */
13776 NULL, /* lock f() arg */
13777 &fp->rx_mbuf_tag)) {
13779 "%s(%d): Could not allocate fp[%02d] "
13780 "RX mbuf DMA tag!\n",
13781 __FILE__, __LINE__, i);
13783 goto bxe_host_structures_alloc_exit;
13786 /* Create DMA maps for the RX mbuf clusters. */
13787 if (bus_dmamap_create(fp->rx_mbuf_tag,
13788 BUS_DMA_NOWAIT, &fp->rx_mbuf_spare_map)) {
13790 "%s(%d): Unable to create fp[%02d]."
13791 "rx_mbuf_spare_map DMA map!\n",
13792 __FILE__, __LINE__, i);
13794 goto bxe_host_structures_alloc_exit;
13797 for (j = 0; j < TOTAL_RX_BD; j++) {
13798 if (bus_dmamap_create(fp->rx_mbuf_tag,
13799 BUS_DMA_NOWAIT, &fp->rx_mbuf_map[j])) {
13801 "%s(%d): Unable to create fp[%02d]."
13802 "rx_mbuf_map[%d] DMA map!\n",
13803 __FILE__, __LINE__, i, j);
13805 goto bxe_host_structures_alloc_exit;
13810 * Create a DMA tag for RX SGE bufs.
13812 if (bus_dma_tag_create(sc->parent_tag, 1,
13813 BXE_DMA_BOUNDARY, BUS_SPACE_MAXADDR,
13814 BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 1,
13815 PAGE_SIZE, 0, NULL, NULL, &fp->rx_sge_buf_tag)) {
13817 "%s(%d): Could not allocate fp[%02d] "
13818 "RX SGE mbuf DMA tag!\n",
13819 __FILE__, __LINE__, i);
13821 goto bxe_host_structures_alloc_exit;
13824 /* Create DMA maps for the SGE mbuf clusters. */
13825 if (bus_dmamap_create(fp->rx_sge_buf_tag,
13826 BUS_DMA_NOWAIT, &fp->rx_sge_spare_map)) {
13828 "%s(%d): Unable to create fp[%02d]."
13829 "rx_sge_spare_map DMA map!\n",
13830 __FILE__, __LINE__, i);
13832 goto bxe_host_structures_alloc_exit;
13835 for (j = 0; j < TOTAL_RX_SGE; j++) {
13836 if (bus_dmamap_create(fp->rx_sge_buf_tag,
13837 BUS_DMA_NOWAIT, &fp->rx_sge_buf_map[j])) {
13839 "%s(%d): Unable to create fp[%02d]."
13840 "rx_sge_buf_map[%d] DMA map!\n",
13841 __FILE__, __LINE__, i, j);
13843 goto bxe_host_structures_alloc_exit;
13847 /* Create DMA maps for the TPA pool mbufs. */
13848 if (bus_dmamap_create(fp->rx_mbuf_tag,
13849 BUS_DMA_NOWAIT, &fp->tpa_mbuf_spare_map)) {
13851 "%s(%d): Unable to create fp[%02d]."
13852 "tpa_mbuf_spare_map DMA map!\n",
13853 __FILE__, __LINE__, i);
13855 goto bxe_host_structures_alloc_exit;
13858 for (j = 0; j < max_agg_queues; j++) {
13859 if (bus_dmamap_create(fp->rx_mbuf_tag,
13860 BUS_DMA_NOWAIT, &fp->tpa_mbuf_map[j])) {
13862 "%s(%d): Unable to create fp[%02d]."
13863 "tpa_mbuf_map[%d] DMA map!\n",
13864 __FILE__, __LINE__, i, j);
13866 goto bxe_host_structures_alloc_exit;
13870 bxe_init_sge_ring_bit_mask(fp);
13874 * Allocate default status block.
13876 rc = bxe_dma_malloc(sc, BXE_DEF_STATUS_BLK_SZ, &sc->def_sb_dma,
13877 BUS_DMA_NOWAIT, "default status block");
13879 goto bxe_host_structures_alloc_exit;
13880 sc->def_sb = (struct host_def_status_block *) sc->def_sb_dma.vaddr;
13883 * Allocate statistics block.
13885 rc = bxe_dma_malloc(sc, BXE_STATS_BLK_SZ, &sc->stats_dma,
13886 BUS_DMA_NOWAIT, "statistics block");
13888 goto bxe_host_structures_alloc_exit;
13889 sc->stats = (struct statistics_block *) sc->stats_dma.vaddr;
13892 * Allocate slowpath block.
13894 rc = bxe_dma_malloc(sc, BXE_SLOWPATH_SZ, &sc->slowpath_dma,
13895 BUS_DMA_NOWAIT, "slowpath block");
13897 goto bxe_host_structures_alloc_exit;
13898 sc->slowpath = (struct bxe_slowpath *) sc->slowpath_dma.vaddr;
13901 * Allocate slowpath queue.
13903 rc = bxe_dma_malloc(sc, BXE_SPQ_SZ, &sc->spq_dma,
13904 BUS_DMA_NOWAIT, "slowpath queue");
13906 goto bxe_host_structures_alloc_exit;
13907 sc->spq = (struct eth_spe *) sc->spq_dma.vaddr;
13910 * Allocate firmware decompression buffer.
13912 rc = bxe_dma_malloc(sc, BXE_FW_BUF_SIZE, &sc->gz_dma,
13913 BUS_DMA_NOWAIT, "gunzip buffer");
13915 goto bxe_host_structures_alloc_exit;
13916 sc->gz = sc->gz_dma.vaddr;
13917 if (sc->strm == NULL) {
13918 goto bxe_host_structures_alloc_exit;
13921 sc->strm = malloc(sizeof(*sc->strm), M_DEVBUF, M_NOWAIT);
13923 bxe_host_structures_alloc_exit:
13924 DBEXIT(BXE_VERBOSE_RESET);
13929 * Program the MAC address for 57710 controllers.
/*
 * Program the unicast (primary) and broadcast MAC addresses into the
 * E1 (BCM57710) CAM by posting a SET_MAC ramrod on the slowpath queue.
 * 'set' selects between installing the CAM entries and invalidating them.
 * NOTE(review): this is an elided listing — interior source lines (braces,
 * the if (set)/else structure, some declarations) are missing from view.
 */
13935 bxe_set_mac_addr_e1(struct bxe_softc *sc, int set)
13937 struct mac_configuration_cmd *config;
13938 struct mac_configuration_entry *config_table;
13942 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
/* The slowpath buffer holds the command; BP_PORT selects port 0 or 1. */
13944 config = BXE_SP(sc, mac_config);
13945 port = BP_PORT(sc);
13948 * Port 0 Unicast Addresses: 32 Perfect Match Filters (31-0)
13949 * Port 1 Unicast Addresses: 32 Perfect Match Filters (63-32)
13950 * Port 0 Multicast Addresses: 128 Hashes (127-64)
13951 * Port 1 Multicast Addresses: 128 Hashes (191-128)
/* Two entries: primary MAC (index 0) and broadcast (index 1). */
13954 config->hdr.length = 2;
13955 config->hdr.offset = port ? 32 : 0;
13956 config->hdr.client_id = BP_CL_ID(sc);
13957 config->hdr.reserved1 = 0;
13959 /* Program the primary MAC address. */
13960 config_table = &config->config_table[0];
13961 eaddr = sc->link_params.mac_addr;
/* CAM entries store the MAC as three big-endian 16-bit words. */
13962 config_table->cam_entry.msb_mac_addr = eaddr[0] << 8 | eaddr[1];
13963 config_table->cam_entry.middle_mac_addr = eaddr[2] << 8 | eaddr[3];
13964 config_table->cam_entry.lsb_mac_addr = eaddr[4] << 8 | eaddr[5];
13965 config_table->cam_entry.flags = htole16(port);
/*
 * NOTE(review): the elided lines presumably branch on 'set' here —
 * program target flags when installing, CAM_INVALIDATE when clearing.
 * Confirm against the full source.
 */
13968 config_table->target_table_entry.flags = 0;
13970 CAM_INVALIDATE(config_table);
13972 config_table->target_table_entry.vlan_id = 0;
13974 DBPRINT(sc, BXE_VERBOSE, "%s(): %s MAC (%04x:%04x:%04x)\n",
13975 __FUNCTION__, (set ? "Setting" : "Clearing"),
13976 config_table->cam_entry.msb_mac_addr,
13977 config_table->cam_entry.middle_mac_addr,
13978 config_table->cam_entry.lsb_mac_addr);
13980 /* Program the broadcast MAC address. */
13981 config_table = &config->config_table[1];
13982 config_table->cam_entry.msb_mac_addr = 0xffff;
13983 config_table->cam_entry.middle_mac_addr = 0xffff;
13984 config_table->cam_entry.lsb_mac_addr = 0xffff;
13985 config_table->cam_entry.flags = htole16(port);
13988 config_table->target_table_entry.flags =
13989 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
13991 CAM_INVALIDATE(config_table);
13993 config_table->target_table_entry.vlan_id = 0;
13995 /* Post the command to slow path queue. */
13996 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
13997 U64_HI(BXE_SP_MAPPING(sc, mac_config)),
13998 U64_LO(BXE_SP_MAPPING(sc, mac_config)), 0);
14000 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14004 * Program the MAC address for 57711/57711E controllers.
/*
 * Program the primary MAC address for E1H (BCM57711/57711E) controllers.
 * Unlike the E1 path this is per-function (not per-port), programs a single
 * CAM entry, and carries the E1H outer-VLAN (e1hov) id.
 * NOTE(review): elided listing — interior lines (braces, 'sc->state' DBPRINT
 * argument, the if (set)/else around the flags assignments) are missing.
 */
14010 bxe_set_mac_addr_e1h(struct bxe_softc *sc, int set)
14012 struct mac_configuration_cmd_e1h *config;
14013 struct mac_configuration_entry_e1h *config_table;
14017 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14019 config = (struct mac_configuration_cmd_e1h *)BXE_SP(sc, mac_config);
14020 port = BP_PORT(sc);
14021 func = BP_FUNC(sc);
/* A MAC can only be installed once the device is in the OPEN state. */
14023 if (set && (sc->state != BXE_STATE_OPEN)) {
14024 DBPRINT(sc, BXE_VERBOSE,
14025 "%s(): Can't set E1H MAC in state 0x%08X!\n", __FUNCTION__,
14027 goto bxe_set_mac_addr_e1h_exit;
14032 * Function 0-7 Unicast Addresses: 8 Perfect Match Filters
14033 * Multicast Addresses: 20 + FUNC * 20, 20 each (???)
/* One CAM entry, offset by PCI function number. */
14035 config->hdr.length = 1;
14036 config->hdr.offset = func;
14037 config->hdr.client_id = 0xff;
14038 config->hdr.reserved1 = 0;
14040 /* Program the primary MAC address. */
14041 config_table = &config->config_table[0];
14042 eaddr = sc->link_params.mac_addr;
/* MAC stored as three big-endian 16-bit words, as in the E1 path. */
14043 config_table->msb_mac_addr = eaddr[0] << 8 | eaddr[1];
14044 config_table->middle_mac_addr = eaddr[2] << 8 | eaddr[3];
14045 config_table->lsb_mac_addr = eaddr[4] << 8 | eaddr[5];
14046 config_table->clients_bit_vector = htole32(1 << sc->fp->cl_id);
14048 config_table->vlan_id = 0;
14049 config_table->e1hov_id = htole16(sc->e1hov);
/*
 * NOTE(review): the elided lines presumably select between these two
 * flags assignments based on 'set' (install vs. invalidate) — confirm
 * against the full source.
 */
14052 config_table->flags = port;
14054 config_table->flags =
14055 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
14057 DBPRINT(sc, BXE_VERBOSE,
14058 "%s(): %s MAC (%04x:%04x:%04x), E1HOV = %d, CLID = %d\n",
14059 __FUNCTION__, (set ? "Setting" : "Clearing"),
14060 config_table->msb_mac_addr, config_table->middle_mac_addr,
14061 config_table->lsb_mac_addr, sc->e1hov, BP_L_ID(sc));
/* Post the SET_MAC ramrod on the slowpath queue. */
14063 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
14064 U64_HI(BXE_SP_MAPPING(sc, mac_config)),
14065 U64_LO(BXE_SP_MAPPING(sc, mac_config)), 0);
14067 bxe_set_mac_addr_e1h_exit:
14068 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14072 * Programs the various packet receive modes (broadcast and multicast).
/*
 * Program the receive filtering mode: normal, promiscuous, all-multicast,
 * or selective multicast.  For E1 chips selective multicast is programmed
 * as CAM entries via a ramrod; otherwise a 256-bit CRC hash filter is
 * written to the MC_HASH registers.  Caller must hold the core lock.
 * NOTE(review): elided listing — interior lines (braces, 'ifp'/'maddr'
 * declarations, 'continue' statements, IF_ADDR_LOCK calls, the i++ in the
 * FOREACH loop) are missing from this chunk.
 */
14079 bxe_set_rx_mode(struct bxe_softc *sc)
14082 struct ifmultiaddr *ifma;
14083 struct mac_configuration_cmd *config;
14084 struct mac_configuration_entry *config_table;
14085 uint32_t mc_filter[MC_HASH_SIZE];
14087 uint32_t crc, bit, regidx, rx_mode;
14088 int i, old, offset, port;
14090 BXE_CORE_LOCK_ASSERT(sc);
14092 rx_mode = BXE_RX_MODE_NORMAL;
14093 port = BP_PORT(sc);
14095 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* RX filters can only be changed while the device is running. */
14097 if (sc->state != BXE_STATE_OPEN) {
14098 DBPRINT(sc, BXE_WARN, "%s(): State (0x%08X) is not open!\n",
14099 __FUNCTION__, sc->state);
14100 goto bxe_set_rx_mode_exit;
14106 * Check for promiscuous, all multicast, or selected
14107 * multicast address filtering.
14109 if (ifp->if_flags & IFF_PROMISC) {
14110 /* Enable promiscuous mode. */
14111 rx_mode = BXE_RX_MODE_PROMISC;
14112 } else if (ifp->if_flags & IFF_ALLMULTI ||
14113 ifp->if_amcount > BXE_MAX_MULTICAST) {
14114 /* Enable all multicast addresses. */
14115 rx_mode = BXE_RX_MODE_ALLMULTI;
14117 /* Enable selective multicast mode. */
14118 if (CHIP_IS_E1(sc)) {
/* E1: program each multicast address as an individual CAM entry. */
14120 config = BXE_SP(sc, mcast_config)
14124 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
14125 if (ifma->ifma_addr->sa_family != AF_LINK)
14127 maddr = (uint8_t *)LLADDR(
14128 (struct sockaddr_dl *)ifma->ifma_addr);
14129 config_table = &config->config_table[i];
14130 config_table->cam_entry.msb_mac_addr =
14131 maddr[0] << 8 | maddr[1];
14132 config_table->cam_entry.middle_mac_addr =
14133 maddr[2] << 8 | maddr[3];
14134 config_table->cam_entry.lsb_mac_addr =
14135 maddr[4] << 8 | maddr[5];
14136 config_table->cam_entry.flags = htole16(port);
14137 config_table->target_table_entry.flags = 0;
14138 config_table->target_table_entry.
14139 clients_bit_vector =
14140 htole32(1 << BP_L_ID(sc));
14141 config_table->target_table_entry.vlan_id = 0;
14143 DBPRINT(sc, BXE_INFO,
14144 "%s(): Setting MCAST[%d] (%04X:%04X:%04X)\n",
14146 config_table->cam_entry.msb_mac_addr,
14147 config_table->cam_entry.middle_mac_addr,
14148 config_table->cam_entry.lsb_mac_addr);
14151 IF_ADDR_UNLOCK(ifp);
14153 old = config->hdr.length;
14155 /* Invalidate any extra MC entries in the CAM. */
14157 for (; i < old; i++) {
14158 config_table = &config->config_table[i];
14159 if (CAM_IS_INVALID(config_table))
14162 CAM_INVALIDATE(config_table);
/* MC entries live above both ports' 32+32 unicast filters. */
14166 offset = BXE_MAX_MULTICAST * (1 + port);
14167 config->hdr.length = i;
14168 config->hdr.offset = offset;
14169 config->hdr.client_id = sc->fp->cl_id;
14170 config->hdr.reserved1 = 0;
14172 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
14173 U64_HI(BXE_SP_MAPPING(sc, mcast_config)),
14174 U64_LO(BXE_SP_MAPPING(sc, mcast_config)), 0);
/* Non-E1: build a CRC32-based multicast hash filter instead. */
14176 /* Accept one or more multicasts */
14177 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
14181 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
14182 if (ifma->ifma_addr->sa_family != AF_LINK)
14184 crc = ether_crc32_le(ifma->ifma_addr->sa_data,
/* Top CRC byte selects the bit; NOTE(review): the line computing
 * 'regidx' from 'bit' is elided — confirm against the full source. */
14186 bit = (crc >> 24) & 0xff;
14189 mc_filter[regidx] |= (1 << bit);
14191 IF_ADDR_UNLOCK(ifp);
14193 for (i = 0; i < MC_HASH_SIZE; i++)
14194 REG_WR(sc, MC_HASH_OFFSET(sc, i), mc_filter[i]);
14198 DBPRINT(sc, BXE_VERBOSE, "%s(): Enabling new receive mode: 0x%08X\n",
14199 __FUNCTION__, rx_mode);
/* Record the mode and push it to the storm firmware. */
14201 sc->rx_mode = rx_mode;
14202 bxe_set_storm_rx_mode(sc);
14204 bxe_set_rx_mode_exit:
14205 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
14209 * Function specific controller reset.
/*
 * Function-level controller reset: quiesce the interrupt generation unit
 * (IGU/HC) for this port and clear this function's ILT (address
 * translation) entries.
 * NOTE(review): elided listing — opening/closing braces and possibly
 * other statements are missing from this chunk.
 */
14215 bxe_reset_func(struct bxe_softc *sc)
14217 int base, func, i, port;
14219 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14221 port = BP_PORT(sc);
14222 func = BP_FUNC(sc);
14224 /* Configure IGU. */
14225 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
14226 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
14227 REG_WR(sc, HC_REG_CONFIG_0 + (port * 4), 0x1000);
/* Zero every ILT entry owned by this function. */
14230 base = FUNC_ILT_BASE(func);
14231 for (i = base; i < base + ILT_PER_FUNC; i++)
14232 bxe_ilt_wr(sc, i, 0);
14234 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14238 * Port specific controller reset.
/*
 * Port-level controller reset: mask NIG interrupts, stop packet delivery
 * to the BRB (buffer manager), mask AEU attentions, then verify the BRB
 * has drained for this port.
 * NOTE(review): elided listing — 'val' declaration, braces, a delay
 * between disabling RX and checking occupancy, and the 'if (val != 0)'
 * guard around the DBPRINT appear to be missing from this chunk.
 */
14244 bxe_reset_port(struct bxe_softc *sc)
14249 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14251 port = BP_PORT(sc);
14252 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
14254 /* Do not receive packets to BRB. */
14255 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0);
14257 /* Do not direct receive packets that are not for MCP to the BRB. */
14258 REG_WR(sc, port ? NIG_REG_LLH1_BRB1_NOT_MCP :
14259 NIG_REG_LLH0_BRB1_NOT_MCP, 0x0);
14261 /* Configure AEU. */
14262 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0);
14266 /* Check for BRB port occupancy. */
14267 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4);
14269 DBPRINT(sc, BXE_VERBOSE,
14270 "%s(): BRB1 is not empty (%d blocks are occupied)!\n",
14271 __FUNCTION__, val);
14273 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14277 * Common controller reset.
/*
 * Common (chip-wide) reset: pulse the MISC reset-register clear bits to
 * put the shared hardware blocks into reset.
 * NOTE(review): elided listing — the value arguments to both REG_WR calls
 * (on the continuation lines) and the function braces are missing from
 * this chunk.
 */
14283 bxe_reset_common(struct bxe_softc *sc)
14286 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14288 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
14290 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
14293 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14297 * Reset the controller.
/*
 * Dispatch the controller reset according to the unload response code
 * received from the MCP (management firmware): COMMON resets port,
 * function, and shared blocks; PORT resets port and function; FUNCTION
 * resets only the function.
 * NOTE(review): elided listing — 'break' statements, the 'default:'
 * label, and braces are missing from this chunk; each case is presumed
 * to end with a break rather than fall through — confirm against the
 * full source.
 */
14303 bxe_reset_chip(struct bxe_softc *sc, uint32_t reset_code)
14306 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14308 switch (reset_code) {
14309 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
14310 bxe_reset_port(sc);
14311 bxe_reset_func(sc);
14312 bxe_reset_common(sc);
14314 case FW_MSG_CODE_DRV_UNLOAD_PORT:
14315 bxe_reset_port(sc);
14316 bxe_reset_func(sc);
14318 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
14319 bxe_reset_func(sc);
14322 BXE_PRINTF("%s(%d): Unknown reset code (0x%08X) from MCP!\n",
14323 __FILE__, __LINE__, reset_code);
14327 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14331 * Called by the OS to set media options (link, speed, etc.)
14332 * when the user specifies "ifconfig bxe media XXX" or
14333 * "ifconfig bxe mediaopt XXX".
14336 * 0 = Success, !0 = Failure
/*
 * Handle "ifconfig bxe media/mediaopt" requests from the OS.
 *
 * Only the Ethernet media type is accepted; subtype changes are rejected
 * with a warning.  Returns 0 on success, non-zero on failure.
 *
 * FIX(review): the exit path previously called DBENTER() instead of
 * DBEXIT(), so the PHY debug trace recorded two entries and no exit for
 * this routine.  Every other function in this file pairs DBENTER at entry
 * with DBEXIT at the exit label; corrected to match.
 *
 * NOTE(review): elided listing — the 'rc' declaration, case labels, and
 * braces are missing from this chunk.
 */
14339 bxe_ifmedia_upd(struct ifnet *ifp)
14341 struct bxe_softc *sc;
14342 struct ifmedia *ifm;
14345 sc = ifp->if_softc;
14346 DBENTER(BXE_VERBOSE_PHY);
14348 ifm = &sc->bxe_ifmedia;
14351 /* We only support Ethernet media type. */
14352 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
14354 goto bxe_ifmedia_upd_exit;
14357 switch (IFM_SUBTYPE(ifm->ifm_media)) {
14359 /* ToDo: What to do here? */
14360 /* Doing nothing translates to success here. */
14368 case IFM_10G_TWINAX:
14371 /* We don't support changing the media type. */
14372 DBPRINT(sc, BXE_WARN, "%s(): Invalid media type!\n",
14377 bxe_ifmedia_upd_exit:
14378 DBEXIT(BXE_VERBOSE_PHY);
14383 * Called by the OS to report current media status
14384 * (link, speed, etc.).
/*
 * Report current media status (link state, speed, duplex) to the OS via
 * the ifmediareq structure.  Reports IFM_NONE when the interface is not
 * running or the link is down.
 * NOTE(review): elided listing — braces (including the else-block around
 * the IFM_NONE/goto pair) are missing from this chunk.
 */
14390 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
14392 struct bxe_softc *sc;
14394 sc = ifp->if_softc;
14395 DBENTER(BXE_EXTREME_LOAD | BXE_EXTREME_RESET);
14397 /* Report link down if the driver isn't running. */
14398 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
14399 ifmr->ifm_active |= IFM_NONE;
14400 goto bxe_ifmedia_status_exit;
14403 /* Setup the default interface info. */
14404 ifmr->ifm_status = IFM_AVALID;
14405 ifmr->ifm_active = IFM_ETHER;
/* Link up: mark active; otherwise (elided else) report IFM_NONE. */
14407 if (sc->link_vars.link_up)
14408 ifmr->ifm_status |= IFM_ACTIVE;
14410 ifmr->ifm_active |= IFM_NONE;
14411 goto bxe_ifmedia_status_exit;
/* 'sc->media' carries the detected media subtype (e.g. 10G variants). */
14414 ifmr->ifm_active |= sc->media;
14416 if (sc->link_vars.duplex == MEDIUM_FULL_DUPLEX)
14417 ifmr->ifm_active |= IFM_FDX;
14419 ifmr->ifm_active |= IFM_HDX;
14421 bxe_ifmedia_status_exit:
14422 DBEXIT(BXE_EXTREME_LOAD | BXE_EXTREME_RESET);
14427 * Update last maximum scatter gather entry.
/*
 * Track the highest SGE index seen so far for this fastpath.  SUB_S16
 * performs a signed 16-bit difference so the comparison is correct even
 * when the index wraps around 65535.
 * NOTE(review): elided listing — the 'last_max' declaration and braces
 * are missing from this chunk.
 */
14432 static __inline void
14433 bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t index)
14437 last_max = fp->last_max_sge;
14438 if (SUB_S16(index, last_max) > 0)
14439 fp->last_max_sge = index;
14443 * Clear scatter gather mask next elements.
/*
 * Clear the SGE-mask bits for the two "next page pointer" elements at
 * the end of each SGE chain page, since those slots never hold data.
 * NOTE(review): elided listing — the 'i'/'j'/'index' declarations, the
 * 'index++' in the inner loop, and closing braces are missing from this
 * chunk.
 */
14449 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
14453 for (i = 0; i < NUM_RX_SGE_PAGES; i++) {
/* First reserved slot follows the usable entries on each page. */
14454 index = i * TOTAL_RX_SGE_PER_PAGE + USABLE_RX_SGE_PER_PAGE;
14455 for (j = 0; j < 2; j++) {
14456 SGE_MASK_CLEAR_BIT(fp, index);
14463 * Update SGE producer.
/*
 * Advance the SGE producer index after a TPA completion: mark the pages
 * consumed by this CQE, then walk the SGE mask from the current producer
 * forward, reclaiming fully-used mask elements.
 * NOTE(review): elided listing — the early 'if (sge_len == 0)' guard, a
 * 'delta = 0' initializer, braces, and the trailing producer DBPRINT are
 * missing from this chunk.
 */
14469 bxe_update_sge_prod(struct bxe_fastpath *fp,
14470 struct eth_fast_path_rx_cqe *fp_cqe)
14472 struct bxe_softc *sc;
14473 uint16_t delta, first_elem, last_max, last_elem, sge_len;
14477 DBENTER(BXE_EXTREME_RECV);
/* Number of SGE pages consumed by the non-linear part of the frame. */
14480 sge_len = SGE_PAGE_ALIGN(le16toh(fp_cqe->pkt_len) -
14481 le16toh(fp_cqe->len_on_bd)) >> SGE_PAGE_SHIFT;
14483 goto bxe_update_sge_prod_exit;
14485 /* First mark all used pages. */
14486 for (i = 0; i < sge_len; i++)
14487 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16toh(fp_cqe->sgl[i])));
14489 /* Assume that the last SGE index is the biggest. */
14490 bxe_update_last_max_sge(fp, le16toh(fp_cqe->sgl[sge_len - 1]));
14492 last_max = RX_SGE(fp->last_max_sge);
14493 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
14494 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
14496 /* If ring is not full. */
14497 if (last_elem + 1 != first_elem)
14500 /* Now update the producer index. */
14501 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
/* A nonzero mask element still has outstanding aggregations. */
14502 if (fp->rx_sge_mask[i])
14505 fp->rx_sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
14506 delta += RX_SGE_MASK_ELEM_SZ;
14510 fp->rx_sge_prod += delta;
14511 /* clear page-end entries */
14512 bxe_clear_sge_mask_next_elems(fp);
14515 bxe_update_sge_prod_exit:
14516 DBEXIT(BXE_EXTREME_RECV);
14520 * Initialize scatter gather ring bitmask.
14522 * Each entry in the SGE is associated with an aggregation in process.
14523 * Since there is no guarantee that all Ethernet frames associated with
14524 * a particular TCP flow will arrive at the adapter and be placed into
14525 * the SGE chain contiguously, we maintain a bitmask for each SGE element
14526 * that identifies which aggregation an Ethernet frame belongs to.
/*
 * Initialize the per-fastpath SGE ring bitmask: set every bit (all
 * entries available), then clear the per-page "next page pointer" slots
 * which never carry data.
 * NOTE(review): elided listing — braces are missing from this chunk.
 */
14531 static __inline void
14532 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
14535 /* Set the mask to all 1s, it's faster to compare to 0 than to 0xf. */
14536 memset(fp->rx_sge_mask, 0xff,
14537 (TOTAL_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT) * sizeof(uint64_t));
14540 * The SGE chain is formatted just like the RX chain.
14541 * The last two elements are reserved as a "next page pointer"
14542 * to the next page of SGE elements. Clear the last two
14543 * elements in each SGE chain page since they will never be
14544 * used to track an aggregation.
14546 bxe_clear_sge_mask_next_elems(fp);
14550 * The current mbuf is part of an aggregation. Swap the mbuf into the TPA
14551 * aggregation queue, swap an empty mbuf back onto the receive chain, and
14552 * mark the current aggregation queue as in-progress.
/*
 * Begin a TPA (LRO) aggregation: move the just-received mbuf from the RX
 * chain into the TPA pool slot for 'queue', and recycle the pool's spare
 * mbuf back onto the RX chain at 'prod'.  Marks the TPA bin as START.
 * NOTE(review): elided listing — the 'uint16_t prod' parameter line,
 * 'sc = fp->sc' style assignments, and braces are missing from this
 * chunk.
 */
14558 bxe_tpa_start(struct bxe_fastpath *fp, uint16_t queue, uint16_t cons,
14561 struct bxe_softc *sc;
14562 struct mbuf *m_temp;
14563 struct eth_rx_bd *rx_bd;
14564 bus_dmamap_t map_temp;
14565 int max_agg_queues;
14568 DBENTER(BXE_INSANE_RECV | BXE_INSANE_TPA);
14572 DBPRINT(sc, BXE_EXTREME_TPA,
14573 "%s(): fp[%02d].tpa[%02d], cons=0x%04X, prod=0x%04X\n",
14574 __FUNCTION__, fp->index, queue, cons, prod);
/* Aggregation queue count differs between E1 and E1H silicon. */
14576 max_agg_queues = CHIP_IS_E1(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
14577 ETH_MAX_AGGREGATION_QUEUES_E1H;
/* Debug-build sanity checks: queue index range and bin state. */
14579 DBRUNIF((queue > max_agg_queues),
14580 BXE_PRINTF("%s(): fp[%02d] illegal aggregation (%d > %d)!\n",
14581 __FUNCTION__, fp->index, queue, max_agg_queues));
14583 DBRUNIF((fp->tpa_state[queue] != BXE_TPA_STATE_STOP),
14584 BXE_PRINTF("%s(): Starting aggregation on "
14585 "fp[%02d].tpa[%02d] even though queue is not in the "
14586 "TPA_STOP state!\n", __FUNCTION__, fp->index, queue));
14588 /* Remove the existing mbuf and mapping from the TPA pool. */
14589 m_temp = fp->tpa_mbuf_ptr[queue];
14590 map_temp = fp->tpa_mbuf_map[queue];
14592 /* Only the paranoid survive! */
14593 if(m_temp == NULL) {
14594 BXE_PRINTF("%s(%d): fp[%02d].tpa[%02d] not allocated!\n",
14595 __FILE__, __LINE__, fp->index, queue);
14596 /* ToDo: Additional error handling! */
14597 goto bxe_tpa_start_exit;
14600 /* Move received mbuf and mapping to TPA pool. */
14601 fp->tpa_mbuf_ptr[queue] = fp->rx_mbuf_ptr[cons];
14602 fp->tpa_mbuf_map[queue] = fp->rx_mbuf_map[cons];
14604 /* Place the TPA bin into the START state. */
14605 fp->tpa_state[queue] = BXE_TPA_STATE_START;
14606 DBRUN(fp->tpa_queue_used |= (1 << queue));
14608 /* Get the rx_bd for the next open entry on the receive chain. */
14609 rx_bd = &fp->rx_chain[prod];
14611 /* Update the rx_bd with the empty mbuf from the TPA pool. */
14612 rx_bd->addr_hi = htole32(U64_HI(fp->tpa_mbuf_segs[queue].ds_addr));
14613 rx_bd->addr_lo = htole32(U64_LO(fp->tpa_mbuf_segs[queue].ds_addr));
14614 fp->rx_mbuf_ptr[prod] = m_temp;
14615 fp->rx_mbuf_map[prod] = map_temp;
14617 bxe_tpa_start_exit:
14618 DBEXIT(BXE_INSANE_RECV | BXE_INSANE_TPA);
14622 * When a TPA aggregation is completed, loop through the individual mbufs
14623 * of the aggregation, combining them into a single mbuf which will be sent
14624 * up the stack. Refill all freed SGEs with mbufs as we go along.
14627 * 0 = Success, !0 = Failure.
/*
 * Completed-TPA helper: walk the CQE's scatter-gather list, appending
 * each SGE-page mbuf fragment to the head mbuf 'm' and refilling each
 * freed SGE slot with a fresh mbuf.  Returns 0 on success, non-zero on
 * failure (returned via 'rc'; declaration elided in this chunk).
 * NOTE(review): elided listing — the 'rc'/'j' declarations, the
 * 'if (rc != 0)' checks after allocation, the m_cat()/m_next append, and
 * braces are missing from this chunk.
 */
14630 bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp,
14631 struct mbuf *m, struct eth_fast_path_rx_cqe *fp_cqe, uint16_t cqe_idx)
14633 struct mbuf *m_frag;
14634 uint32_t frag_len, frag_size, pages, i;
14635 uint16_t sge_idx, len_on_bd;
14638 DBENTER(BXE_EXTREME_RECV | BXE_EXTREME_TPA);
/* Bytes beyond the linear (len_on_bd) part, rounded up to SGE pages. */
14641 len_on_bd = le16toh(fp_cqe->len_on_bd);
14642 frag_size = le16toh(fp_cqe->pkt_len) - len_on_bd;
14643 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
14645 DBPRINT(sc, BXE_VERBOSE_TPA,
14646 "%s(): len_on_bd=%d, frag_size=%d, pages=%d\n",
14647 __FUNCTION__, len_on_bd, frag_size, pages);
14649 /* Make sure the aggregated frame is not too big to handle. */
14650 if (pages > 8 * PAGES_PER_SGE) {
14651 DBPRINT(sc, BXE_FATAL,
14652 "%s(): fp[%02d].rx_sge[0x%04X] has too many pages (%d)!\n",
14653 __FUNCTION__, fp->index, cqe_idx, pages);
14654 DBPRINT(sc, BXE_FATAL,
14655 "%s(): fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
14656 __FUNCTION__, le16toh(fp_cqe->pkt_len), len_on_bd);
14657 bxe_panic_dump(sc);
14659 goto bxe_fill_frag_mbuf_exit;
14663 * Scan through the scatter gather list, pulling individual
14664 * mbufs into a single mbuf for the host stack.
14666 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
14667 sge_idx = RX_SGE(le16toh(fp_cqe->sgl[j]));
14670 * Firmware gives the indices of the SGE as if the ring is an
14671 * array (meaning that the "next" element will consume 2
14674 frag_len = min(frag_size, (uint32_t)(BCM_PAGE_SIZE *
14677 DBPRINT(sc, BXE_VERBOSE_TPA,
14678 "%s(): i=%d, j=%d, frag_size=%d, frag_len=%d\n",
14679 __FUNCTION__, i, j, frag_size, frag_len);
14681 m_frag = fp->rx_sge_buf_ptr[sge_idx];
14683 /* Allocate a new mbuf for the SGE. */
14684 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
14687 * Leave all remaining SGEs in the ring.
14689 goto bxe_fill_frag_mbuf_exit;
14692 /* Update the fragment's length. */
14693 m_frag->m_len = frag_len;
14695 /* Concatenate the fragment to the head mbuf. */
14697 DBRUN(fp->sge_mbuf_alloc--);
14699 /* Update TPA mbuf size and remaining fragment size. */
14700 m->m_pkthdr.len += frag_len;
14701 frag_size -= frag_len;
14704 bxe_fill_frag_mbuf_exit:
14705 DBPRINT(sc, BXE_VERBOSE_TPA,
14706 "%s(): frag_size=%d\n", __FUNCTION__, frag_size);
14707 DBEXIT(BXE_EXTREME_RECV | BXE_EXTREME_TPA);
14712 * The aggregation on the current TPA queue has completed. Pull the
14713 * individual mbuf fragments together into a single mbuf, perform all
14714 * necessary checksum calculations, and send the resulting mbuf to the stack.
/*
 * Complete a TPA aggregation: replace the TPA-pool mbuf, fix up the head
 * mbuf (length, checksum flags), merge the SGE fragments via
 * bxe_fill_frag_mbuf(), attach any VLAN tag, and hand the frame to the
 * stack.  On allocation or merge failure the frame is dropped and
 * counted as a soft error.  Finally returns the TPA bin to STOP state.
 * NOTE(review): elided listing — 'ifp'/'m'/'rc' declarations, the
 * m_adj(pad) fixup, 'if (rc != 0)' checks, else-branches, and braces are
 * missing from this chunk.
 */
14720 bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue,
14721 int pad, int len, union eth_rx_cqe *cqe, uint16_t cqe_idx)
14727 DBENTER(BXE_INSANE_RECV | BXE_INSANE_TPA);
14728 DBPRINT(sc, (BXE_EXTREME_RECV | BXE_EXTREME_TPA),
14729 "%s(): fp[%02d].tpa[%02d], len=%d, pad=%d\n",
14730 __FUNCTION__, fp->index, queue, len, pad);
14734 m = fp->tpa_mbuf_ptr[queue];
14736 /* Allocate a replacement before modifying existing mbuf. */
14737 rc = bxe_alloc_tpa_mbuf(fp, queue);
14739 /* Drop the frame and log a soft error. */
14740 fp->rx_soft_errors++;
14741 goto bxe_tpa_stop_exit;
14744 /* We have a replacement, fixup the current mbuf. */
14746 m->m_pkthdr.len = m->m_len = len;
14748 /* Mark the checksums valid (taken care of by firmware). */
14749 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
14750 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
14751 m->m_pkthdr.csum_data = 0xffff;
14753 /* Aggregate all of the SGEs into a single mbuf. */
14754 rc = bxe_fill_frag_mbuf(sc, fp, m, &cqe->fast_path_cqe, cqe_idx);
14756 /* Drop the packet and log an error. */
14757 fp->rx_soft_errors++;
14760 /* Find VLAN tag and send frame up to the stack. */
14761 if ((le16toh(cqe->fast_path_cqe.pars_flags.flags) &
14762 PARSING_FLAGS_VLAN)) {
14763 m->m_pkthdr.ether_vtag =
14764 cqe->fast_path_cqe.vlan_tag;
14765 m->m_flags |= M_VLANTAG;
14768 /* Assign packet to the appropriate interface. */
14769 m->m_pkthdr.rcvif = ifp;
14771 /* Update packet statistics. */
14773 ifp->if_ipackets++;
14775 /* ToDo: Any potential locking issues here? */
14776 /* Pass the frame to the stack. */
14777 (*ifp->if_input)(ifp, m);
14780 /* We passed mbuf up the stack or dropped the frame. */
14781 DBRUN(fp->tpa_mbuf_alloc--);
/* Return the bin to STOP so a new aggregation can start. */
14784 fp->tpa_state[queue] = BXE_TPA_STATE_STOP;
14785 DBRUN(fp->tpa_queue_used &= ~(1 << queue));
14786 DBEXIT(BXE_INSANE_RECV | BXE_INSANE_TPA);
14790 * Notify the controller that the RX producer indices have been updated for
14791 * a fastpath connection by writing them to the controller.
/*
 * Publish new RX producer indices (BD, CQE, SGE) to the controller by
 * writing the ustorm_eth_rx_producers structure, one 32-bit word at a
 * time, into USTORM internal memory for this port/client.
 * NOTE(review): elided listing — the 'i' declaration, the
 * 'bxe_tpa_stop_exit:'-style label/braces, and a memory barrier between
 * filling rx_prods and writing it (if present in the full source) are
 * missing from this chunk.
 */
14796 static __inline void
14797 bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp,
14798 uint16_t bd_prod, uint16_t cqe_prod, uint16_t sge_prod)
14800 volatile struct ustorm_eth_rx_producers rx_prods = {0};
14803 /* Update producers. */
14804 rx_prods.bd_prod = bd_prod;
14805 rx_prods.cqe_prod = cqe_prod;
14806 rx_prods.sge_prod = sge_prod;
/* Copy the structure word-by-word into the device's USTORM memory. */
14810 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++){
14811 REG_WR(sc, BAR_USTORM_INTMEM +
14812 USTORM_RX_PRODS_OFFSET(BP_PORT(sc), fp->cl_id) + i * 4,
14813 ((volatile uint32_t *) &rx_prods)[i]);
14816 DBPRINT(sc, BXE_EXTREME_RECV, "%s(%d): Wrote fp[%02d] bd_prod = 0x%04X, "
14817 "cqe_prod = 0x%04X, sge_prod = 0x%04X\n", __FUNCTION__, curcpu,
14818 fp->index, bd_prod, cqe_prod, sge_prod);
14822 * Processes received frames.
14828 bxe_rxeof(struct bxe_fastpath *fp)
14830 struct bxe_softc *sc;
14832 uint16_t rx_bd_cons, rx_bd_cons_idx;
14833 uint16_t rx_bd_prod, rx_bd_prod_idx;
14834 uint16_t rx_cq_cons, rx_cq_cons_idx;
14835 uint16_t rx_cq_prod, rx_cq_cons_sb;
14836 unsigned long rx_pkts = 0;
14842 DBENTER(BXE_EXTREME_RECV);
14844 /* Get the status block's view of the RX completion consumer index. */
14845 rx_cq_cons_sb = bxe_rx_cq_cons(fp);
14848 * Get working copies of the driver's view of the
14849 * RX indices. These are 16 bit values that are
14850 * expected to increment from 0 to 65535 and then
14851 * wrap-around to 0 again.
14853 rx_bd_cons = fp->rx_bd_cons;
14854 rx_bd_prod = fp->rx_bd_prod;
14855 rx_cq_cons = fp->rx_cq_cons;
14856 rx_cq_prod = fp->rx_cq_prod;
14858 DBPRINT(sc, (BXE_EXTREME_RECV),
14859 "%s(%d): BEFORE: fp[%02d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
14860 "rx_cq_cons_sw = 0x%04X, rx_cq_prod_sw = 0x%04X\n", __FUNCTION__,
14861 curcpu, fp->index, rx_bd_cons, rx_bd_prod, rx_cq_cons, rx_cq_prod);
14864 * Memory barrier to prevent speculative reads of the RX buffer
14865 * from getting ahead of the index in the status block.
14870 * Scan through the receive chain as long
14871 * as there is work to do.
14873 while (rx_cq_cons != rx_cq_cons_sb) {
14875 union eth_rx_cqe *cqe;
14876 uint8_t cqe_fp_flags;
14880 * Convert the 16 bit indices used by hardware
14881 * into array indices used by the driver.
14883 rx_cq_cons_idx = RCQ_ENTRY(rx_cq_cons);
14884 rx_bd_prod_idx = RX_BD(rx_bd_prod);
14885 rx_bd_cons_idx = RX_BD(rx_bd_cons);
14888 /* Fetch the completion queue entry (i.e. cookie). */
14889 cqe = (union eth_rx_cqe *)
14890 &fp->rcq_chain[rx_cq_cons_idx];
14891 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
14893 /* Sanity check the cookie flags. */
14894 if (__predict_false(cqe_fp_flags == 0)) {
14895 fp->rx_null_cqe_flags++;
14896 DBRUN(bxe_dump_cqe(fp, rx_cq_cons_idx, cqe));
14897 /* ToDo: What error handling can be done here? */
14900 /* Check the CQE type for slowpath or fastpath completion. */
14901 if (__predict_false(CQE_TYPE(cqe_fp_flags) ==
14902 RX_ETH_CQE_TYPE_ETH_RAMROD)) {
14903 /* This is a slowpath completion. */
14904 bxe_sp_event(fp, cqe);
14905 goto bxe_rxeof_next_cqe;
14908 /* This is a fastpath completion. */
14910 /* Get the length and pad information from the CQE. */
14911 len = le16toh(cqe->fast_path_cqe.pkt_len);
14912 pad = cqe->fast_path_cqe.placement_offset;
14914 /* Check if the completion is for TPA. */
14915 if ((fp->disable_tpa == FALSE) &&
14916 (TPA_TYPE(cqe_fp_flags) !=
14917 (TPA_TYPE_START | TPA_TYPE_END))) {
14918 uint16_t queue = cqe->fast_path_cqe.queue_index;
14921 * No need to worry about error flags in
14922 * the frame as the firmware has already
14923 * managed that for us when aggregating
14927 /* Check if TPA aggregation has started. */
14928 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
14929 bxe_tpa_start(fp, queue, rx_bd_cons_idx,
14931 goto bxe_rxeof_next_rx;
14934 /* Check if TPA aggregation has completed. */
14935 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
14936 DBRUNIF(!BXE_RX_SUM_FIX(cqe),
14937 DBPRINT(sc, BXE_FATAL,
14938 "%s(): STOP on non-TCP data.\n",
14942 * This is the size of the linear
14943 * data on this mbuf.
14945 len = le16toh(cqe->fast_path_cqe.len_on_bd);
14948 * Stop the aggregation and pass
14951 bxe_tpa_stop(sc, fp, queue, pad, len,
14952 cqe, rx_cq_cons_idx);
14953 bxe_update_sge_prod(fp,
14954 &cqe->fast_path_cqe);
14955 goto bxe_rxeof_next_cqe;
14959 m = fp->rx_mbuf_ptr[rx_bd_cons_idx];
14961 /* Allocate a replacement before modifying existing mbuf. */
14962 rc = bxe_alloc_rx_bd_mbuf(fp, rx_bd_prod_idx);
14964 /* Drop the frame and log a soft error. */
14965 fp->rx_soft_errors++;
14966 goto bxe_rxeof_next_rx;
14969 /* Check if the received frame has any errors. */
14970 if (__predict_false(cqe_fp_flags &
14971 ETH_RX_ERROR_FLAGS)) {
14972 DBPRINT(sc, BXE_WARN ,
14973 "%s(): fp[%02d].cqe[0x%04X] has errors "
14974 "(0x%08X)!\n", __FUNCTION__, fp->index,
14975 rx_cq_cons, cqe_fp_flags);
14977 fp->rx_soft_errors++;
14978 goto bxe_rxeof_next_rx;
14981 /* We have a replacement, fixup the current mbuf. */
14983 m->m_pkthdr.len = m->m_len = len;
14985 /* Assign packet to the appropriate interface. */
14986 m->m_pkthdr.rcvif = ifp;
14988 /* Assume no hardware checksum complated. */
14989 m->m_pkthdr.csum_flags = 0;
14991 /* Validate checksum if offload enabled. */
14992 if (ifp->if_capenable & IFCAP_RXCSUM) {
14993 /* Check whether IP checksummed or not. */
14995 !(cqe->fast_path_cqe.status_flags &
14996 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
14997 m->m_pkthdr.csum_flags |=
14999 if (__predict_false(cqe_fp_flags &
15000 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
15001 DBPRINT(sc, BXE_WARN_SEND,
15002 "%s(): Invalid IP checksum!\n",
15005 m->m_pkthdr.csum_flags |=
15009 /* Check for a valid TCP/UDP frame. */
15011 !(cqe->fast_path_cqe.status_flags &
15012 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
15013 /* Check for a good TCP/UDP checksum. */
15014 if (__predict_false(cqe_fp_flags &
15015 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
15016 DBPRINT(sc, BXE_VERBOSE_RECV,
15017 "%s(): Invalid TCP/UDP checksum!\n",
15020 m->m_pkthdr.csum_data = 0xFFFF;
15021 m->m_pkthdr.csum_flags |=
15029 * If we received a packet with a vlan tag,
15030 * attach that information to the packet.
15032 if (cqe->fast_path_cqe.pars_flags.flags &
15033 PARSING_FLAGS_VLAN) {
15034 m->m_pkthdr.ether_vtag =
15035 cqe->fast_path_cqe.vlan_tag;
15036 m->m_flags |= M_VLANTAG;
15039 #if __FreeBSD_version >= 800000
15040 /* Tell OS what RSS queue was used for this flow. */
15041 m->m_pkthdr.flowid = fp->index;
15042 m->m_flags |= M_FLOWID;
15045 /* Last chance to check for problems. */
15046 DBRUN(bxe_validate_rx_packet(fp, rx_cq_cons, cqe, m));
15048 /* Update packet statistics. */
15049 ifp->if_ipackets++;
15052 /* ToDo: Any potential locking issues here? */
15053 /* Pass the frame to the stack. */
15054 (*ifp->if_input)(ifp, m);
15056 DBRUN(fp->rx_mbuf_alloc--);
15060 rx_bd_prod = NEXT_RX_BD(rx_bd_prod);
15061 rx_bd_cons = NEXT_RX_BD(rx_bd_cons);
15063 bxe_rxeof_next_cqe:
15064 rx_cq_prod = NEXT_RCQ_IDX(rx_cq_prod);
15065 rx_cq_cons = NEXT_RCQ_IDX(rx_cq_cons);
15068 * Memory barrier to prevent speculative reads of the RX buffer
15069 * from getting ahead of the index in the status block.
15074 /* Update driver copy of the fastpath indices. */
15075 fp->rx_bd_cons = rx_bd_cons;
15076 fp->rx_bd_prod = rx_bd_prod;
15077 fp->rx_cq_cons = rx_cq_cons;
15078 fp->rx_cq_prod = rx_cq_prod;
15080 DBPRINT(sc, (BXE_EXTREME_RECV),
15081 "%s(%d): AFTER: fp[%02d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
15082 "rx_cq_cons_sw = 0x%04X, rx_cq_prod_sw = 0x%04X\n", __FUNCTION__,
15083 curcpu, fp->index, rx_bd_cons, rx_bd_prod, rx_cq_cons, rx_cq_prod);
15085 /* Update producers */
15086 bxe_update_rx_prod(sc, fp, fp->rx_bd_prod,
15087 fp->rx_cq_prod, fp->rx_sge_prod);
15088 bus_space_barrier(sc->bxe_btag, sc->bxe_bhandle, 0, 0,
15089 BUS_SPACE_BARRIER_READ);
15091 fp->rx_pkts += rx_pkts;
15092 DBEXIT(BXE_EXTREME_RECV);
15096 * Processes transmit completions.
/*
 * bxe_txeof(): process TX completions for one fastpath queue.
 *
 * Walks the TX chain from the software consumer index up to the
 * hardware's reported consumer index, unmapping and freeing the mbuf
 * for each completed frame, then updates the driver's consumer indices
 * and re-arms or clears the TX watchdog timer.
 *
 * NOTE(review): this listing omits blank/structural lines from the
 * original file (the embedded numbers are the original line numbers),
 * so some assignments (e.g. sc, ifp, nbds) are not visible here.
 */
15102 bxe_txeof(struct bxe_fastpath *fp)
15104 struct bxe_softc *sc;
15106 struct eth_tx_start_bd *txbd;
15107 uint16_t hw_pkt_cons, sw_pkt_cons, sw_tx_bd_cons;
15108 uint16_t bd_index, pkt_index, nbds;
15114 DBENTER(BXE_EXTREME_SEND);
15116 /* Get the hardware's view of the TX packet consumer index. */
15117 hw_pkt_cons = le16toh(*fp->tx_pkt_cons_sb);
15118 sw_pkt_cons = fp->tx_pkt_cons;
15119 sw_tx_bd_cons = fp->tx_bd_cons;
15121 /* Cycle through any completed TX chain page entries. */
15122 while (sw_pkt_cons != hw_pkt_cons) {
15123 bd_index = TX_BD(sw_tx_bd_cons);
15124 pkt_index = TX_BD(sw_pkt_cons);
15126 txbd = &fp->tx_chain[bd_index].start_bd;
15129 /* Free the completed frame's mbuf. */
15130 if (__predict_true(fp->tx_mbuf_ptr[pkt_index] != NULL)) {
15131 /* Unmap the mbuf from non-paged memory. */
15132 bus_dmamap_unload(fp->tx_mbuf_tag,
15133 fp->tx_mbuf_map[pkt_index]);
15135 /* Return the mbuf to the system. */
15136 m_freem(fp->tx_mbuf_ptr[pkt_index]);
15137 fp->tx_mbuf_alloc--;
15138 fp->tx_mbuf_ptr[pkt_index] = NULL;
/* A completion with no mbuf in the slot indicates ring accounting
 * got out of sync; count it rather than crash. */
15141 fp->tx_chain_lost_mbuf++;
15144 /* Update the packet consumer value. */
15147 /* Skip over the remaining used buffer descriptors. */
/* NOTE(review): nbds is presumably taken from the completed BD's
 * "number of BDs" field on a line not visible in this listing —
 * confirm against the full source. */
15148 fp->tx_bd_used -= nbds;
15149 for (i = 0; i < nbds; i++)
15150 sw_tx_bd_cons = NEXT_TX_BD(sw_tx_bd_cons);
15152 /* Check for new work since we started. */
15153 hw_pkt_cons = le16toh(*fp->tx_pkt_cons_sb);
15157 /* Enable new transmits if we've made enough room. */
15158 if (fp->tx_bd_used < BXE_TX_CLEANUP_THRESHOLD) {
15159 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
15160 if (fp->tx_bd_used == 0) {
15162 * Clear the watchdog timer if we've emptied
15165 fp->watchdog_timer = 0;
15168 * Reset the watchdog timer if we still have
15169 * transmits pending.
15171 fp->watchdog_timer = BXE_TX_TIMEOUT;
15175 /* Save our indices. */
15176 fp->tx_pkt_cons = sw_pkt_cons;
15177 fp->tx_bd_cons = sw_tx_bd_cons;
15178 DBEXIT(BXE_EXTREME_SEND);
15182 * Transmit timeout handler.
15185 * 0 = No timeout, !0 = timeout occurred.
/*
 * bxe_watchdog(): per-fastpath TX timeout check, run from bxe_tick().
 *
 * A watchdog_timer value of 0 means the timer is disarmed; otherwise it
 * is decremented each tick and a timeout fires only when it reaches 0.
 * On timeout the interface is marked down and the controller is stopped
 * and re-initialized (full reset recovery).
 */
15188 bxe_watchdog(struct bxe_fastpath *fp)
15190 struct bxe_softc *sc;
15194 DBENTER(BXE_INSANE_SEND);
/* Disarmed, or armed but not yet expired after this tick's decrement. */
15197 if (fp->watchdog_timer == 0 || --fp->watchdog_timer) {
15200 goto bxe_watchdog_exit;
15204 BXE_PRINTF("TX watchdog timeout occurred on fp[%02d], "
15205 "resetting!\n", fp->index);
15207 /* DBRUNLV(BXE_FATAL, bxe_breakpoint(sc)); */
15211 /* Mark the interface as down. */
15212 sc->bxe_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Full stop/start cycle to recover the hardware. */
15214 bxe_stop_locked(sc, UNLOAD_NORMAL);
15216 bxe_init_locked(sc, LOAD_OPEN);
15218 BXE_CORE_UNLOCK(sc);
15221 DBEXIT(BXE_INSANE_SEND);
15227 * The periodic timer tick routine.
15229 * This code only runs when the interface is up.
/*
 * bxe_tick(): periodic (1 Hz) timer callout.
 *
 * Checks every fastpath queue for TX timeouts, exchanges keep-alive
 * "pulse" sequence numbers with the management CPU (MCP) through shared
 * memory, warns if the MCP has stopped responding, and kicks a
 * statistics update while the device is in an operational state.
 */
15235 bxe_tick(void *xsc)
15237 struct bxe_softc *sc;
15238 struct bxe_fastpath *fp;
15240 /* Re-enable at a later time. */
15241 uint32_t drv_pulse, mcp_pulse;
15246 DBENTER(BXE_INSANE_MISC);
15249 /* Check for TX timeouts on any fastpath. */
15250 for (i = 0; i < sc->num_queues; i++) {
15253 if (bxe_watchdog(fp) != 0)
15257 func = BP_FUNC(sc);
15259 /* Schedule the next tick. */
15260 callout_reset(&sc->bxe_tick_callout, hz, bxe_tick, sc);
15264 func = BP_FUNC(sc);
/* Advance the driver's pulse sequence number (wraps via the mask). */
15266 ++sc->fw_drv_pulse_wr_seq;
15267 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
15269 /* Let the MCP know we're alive. */
15270 drv_pulse = sc->fw_drv_pulse_wr_seq;
15271 SHMEM_WR(sc, func_mb[func].drv_pulse_mb, drv_pulse);
15273 /* Check if the MCP is still alive. */
15274 mcp_pulse = (SHMEM_RD(sc, func_mb[func].mcp_pulse_mb) &
15275 MCP_PULSE_SEQ_MASK);
15278 * The delta between driver pulse and MCP response should be 1
15279 * (before MCP response) or 0 (after MCP response).
15281 if ((drv_pulse != mcp_pulse) && (drv_pulse != ((mcp_pulse + 1) &
15282 MCP_PULSE_SEQ_MASK))) {
15283 /* Someone's in cardiac arrest. */
15284 DBPRINT(sc, BXE_WARN,
15285 "%s(): drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
15286 __FUNCTION__, drv_pulse, mcp_pulse);
/* Update statistics only while the interface is operational. */
15291 if ((sc->state == BXE_STATE_OPEN) || (sc->state == BXE_STATE_DISABLED))
15292 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
15297 * Allows the driver state to be dumped through the sysctl interface.
15300 * 0 for success, positive value for failure.
/*
 * Sysctl handler: any write to the node dumps the softc driver state,
 * the per-queue fastpath state, and the status block to the console.
 * Returns 0 on success or a sysctl error code.
 */
15303 bxe_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
15305 struct bxe_softc *sc;
15306 struct bxe_fastpath *fp;
15307 int error, i, result;
15309 sc = (struct bxe_softc *)arg1;
15311 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15312 if (error || !req->newptr)
15316 bxe_dump_driver_state(sc);
15317 for (i = 0; i < sc->num_queues; i++) {
15319 bxe_dump_fp_state(fp);
15321 bxe_dump_status_block(sc);
15328 * Allows the hardware state to be dumped through the sysctl interface.
15331 * 0 for success, positive value for failure.
/*
 * Sysctl handler: any write to the node dumps the hardware state to the
 * console.  Returns 0 on success or a sysctl error code.
 */
15334 bxe_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
15336 struct bxe_softc *sc;
15339 sc = (struct bxe_softc *)arg1;
15341 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15342 if (error || !req->newptr)
15346 bxe_dump_hw_state(sc);
15352 * Allows the MCP firmware to be dumped through the sysctl interface.
15355 * 0 for success, positive value for failure.
/*
 * Sysctl handler: any write to the node dumps the MCP firmware state.
 * Returns 0 on success or a sysctl error code.
 * NOTE(review): the line invoking the actual dump routine is not
 * visible in this listing — confirm against the full source.
 */
15358 bxe_sysctl_dump_fw(SYSCTL_HANDLER_ARGS)
15360 struct bxe_softc *sc;
15363 sc = (struct bxe_softc *)arg1;
15365 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15366 if (error || !req->newptr)
15376 * Provides a sysctl interface to allow dumping the RX completion chain.
15379 * 0 for success, positive value for failure.
/*
 * Sysctl handler: writing a queue index to the node dumps that queue's
 * entire RX completion queue chain.  Out-of-range indices are ignored.
 * Returns 0 on success or a sysctl error code.
 */
15382 bxe_sysctl_dump_rx_cq_chain(SYSCTL_HANDLER_ARGS)
15384 struct bxe_softc *sc;
15385 struct bxe_fastpath *fp;
15388 sc = (struct bxe_softc *)arg1;
15390 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15391 if (error || !req->newptr)
/* The written value selects the fastpath queue to dump. */
15394 if ((result >= 0) && (result < sc->num_queues)) {
15395 fp = &sc->fp[result];
15396 bxe_dump_rx_cq_chain(fp, 0, TOTAL_RCQ_ENTRIES);
15404 * Provides a sysctl interface to allow dumping the RX chain.
15407 * 0 for success, positive value for failure.
/*
 * Sysctl handler: writing a queue index to the node dumps that queue's
 * entire rx_bd chain.  Out-of-range indices are ignored.
 * Returns 0 on success or a sysctl error code.
 */
15410 bxe_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
15412 struct bxe_softc *sc;
15413 struct bxe_fastpath *fp;
15416 sc = (struct bxe_softc *)arg1;
15418 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15419 if (error || !req->newptr)
/* The written value selects the fastpath queue to dump. */
15422 if ((result >= 0) && (result < sc->num_queues)) {
15423 fp = &sc->fp[result];
15424 bxe_dump_rx_bd_chain(fp, 0, TOTAL_RX_BD);
15431 * Provides a sysctl interface to allow dumping the TX chain.
15434 * 0 for success, positive value for failure.
/*
 * Sysctl handler: writing a queue index to the node dumps that queue's
 * entire tx_bd chain.  Out-of-range indices are ignored.
 * Returns 0 on success or a sysctl error code.
 */
15437 bxe_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
15439 struct bxe_softc *sc;
15440 struct bxe_fastpath *fp;
15443 sc = (struct bxe_softc *)arg1;
15445 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15446 if (error || !req->newptr)
/* The written value selects the fastpath queue to dump. */
15449 if ((result >= 0) && (result < sc->num_queues)) {
15450 fp = &sc->fp[result];
15451 bxe_dump_tx_chain(fp, 0, TOTAL_TX_BD);
15458 * Provides a sysctl interface to allow reading arbitrary registers in the
15459 * device. DO NOT ENABLE ON PRODUCTION SYSTEMS!
15462 * 0 for success, positive value for failure.
/*
 * Sysctl handler: the value written to the node is treated as a device
 * register offset; the register is read and its contents printed to the
 * console.  Debug only — DO NOT ENABLE ON PRODUCTION SYSTEMS.
 * Returns 0 on success or a sysctl error code.
 */
15465 bxe_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
15467 struct bxe_softc *sc;
15468 uint32_t result, val;
15471 sc = (struct bxe_softc *)arg1;
15473 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15474 if (error || (req->newptr == NULL))
/* "result" is the register offset supplied by the user. */
15477 val = REG_RD(sc, result);
15478 BXE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
15484 * Provides a sysctl interface to allow generating a grcdump.
15487 * 0 for success, positive value for failure.
/*
 * Sysctl handler: any write to the node generates a GRC dump into the
 * driver's dump buffer; the written value selects whether the contents
 * are also logged to the console.  Returns 0 on success or a sysctl
 * error code.
 */
15490 bxe_sysctl_grcdump(SYSCTL_HANDLER_ARGS)
15492 struct bxe_softc *sc;
15495 sc = (struct bxe_softc *)arg1;
15497 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15498 if (error || !req->newptr)
15502 /* Generate a grcdump and log the contents.*/
15503 bxe_grcdump(sc, 1);
15505 /* Generate a grcdump and don't log the contents. */
15506 bxe_grcdump(sc, 0);
15513 * Provides a sysctl interface to forcing the driver to dump state and
15514 * enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS!
15517 * 0 for success, positive value for failure.
/*
 * Sysctl handler: any write to the node forces the driver to dump state
 * and drop into the debugger.  Debug only — DO NOT ENABLE ON PRODUCTION
 * SYSTEMS.  Returns 0 on success or a sysctl error code.
 */
15520 bxe_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
15522 struct bxe_softc *sc;
15526 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no new value supplied) is a no-op. */
15527 if (error || !req->newptr)
15531 sc = (struct bxe_softc *)arg1;
15532 bxe_breakpoint(sc);
15540 * Adds any sysctl parameters for tuning or debugging purposes.
/*
 * bxe_add_sysctls(): register all of the driver's sysctl nodes.
 *
 * Creates read-only statistics nodes from the port statistics block
 * (estats) and from the softc, one sub-tree of per-queue fastpath
 * statistics per RX/TX queue, and (under BXE_DEBUG) read/write
 * procedure nodes for dumping driver/hardware state, chains, firmware,
 * grcdump data, register reads, and a breakpoint trigger.
 */
15546 bxe_add_sysctls(struct bxe_softc *sc)
15548 struct sysctl_ctx_list *ctx =
15549 device_get_sysctl_ctx(sc->dev);
15550 struct sysctl_oid_list *children =
15551 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15552 struct bxe_port_stats *estats = &sc->eth_stats;
/* Port-wide statistics from the firmware statistics block. */
15554 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15555 "estats_total_bytes_received_hi",
15556 CTLFLAG_RD, &estats->total_bytes_received_hi,
15557 0, "Total bytes received (hi)");
15559 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15560 "estats_total_bytes_received_lo",
15561 CTLFLAG_RD, &estats->total_bytes_received_lo,
15562 0, "Total bytes received (lo)");
15564 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15565 "estats_valid_bytes_received_hi",
15566 CTLFLAG_RD, &estats->valid_bytes_received_hi,
15567 0, "Valid bytes received (hi)");
15569 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15570 "estats_valid_bytes_received_lo",
15571 CTLFLAG_RD, &estats->valid_bytes_received_lo,
15572 0, "Valid bytes received (lo)");
15574 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15575 "estats_total_unicast_packets_received_hi",
15576 CTLFLAG_RD, &estats->total_unicast_packets_received_hi,
15577 0, "Total unicast packets received (hi)");
15579 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15580 "estats_total_unicast_packets_received_lo",
15581 CTLFLAG_RD, &estats->total_unicast_packets_received_lo,
15582 0, "Total unicast packets received (lo)");
15584 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15585 "estats_total_bytes_transmitted_hi",
15586 CTLFLAG_RD, &estats->total_bytes_transmitted_hi,
15587 0, "Total bytes transmitted (hi)");
15589 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15590 "estats_total_bytes_transmitted_lo",
15591 CTLFLAG_RD, &estats->total_bytes_transmitted_lo,
15592 0, "Total bytes transmitted (lo)");
15594 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15595 "estats_total_unicast_packets_transmitted_hi",
15596 CTLFLAG_RD, &estats->total_unicast_packets_transmitted_hi,
15597 0, "Total unicast packets transmitted (hi)");
15599 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15600 "estats_total_unicast_packets_transmitted_lo",
15601 CTLFLAG_RD, &estats->total_unicast_packets_transmitted_lo,
15602 0, "Total unicast packets transmitted (lo)");
15604 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15605 "estats_total_broadcast_packets_received_lo",
15606 CTLFLAG_RD, &estats->total_broadcast_packets_received_lo,
15607 0, "Total broadcast packets received (lo)");
15609 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15610 "estats_total_broadcast_packets_transmitted_lo",
15611 CTLFLAG_RD, &estats->total_broadcast_packets_transmitted_lo,
15612 0, "Total broadcast packets transmitted (lo)");
15614 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15615 "estats_total_multicast_packets_received_lo",
15616 CTLFLAG_RD, &estats->total_multicast_packets_received_lo,
15617 0, "Total multicast packets received (lo)");
15619 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15620 "estats_total_multicast_packets_transmitted_lo",
15621 CTLFLAG_RD, &estats->total_multicast_packets_transmitted_lo,
15622 0, "Total multicast packets transmitted (lo)");
15624 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15625 "tx_stat_etherstatspkts64octets_hi",
15626 CTLFLAG_RD, &estats->tx_stat_etherstatspkts64octets_hi,
15627 0, "Total 64 byte packets transmitted (hi)");
15629 /* ToDo: Fix for 64 bit access. */
15630 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15631 "tx_stat_etherstatspkts64octets_lo",
15632 CTLFLAG_RD, &estats->tx_stat_etherstatspkts64octets_lo,
15633 0, "Total 64 byte packets transmitted (lo)");
15635 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15637 CTLFLAG_RD, &estats->driver_xoff,
15638 0, "Driver transmit queue full count");
/* Driver-maintained (softc) counters. */
15640 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
15641 "tx_start_called_with_link_down",
15642 CTLFLAG_RD, &sc->tx_start_called_with_link_down,
15643 "TX start routine called while link down count");
15645 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
15646 "tx_start_called_with_queue_full",
15647 CTLFLAG_RD, &sc->tx_start_called_with_queue_full,
15648 "TX start routine called with queue full count");
15650 /* ToDo: Add more statistics here. */
15653 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "bxe_debug",
15654 CTLFLAG_RW, &bxe_debug, 0,
15655 "Debug message level flag");
15659 #define QUEUE_NAME_LEN 32
15660 char namebuf[QUEUE_NAME_LEN];
15661 struct sysctl_oid *queue_node;
15662 struct sysctl_oid_list *queue_list;
/* One "fp[NN]" sub-tree of statistics per fastpath queue. */
15664 for (int i = 0; i < sc->num_queues; i++) {
15665 struct bxe_fastpath *fp = &sc->fp[i];
15666 snprintf(namebuf, QUEUE_NAME_LEN, "fp[%02d]", i);
15668 queue_node = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
15669 namebuf, CTLFLAG_RD, NULL, "Queue Name");
15670 queue_list = SYSCTL_CHILDREN(queue_node);
15673 * Receive related fastpath statistics.*
15675 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15677 CTLFLAG_RD, &fp->rx_pkts,
15678 "Received packets");
15680 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15682 CTLFLAG_RD, &fp->rx_tpa_pkts,
15683 "Received TPA packets");
15685 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15686 "rx_null_cqe_flags",
15687 CTLFLAG_RD, &fp->rx_null_cqe_flags,
15688 "CQEs with NULL flags count");
15690 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15692 CTLFLAG_RD, &fp->rx_soft_errors,
15693 "Received frames dropped by driver count");
15696 * Transmit related fastpath statistics.*
15698 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15700 CTLFLAG_RD, &fp->tx_pkts,
15701 "Transmitted packets");
15703 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15705 CTLFLAG_RD, &fp->tx_soft_errors,
15706 "Transmit frames dropped by driver count");
15708 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15709 "tx_offload_frames_csum_ip",
15710 CTLFLAG_RD, &fp->tx_offload_frames_csum_ip,
15711 "IP checksum offload frame count");
15713 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15714 "tx_offload_frames_csum_tcp",
15715 CTLFLAG_RD, &fp->tx_offload_frames_csum_tcp,
15716 "TCP checksum offload frame count");
15718 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15719 "tx_offload_frames_csum_udp",
15720 CTLFLAG_RD, &fp->tx_offload_frames_csum_udp,
15721 "UDP checksum offload frame count");
15723 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15724 "tx_offload_frames_tso",
15725 CTLFLAG_RD, &fp->tx_offload_frames_tso,
15726 "TSO offload frame count");
15728 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15729 "tx_header_splits",
15730 CTLFLAG_RD, &fp->tx_header_splits,
15731 "TSO frame header/data split count");
15733 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15734 "tx_encap_failures",
15735 CTLFLAG_RD, &fp->tx_encap_failures,
15736 "TX encapsulation failure count");
15738 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15739 "tx_hw_queue_full",
15740 CTLFLAG_RD, &fp->tx_hw_queue_full,
15741 "TX H/W queue too full to add a frame count");
15743 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15744 "tx_hw_max_queue_depth",
15745 CTLFLAG_RD, &fp->tx_hw_max_queue_depth,
15746 "TX H/W maximum queue depth count");
15748 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15749 "tx_dma_mapping_failure",
15750 CTLFLAG_RD, &fp->tx_dma_mapping_failure,
15751 "TX DMA mapping failure");
15753 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO,
15754 "tx_max_drbr_queue_depth",
15755 CTLFLAG_RD, &fp->tx_max_drbr_queue_depth,
15756 0, "TX S/W queue maximum depth");
15758 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15759 "tx_window_violation_std",
15760 CTLFLAG_RD, &fp->tx_window_violation_std,
15761 "Standard frame TX BD window violation count");
15763 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15764 "tx_window_violation_tso",
15765 CTLFLAG_RD, &fp->tx_window_violation_tso,
15766 "TSO frame TX BD window violation count");
15768 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15769 "tx_unsupported_tso_request_ipv6",
15770 CTLFLAG_RD, &fp->tx_unsupported_tso_request_ipv6,
15771 "TSO frames with unsupported IPv6 protocol count");
15773 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15774 "tx_unsupported_tso_request_not_tcp",
15775 CTLFLAG_RD, &fp->tx_unsupported_tso_request_not_tcp,
15776 "TSO frames with unsupported protocol count");
15778 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15779 "tx_chain_lost_mbuf",
15780 CTLFLAG_RD, &fp->tx_chain_lost_mbuf,
15781 "Mbufs lost on TX chain count");
15783 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15784 "tx_frame_deferred",
15785 CTLFLAG_RD, &fp->tx_frame_deferred,
15786 "TX frame deferred from H/W queue to S/W queue count");
15788 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15790 CTLFLAG_RD, &fp->tx_queue_xoff,
15791 "TX queue full count");
15794 * Memory related fastpath statistics.*
15796 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15797 "mbuf_rx_bd_alloc_failed",
15798 CTLFLAG_RD, &fp->mbuf_rx_bd_alloc_failed,
15799 "RX BD mbuf allocation failure count");
15801 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15802 "mbuf_rx_bd_mapping_failed",
15803 CTLFLAG_RD, &fp->mbuf_rx_bd_mapping_failed,
15804 "RX BD mbuf mapping failure count");
15806 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15807 "mbuf_tpa_alloc_failed",
15808 CTLFLAG_RD, &fp->mbuf_tpa_alloc_failed,
15809 "TPA mbuf allocation failure count");
15811 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15812 "mbuf_tpa_mapping_failed",
15813 CTLFLAG_RD, &fp->mbuf_tpa_mapping_failed,
15814 "TPA mbuf mapping failure count");
15816 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15817 "mbuf_sge_alloc_failed",
15818 CTLFLAG_RD, &fp->mbuf_sge_alloc_failed,
15819 "SGE mbuf allocation failure count");
15821 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15822 "mbuf_sge_mapping_failed",
15823 CTLFLAG_RD, &fp->mbuf_sge_mapping_failed,
15824 "SGE mbuf mapping failure count");
15826 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15827 "mbuf_defrag_attempts",
15828 CTLFLAG_RD, &fp->mbuf_defrag_attempts,
15829 "Mbuf defrag attempt count");
15831 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15832 "mbuf_defrag_failures",
15833 CTLFLAG_RD, &fp->mbuf_defrag_failures,
15834 "Mbuf defrag failure count");
/* Debug-only procedure nodes (state dumps, register access, etc.). */
15840 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "driver_state",
15841 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15842 bxe_sysctl_driver_state, "I", "Drive state information");
15844 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_state",
15845 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15846 bxe_sysctl_hw_state, "I", "Hardware state information");
15848 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_fw",
15849 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15850 bxe_sysctl_dump_fw, "I", "Dump MCP firmware");
15852 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_bd_chain",
15853 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15854 bxe_sysctl_dump_rx_bd_chain, "I", "Dump rx_bd chain");
15856 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_cq_chain",
15857 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15858 bxe_sysctl_dump_rx_cq_chain, "I", "Dump cqe chain");
15860 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_tx_chain",
15861 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15862 bxe_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
15865 * Generates a GRCdump (run sysctl dev.bxe.0.grcdump=0
15866 * before accessing buffer below).
15868 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "grcdump",
15869 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bxe_sysctl_grcdump,
15870 "I", "Initiate a grcdump operation");
15874 * Use "sysctl -b dev.bxe.0.grcdump_buffer > buf.bin".
15876 SYSCTL_ADD_OPAQUE(ctx, children, OID_AUTO, "grcdump_buffer",
15877 CTLFLAG_RD | CTLFLAG_SKIP, sc->grcdump_buffer,
15878 BXE_GRCDUMP_BUF_SIZE, "IU", "Access grcdump buffer");
15880 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "breakpoint",
15881 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15882 bxe_sysctl_breakpoint, "I", "Driver breakpoint");
15884 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
15885 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15886 bxe_sysctl_reg_read, "I", "Register read");
15888 #endif /* BXE_DEBUG */
15892 * BXE Debug Routines
15896 * Writes out the header for the debug dump buffer.
/*
 * bxe_dump_debug_header(): write the grcdump header into the dump
 * buffer at *index and advance the index.
 *
 * The header consists of the chip-specific hd_param fields (timestamp,
 * diag version, grc dump version) followed by the four STORM WAITP
 * register values; the header length in DWORDs is stored in buf[0].
 */
15905 bxe_dump_debug_header(struct bxe_softc *sc, uint32_t *index)
15907 struct hd_param hd_param_cu = {0};
15910 buf = sc->grcdump_buffer;
/* Select the parameter set for the chip revision (E1H vs. E1). */
15911 if (CHIP_IS_E1H(sc))
15912 hd_param_cu = hd_param_e1h;
15914 hd_param_cu = hd_param_e1;
15916 buf[(*index)++] = hd_param_cu.time_stamp;
15917 buf[(*index)++] = hd_param_cu.diag_ver;
15918 buf[(*index)++] = hd_param_cu.grc_dump_ver;
15920 buf[(*index)++] = REG_RD_IND(sc, XSTORM_WAITP_ADDRESS);
15921 buf[(*index)++] = REG_RD_IND(sc, TSTORM_WAITP_ADDRESS);
15922 buf[(*index)++] = REG_RD_IND(sc, USTORM_WAITP_ADDRESS);
15923 buf[(*index)++] = REG_RD_IND(sc, CSTORM_WAITP_ADDRESS);
15925 /* The size of the header is stored at the first DWORD. */
15926 buf[0] = (*index) - 1;
15931 * Writes to the controller to prepare it for a dump.
/*
 * bxe_dump_debug_writes(): quiesce the controller before a grcdump by
 * writing to the STORM WAITP addresses (and TSTORM CAM mode on E1H).
 *
 * NOTE(review): the line that initializes write_val is not visible in
 * this listing — confirm its value against the full source.
 */
15940 bxe_dump_debug_writes(struct bxe_softc *sc)
15942 uint32_t write_val;
15945 /* Halt the STORMs to get a consistent device state. */
15946 REG_WR_IND(sc, XSTORM_WAITP_ADDRESS, write_val);
15947 REG_WR_IND(sc, TSTORM_WAITP_ADDRESS, write_val);
15948 REG_WR_IND(sc, USTORM_WAITP_ADDRESS, write_val);
15949 REG_WR_IND(sc, CSTORM_WAITP_ADDRESS, write_val);
/* E1H additionally needs the TSTORM CAM mode set. */
15951 if (CHIP_IS_E1H(sc))
15952 REG_WR_IND(sc, TSTORM_CAM_MODE, write_val);
15957 * Cycles through the required register reads and dumps them
15958 * to the debug buffer.
/*
 * bxe_dump_debug_reg_read(): read the chip-specific list of register
 * ranges indirectly and append each DWORD to the dump buffer at *index.
 */
15967 bxe_dump_debug_reg_read(struct bxe_softc *sc, uint32_t *index)
15969 preg_addr preg_addrs;
15970 uint32_t regs_count, *buf;
15971 uint32_t i, reg_addrs_index;
15973 buf = sc->grcdump_buffer;
15976 /* Read different registers for different controllers. */
15977 if (CHIP_IS_E1H(sc)) {
15978 regs_count = regs_count_e1h;
15979 preg_addrs = &reg_addrs_e1h[0];
15981 regs_count = regs_count_e1;
15982 preg_addrs = &reg_addrs_e1[0];
15985 /* ToDo: Add a buffer size check. */
/* Each table entry describes a base address and a count of DWORDs. */
15986 for (reg_addrs_index = 0; reg_addrs_index < regs_count;
15987 reg_addrs_index++) {
15988 for (i = 0; i < preg_addrs[reg_addrs_index].size; i++) {
15989 buf[(*index)++] = REG_RD_IND(sc,
15990 preg_addrs[reg_addrs_index].addr + (i * 4));
15996 * Cycles through the required wide register reads and dumps them
15997 * to the debug buffer.
/*
 * bxe_dump_debug_reg_wread(): read the chip-specific list of "wide"
 * register ranges — each entry is a DWORD run plus a set of constant
 * (CAM) register addresses — and append the values to the dump buffer
 * at *index.
 */
16003 bxe_dump_debug_reg_wread(struct bxe_softc *sc, uint32_t *index)
16005 pwreg_addr pwreg_addrs;
16006 uint32_t reg_addrs_index, reg_add_read, reg_add_count;
16007 uint32_t *buf, cam_index, wregs_count;
16009 buf = sc->grcdump_buffer;
16010 pwreg_addrs = NULL;
16012 /* Read different registers for different controllers. */
16013 if (CHIP_IS_E1H(sc)) {
16014 wregs_count = wregs_count_e1h;
16015 pwreg_addrs = &wreg_addrs_e1h[0];
16017 wregs_count = wregs_count_e1;
16018 pwreg_addrs = &wreg_addrs_e1[0];
16021 for (reg_addrs_index = 0; reg_addrs_index < wregs_count;
16022 reg_addrs_index++) {
/* First the contiguous DWORD run starting at .addr ... */
16023 reg_add_read = pwreg_addrs[reg_addrs_index].addr;
16024 for (reg_add_count = 0; reg_add_count <
16025 pwreg_addrs[reg_addrs_index].size; reg_add_count++) {
16026 buf[(*index)++] = REG_RD_IND(sc, reg_add_read);
16027 reg_add_read += sizeof(uint32_t);
/* ... then the entry's list of constant register addresses. */
16029 for (cam_index = 0; cam_index <
16030 pwreg_addrs[reg_addrs_index].const_regs_count;
16032 buf[(*index)++] = REG_RD_IND(sc,
16033 pwreg_addrs[reg_addrs_index].const_regs[cam_index]);
16039 * Performs a debug dump for offline diagnostics.
16041 * Note that when this routine is called the STORM
16042 * processors will be stopped in order to create a
16043 * cohesive dump. The controller will need to be
16044 * reset before the device can begin passing traffic
/*
 * bxe_grcdump(): perform a full GRC register dump for offline
 * diagnostics, writing the header and all register contents into the
 * driver's grcdump buffer and optionally printing the buffer to the
 * system log when 'log' is non-zero.  The STORM processors are halted
 * in the process, so the controller must be reset afterwards.
 */
16051 bxe_grcdump(struct bxe_softc *sc, int log)
16053 uint32_t *buf, i, index;
16056 buf = sc->grcdump_buffer;
16059 /* Write the header and registers contents to the dump buffer. */
16060 bxe_dump_debug_header(sc, &index);
16061 bxe_dump_debug_writes(sc);
16062 bxe_dump_debug_reg_read(sc,&index);
16063 bxe_dump_debug_reg_wread(sc, &index);
16065 /* Print the results to the system log if necessary. */
16068 "-----------------------------"
16070 "-----------------------------\n");
16071 BXE_PRINTF("Buffer length = 0x%08X bytes\n", index * 4);
/* Dump the buffer eight DWORDs per line, prefixed by byte offset. */
16073 for (i = 0; i < index; i += 8) {
16075 "0x%08X - 0x%08X 0x%08X 0x%08X 0x%08X "
16076 "0x%08X 0x%08X 0x%08X 0x%08X\n", i * 4,
16077 buf[i + 0], buf[i + 1], buf[i + 2],
16078 buf[i + 3], buf[i + 4], buf[i + 5],
16079 buf[i + 6], buf[i + 7]);
16083 "-----------------------------"
16085 "-----------------------------\n");
16088 BXE_PRINTF("No grcdump buffer allocated!\n");
16093 * Check that an Ethernet frame is valid and print out debug info if it's
/*
 * bxe_validate_rx_packet(): debug sanity check for a received frame.
 *
 * Runs m_sanity() on the mbuf and checks that the frame length is
 * between ETHER_HDR_LEN and the jumbo maximum; on failure, dumps the
 * frame contents and the originating CQE for analysis.
 */
16100 void bxe_validate_rx_packet(struct bxe_fastpath *fp, uint16_t comp_cons,
16101 union eth_rx_cqe *cqe, struct mbuf *m)
16103 struct bxe_softc *sc;
16108 /* Check that the mbuf is sane. */
16109 error = m_sanity(m, FALSE);
16110 if (error != 1 || ((m->m_len < ETHER_HDR_LEN) |
16111 (m->m_len > ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))) {
16113 bxe_dump_enet(sc, m);
16114 bxe_dump_cqe(fp, comp_cons, cqe);
16115 /* Make sure the packet has a valid length. */
16120 * Prints out Ethernet frame information from an mbuf.
16122 * Partially decode an Ethernet frame to look at some important headers.
/*
 * bxe_dump_enet(): partially decode and print an Ethernet frame.
 *
 * Prints the Ethernet header (handling an optional 802.1Q VLAN tag),
 * then decodes IPv4 (with TCP/UDP/ICMP sub-headers) and ARP payloads.
 * IPv6 is not decoded.  Debug aid only.
 */
16128 void bxe_dump_enet(struct bxe_softc *sc, struct mbuf *m)
16130 struct ether_vlan_header *eh;
16139 "-----------------------------"
16141 "-----------------------------\n");
16143 eh = mtod(m, struct ether_vlan_header *);
16145 /* Handle VLAN encapsulation if present. */
16146 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
16147 etype = ntohs(eh->evl_proto);
16148 e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
16150 etype = ntohs(eh->evl_encap_proto);
16151 e_hlen = ETHER_HDR_LEN;
16154 BXE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, e_hlen = %d\n",
16155 eh->evl_dhost, ":", eh->evl_shost, ":", etype, e_hlen);
/* IPv4: print addresses, header length, total length, id, checksum. */
16159 ip = (struct ip *)(m->m_data + e_hlen);
16161 "--ip: dest = 0x%08X , src = 0x%08X, "
16162 "ip_hlen = %d bytes, len = %d bytes, protocol = 0x%02X, "
16163 "ip_id = 0x%04X, csum = 0x%04X\n",
16164 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
16165 (ip->ip_hl << 2), ntohs(ip->ip_len), ip->ip_p,
16166 ntohs(ip->ip_id), ntohs(ip->ip_sum));
16168 switch (ip->ip_p) {
16170 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
16172 "-tcp: dest = %d, src = %d, tcp_hlen = %d "
16173 "bytes, flags = 0x%b, csum = 0x%04X\n",
16174 ntohs(th->th_dport), ntohs(th->th_sport),
16175 (th->th_off << 2), th->th_flags,
16176 "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST\02SYN\01FIN",
16177 ntohs(th->th_sum));
16180 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
16182 "-udp: dest = %d, src = %d, udp_hlen = %d "
16183 "bytes, len = %d bytes, csum = 0x%04X\n",
16184 ntohs(uh->uh_dport), ntohs(uh->uh_sport),
16185 (int)sizeof(struct udphdr), ntohs(uh->uh_ulen),
16186 ntohs(uh->uh_sum));
16189 BXE_PRINTF("icmp:\n");
16192 BXE_PRINTF("----: Other IP protocol.\n");
16195 case ETHERTYPE_IPV6:
16196 /* ToDo: Add IPv6 support. */
16197 BXE_PRINTF("IPv6 not supported!.\n");
16199 case ETHERTYPE_ARP:
16200 BXE_PRINTF("-arp: ");
16201 ah = (struct arphdr *) (m->m_data + e_hlen);
16202 switch (ntohs(ah->ar_op)) {
16203 case ARPOP_REVREQUEST:
16204 printf("reverse ARP request\n");
16206 case ARPOP_REVREPLY:
16207 printf("reverse ARP reply\n");
16209 case ARPOP_REQUEST:
16210 printf("ARP request\n");
16213 printf("ARP reply\n");
16216 printf("other ARP operation\n");
16220 BXE_PRINTF("----: Other protocol.\n");
16224 "-----------------------------"
16226 "-----------------------------\n");
/*
 * bxe_dump_mbuf_data(): hex-dump the first 'len' bytes of an mbuf's
 * data area, with spacing every 6 bytes and a line break every 40.
 */
16231 bxe_dump_mbuf_data(struct mbuf *m, int len)
16236 ptr = mtod(m, uint8_t *);
16237 printf("\nmbuf->m_data:");
16239 for (i = 0; i < len; i++){
16240 if (i != 0 && i % 40 == 0)
16242 else if (i != 0 && i % 6 == 0)
16244 printf("%02x", *ptr++);
16252 * Prints out information about an mbuf.
/*
 * bxe_dump_mbuf(): print the metadata of an mbuf — base fields, packet
 * header (length/flags/checksum flags) when M_PKTHDR is set, and
 * external-storage details when M_EXT is set.  Debug aid only.
 */
16258 void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m)
16261 BXE_PRINTF("mbuf: null pointer\n");
16266 BXE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, "
16267 "m_data = %p\n", m, m->m_len, m->m_flags,
16268 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
16270 if (m->m_flags & M_PKTHDR) {
16271 BXE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, "
16272 "csum_flags = %b\n", m->m_pkthdr.len,
16273 m->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG"
16274 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
16275 "\22M_PROMISC\23M_NOFREE",
16276 m->m_pkthdr.csum_flags,
16277 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
16278 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
16279 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
16280 "\14CSUM_PSEUDO_HDR");
16283 if (m->m_flags & M_EXT) {
16284 BXE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
16285 m->m_ext.ext_buf, m->m_ext.ext_size);
/* Decode the external storage type to a symbolic name. */
16286 switch (m->m_ext.ext_type) {
16288 printf("EXT_CLUSTER\n"); break;
16290 printf("EXT_SFBUF\n"); break;
16292 printf("EXT_JUMBO9\n"); break;
16294 printf("EXT_JUMBO16\n"); break;
16296 printf("EXT_PACKET\n"); break;
16298 printf("EXT_MBUF\n"); break;
16300 printf("EXT_NET_DRV\n"); break;
16302 printf("EXT_MOD_TYPE\n"); break;
16303 case EXT_DISPOSABLE:
16304 printf("EXT_DISPOSABLE\n"); break;
16306 printf("EXT_EXTREF\n"); break;
16308 printf("UNKNOWN\n");
16317 * Prints out information about an rx_bd.
/*
 * Debug-only: print one RX buffer descriptor. Distinguishes an
 * out-of-range index, a next-page pointer entry (the tail slots of each
 * chain page), and a normal RX BD.
 *
 * NOTE(review): lossy extract -- the 'sc = fp->sc' assignment, some
 * argument lines and closing braces are not visible here.
 */
16323 void bxe_dump_rxbd(struct bxe_fastpath *fp, int idx,
16324 struct eth_rx_bd *rx_bd)
16326 struct bxe_softc *sc;
16330 /* Check if index out of range. */
16331 if (idx > MAX_RX_BD) {
16332 BXE_PRINTF("fp[%02d].rx_bd[0x%04X] XX: Invalid rx_bd index!\n",
/* Slots past USABLE_RX_BD_PER_PAGE within a page hold next-page pointers. */
16334 } else if ((idx & RX_BD_PER_PAGE_MASK) >= USABLE_RX_BD_PER_PAGE) {
16335 /* RX Chain page pointer. */
16336 BXE_PRINTF("fp[%02d].rx_bd[0x%04X] NP: haddr=0x%08X:%08X\n",
16337 fp->index, idx, rx_bd->addr_hi, rx_bd->addr_lo)
16339 BXE_PRINTF("fp[%02d].rx_bd[0x%04X] RX: haddr=0x%08X:%08X\n",
16340 fp->index, idx, rx_bd->addr_hi, rx_bd->addr_lo);
16345 * Prints out a completion queue entry.
/*
 * Debug-only: print one RX completion-queue entry, distinguishing an
 * out-of-range index, a next-page CQE, and a fast-path CQE.
 *
 * NOTE(review): lossy extract. Also note the next-page test uses
 * '(idx & USABLE_RCQ_ENTRIES_PER_PAGE) == USABLE_RCQ_ENTRIES_PER_PAGE',
 * i.e. the per-page count is used as if it were a bit mask -- this only
 * behaves as intended if USABLE_RCQ_ENTRIES_PER_PAGE is (2^k - something)
 * compatible; TODO confirm against the macro definitions.
 */
16351 void bxe_dump_cqe(struct bxe_fastpath *fp, int idx,
16352 union eth_rx_cqe *cqe)
16354 struct bxe_softc *sc;
16358 if (idx > MAX_RCQ_ENTRIES) {
16359 /* Index out of range. */
16360 BXE_PRINTF("fp[%02d].rx_cqe[0x%04X]: Invalid rx_cqe index!\n",
16362 } else if ((idx & USABLE_RCQ_ENTRIES_PER_PAGE) ==
16363 USABLE_RCQ_ENTRIES_PER_PAGE) {
16364 /* CQE next page pointer. */
16365 BXE_PRINTF("fp[%02d].rx_cqe[0x%04X] NP: haddr=0x%08X:%08X\n",
16367 le32toh(cqe->next_page_cqe.addr_hi),
16368 le32toh(cqe->next_page_cqe.addr_lo));
/* Normal fast-path CQE: decode error flags via the %b format string. */
16371 BXE_PRINTF("fp[%02d].rx_cqe[0x%04X] CQ: error_flags=0x%b, "
16372 "pkt_len=0x%04X, status_flags=0x%02X, vlan=0x%04X "
16373 "rss_hash=0x%08X\n", fp->index, idx,
16374 cqe->fast_path_cqe.type_error_flags,
16375 BXE_ETH_FAST_PATH_RX_CQE_ERROR_FLAGS_PRINTFB,
16376 le16toh(cqe->fast_path_cqe.pkt_len),
16377 cqe->fast_path_cqe.status_flags,
16378 le16toh(cqe->fast_path_cqe.vlan_tag),
16379 le32toh(cqe->fast_path_cqe.rss_hash_result));
16384 * Prints out information about a TX parsing BD.
/*
 * Debug-only: print the fields of a TX parsing buffer descriptor
 * (global data, TCP flags, header lengths, LSO/checksum values).
 *
 * NOTE(review): lossy extract -- 'sc = fp->sc', the invalid-index
 * arguments and closing braces are not visible here.
 */
16390 void bxe_dump_tx_parsing_bd(struct bxe_fastpath *fp, int idx,
16391 struct eth_tx_parse_bd *p_bd)
16393 struct bxe_softc *sc;
16397 if (idx > MAX_TX_BD){
16398 /* Index out of range. */
16399 BXE_PRINTF("fp[%02d].tx_bd[0x%04X] XX: Invalid tx_bd index!\n",
/* Valid index: dump the parsing BD; %b decodes the flag bitfields. */
16402 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] PB: global_data=0x%b, "
16403 "tcp_flags=0x%b, ip_hlen=%04d, total_hlen=%04d, "
16404 "tcp_pseudo_csum=0x%04X, lso_mss=0x%04X, ip_id=0x%04X, "
16405 "tcp_send_seq=0x%08X\n", fp->index, idx,
16406 p_bd->global_data, BXE_ETH_TX_PARSE_BD_GLOBAL_DATA_PRINTFB,
16407 p_bd->tcp_flags, BXE_ETH_TX_PARSE_BD_TCP_FLAGS_PRINTFB,
16408 p_bd->ip_hlen, p_bd->total_hlen, p_bd->tcp_pseudo_csum,
16409 p_bd->lso_mss, p_bd->ip_id, p_bd->tcp_send_seq);
16414 * Prints out information about a tx_bd.
/*
 * Debug-only: print one TX buffer descriptor. Four cases: invalid index,
 * next-page pointer BD, start BD (first BD of a frame, has the
 * ETH_TX_BD_FLAGS_START_BD flag), or a regular continuation BD.
 *
 * NOTE(review): lossy extract -- 'sc = fp->sc' and some closing braces
 * are not visible here.
 */
16420 void bxe_dump_txbd(struct bxe_fastpath *fp, int idx,
16421 union eth_tx_bd_types *tx_bd)
16423 struct bxe_softc *sc;
16427 if (idx > MAX_TX_BD){
16428 /* Index out of range. */
16429 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] XX: Invalid tx_bd index!\n",
/* NOTE(review): same count-as-mask test pattern as bxe_dump_cqe -- confirm. */
16431 } else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
16432 /* TX next page BD. */
16433 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] NP: haddr=0x%08X:%08X\n",
16434 fp->index, idx, tx_bd->next_bd.addr_hi,
16435 tx_bd->next_bd.addr_lo);
16436 } else if ((tx_bd->start_bd.bd_flags.as_bitfield &
16437 ETH_TX_BD_FLAGS_START_BD) != 0) {
/* First BD of a frame: includes nbd count, vlan and flag bits. */
16439 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] ST: haddr=0x%08X:%08X, "
16440 "nbd=%02d, nbytes=%05d, vlan/idx=0x%04X, flags=0x%b, "
16441 "gendata=0x%02X\n",
16442 fp->index, idx, tx_bd->start_bd.addr_hi,
16443 tx_bd->start_bd.addr_lo, tx_bd->start_bd.nbd,
16444 tx_bd->start_bd.nbytes, tx_bd->start_bd.vlan,
16445 tx_bd->start_bd.bd_flags.as_bitfield,
16446 BXE_ETH_TX_BD_FLAGS_PRINTFB,
16447 tx_bd->start_bd.general_data);
16449 /* Regular TX BD. */
16450 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] TX: haddr=0x%08X:%08X, "
16451 "total_pkt_bytes=%05d, nbytes=%05d\n", fp->index, idx,
16452 tx_bd->reg_bd.addr_hi, tx_bd->reg_bd.addr_lo,
16453 tx_bd->reg_bd.total_pkt_bytes, tx_bd->reg_bd.nbytes);
16459 * Prints out the transmit chain.
/*
 * Debug-only: dump 'count' TX BDs starting at 'tx_bd_prod', preceded by a
 * summary of the chain geometry (physical address, page size, BDs per
 * page). A BD carrying the START flag is followed by a parsing BD, which
 * is decoded separately on the next iteration via the 'parsing_bd' flag.
 *
 * NOTE(review): lossy extract -- several BXE_PRINTF( opener lines,
 * the if (parsing_bd) test and brace lines are missing from this view.
 */
16465 void bxe_dump_tx_chain(struct bxe_fastpath * fp, int tx_bd_prod, int count)
16467 struct bxe_softc *sc;
16468 union eth_tx_bd_types *tx_bd;
16469 uint32_t val_hi, val_lo;
16470 int i, parsing_bd = 0;
16474 /* First some info about the tx_bd chain structure. */
16476 "----------------------------"
16478 "----------------------------\n");
/* Split the 64-bit DMA address for 32-bit-friendly printing. */
16480 val_hi = U64_HI(fp->tx_dma.paddr);
16481 val_lo = U64_LO(fp->tx_dma.paddr);
16483 "0x%08X:%08X - (fp[%02d]->tx_dma.paddr) TX Chain physical address\n",
16484 val_hi, val_lo, fp->index);
16486 "page size = 0x%08X, tx chain pages = 0x%08X\n",
16487 (uint32_t)BCM_PAGE_SIZE, (uint32_t)NUM_TX_PAGES);
16489 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
16490 (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE);
16492 "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
16495 "-----------------------------"
16497 "-----------------------------\n");
16499 /* Now print out the tx_bd's themselves. */
16500 for (i = 0; i < count; i++) {
16501 tx_bd = &fp->tx_chain[tx_bd_prod];
/* When the previous BD was a start BD, this slot is a parsing BD. */
16503 struct eth_tx_parse_bd *p_bd;
16504 p_bd = (struct eth_tx_parse_bd *)
16505 &fp->tx_chain[tx_bd_prod].parse_bd;
16506 bxe_dump_tx_parsing_bd(fp, tx_bd_prod, p_bd);
16509 bxe_dump_txbd(fp, tx_bd_prod, tx_bd);
16510 if ((tx_bd->start_bd.bd_flags.as_bitfield &
16511 ETH_TX_BD_FLAGS_START_BD) != 0)
16513 * There is always a parsing BD following the
16514 * tx_bd with the start bit set.
16518 /* Don't skip next page pointers. */
16519 tx_bd_prod = ((tx_bd_prod + 1) & MAX_TX_BD);
16523 "-----------------------------"
16525 "-----------------------------\n");
16529 * Prints out the receive completion queue chain.
/*
 * Debug-only: dump 'count' RX completion-queue entries starting at
 * 'rx_cq_prod', preceded by the chain geometry summary.
 *
 * NOTE(review): lossy extract -- local declarations, 'sc = fp->sc' and
 * some BXE_PRINTF( opener lines are missing from this view.
 */
16535 void bxe_dump_rx_cq_chain(struct bxe_fastpath *fp, int rx_cq_prod, int count)
16537 struct bxe_softc *sc;
16538 union eth_rx_cqe *cqe;
16543 /* First some info about the tx_bd chain structure. */
16545 "----------------------------"
16547 "----------------------------\n");
16549 BXE_PRINTF("fp[%02d]->rcq_dma.paddr = 0x%jX\n",
16550 fp->index, (uintmax_t) fp->rcq_dma.paddr);
16552 BXE_PRINTF("page size = 0x%08X, cq chain pages "
16554 (uint32_t)BCM_PAGE_SIZE, (uint32_t) NUM_RCQ_PAGES);
16556 BXE_PRINTF("cqe_bd per page = 0x%08X, usable cqe_bd per "
16558 (uint32_t) TOTAL_RCQ_ENTRIES_PER_PAGE,
16559 (uint32_t) USABLE_RCQ_ENTRIES_PER_PAGE);
16561 BXE_PRINTF("total cqe_bd = 0x%08X\n",(uint32_t) TOTAL_RCQ_ENTRIES);
16563 /* Now the CQE entries themselves. */
16565 "----------------------------"
16567 "----------------------------\n");
16569 for (i = 0; i < count; i++) {
16570 cqe = (union eth_rx_cqe *)&fp->rcq_chain[rx_cq_prod];
16572 bxe_dump_cqe(fp, rx_cq_prod, cqe);
16574 /* Don't skip next page pointers. */
16575 rx_cq_prod = ((rx_cq_prod + 1) & MAX_RCQ_ENTRIES);
16579 "----------------------------"
16581 "----------------------------\n");
16585 * Prints out the receive chain.
/*
 * Debug-only: dump 'count' RX BDs plus the mbuf bound to each slot,
 * starting at 'prod', preceded by the chain geometry summary.
 *
 * NOTE(review): lossy extract. Also note line 16632 reads
 * 'sc->fp->rx_mbuf_ptr[prod]' -- i.e. always queue 0's mbuf array --
 * rather than the passed 'fp'; looks like a latent bug for multi-queue
 * configs, but cannot be fixed safely without the missing lines.
 */
16591 void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int prod, int count)
16593 struct bxe_softc *sc;
16594 struct eth_rx_bd *rx_bd;
16600 /* First some info about the tx_bd chain structure. */
16602 "----------------------------"
16604 "----------------------------\n");
16607 "----- RX_BD Chain -----\n");
16609 BXE_PRINTF("fp[%02d]->rx_dma.paddr = 0x%jX\n",
16610 fp->index, (uintmax_t) fp->rx_dma.paddr);
16613 "page size = 0x%08X, rx chain pages = 0x%08X\n",
16614 (uint32_t)BCM_PAGE_SIZE, (uint32_t)NUM_RX_PAGES);
16617 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
16618 (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE);
16621 "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
16623 /* Now the rx_bd entries themselves. */
16625 "----------------------------"
16627 "----------------------------\n");
16629 /* Now print out the rx_bd's themselves. */
16630 for (i = 0; i < count; i++) {
16631 rx_bd = (struct eth_rx_bd *) (&fp->rx_chain[prod]);
16632 m = sc->fp->rx_mbuf_ptr[prod];
16634 bxe_dump_rxbd(fp, prod, rx_bd);
16635 bxe_dump_mbuf(sc, m);
16637 /* Don't skip next page pointers. */
16638 prod = ((prod + 1) & MAX_RX_BD);
16642 "----------------------------"
16644 "----------------------------\n");
16648 * Prints out a register dump.
/*
 * Debug-only: dump device registers from offset 0x2000 up to (but not
 * including) 0x10000, four 32-bit registers per output line.
 *
 * NOTE(review): lossy extract -- locals and surrounding braces missing.
 */
16654 void bxe_dump_hw_state(struct bxe_softc *sc)
16659 "----------------------------"
16661 "----------------------------\n");
/* 0x10 bytes (4 registers) per row. */
16663 for (i = 0x2000; i < 0x10000; i += 0x10)
16664 BXE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
16665 REG_RD(sc, 0 + i), REG_RD(sc, 0 + i + 0x4),
16666 REG_RD(sc, 0 + i + 0x8), REG_RD(sc, 0 + i + 0xC));
16669 "----------------------------"
16671 "----------------------------\n");
16675 * Prints out the RX mbuf chain.
/*
 * Debug-only: dump 'count' RX mbufs starting at ring index 'chain_prod'.
 *
 * NOTE(review): reads 'sc->fp->rx_mbuf_ptr' -- always queue 0 -- there is
 * no fastpath argument here, so this helper only covers fp[0]; confirm
 * that is intentional. Lossy extract: locals/braces missing.
 */
16681 void bxe_dump_rx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
16687 "----------------------------"
16689 "----------------------------\n");
16691 for (i = 0; i < count; i++) {
16692 m = sc->fp->rx_mbuf_ptr[chain_prod];
16693 BXE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
16694 bxe_dump_mbuf(sc, m);
/* Advance with ring wrap (skips nothing, including next-page slots). */
16695 chain_prod = RX_BD(NEXT_RX_BD(chain_prod));
16699 "----------------------------"
16701 "----------------------------\n");
16705 * Prints out the mbufs in the TX mbuf chain.
/*
 * Debug-only: dump 'count' TX mbufs starting at ring index 'chain_prod'.
 *
 * NOTE(review): like the RX variant, this indexes 'sc->fp->tx_mbuf_ptr'
 * (queue 0 only). Lossy extract: locals/braces missing.
 */
16711 void bxe_dump_tx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
16717 "----------------------------"
16719 "----------------------------\n");
16721 for (i = 0; i < count; i++) {
16722 m = sc->fp->tx_mbuf_ptr[chain_prod];
16723 BXE_PRINTF("txmbuf[%d]\n", chain_prod);
16724 bxe_dump_mbuf(sc, m);
/* Advance with ring wrap. */
16725 chain_prod = TX_BD(NEXT_TX_BD(chain_prod));
16729 "----------------------------"
16731 "----------------------------\n");
16735 * Prints out the status block from host memory.
/*
 * Debug-only: dump the per-queue fastpath status blocks (USTORM/CSTORM
 * index values) followed by the default status block (attention bits plus
 * the U/C/T/X-STORM default index values).
 *
 * NOTE(review): lossy extract -- many BXE_PRINTF( opener lines and the
 * 'fp = &sc->fp[i]' assignment are missing from this view. Per-queue
 * index_values are printed raw while default-SB values go through
 * le16toh(); presumably both are little-endian in host memory -- confirm.
 */
16741 void bxe_dump_status_block(struct bxe_softc *sc)
16743 struct bxe_fastpath *fp;
16744 struct host_def_status_block *def_sb;
16745 struct host_status_block *fpsb;
16748 def_sb = sc->def_sb;
16750 "----------------------------"
16752 "----------------------------\n");
/* Per-queue (fastpath) status blocks first. */
16754 for (i = 0; i < sc->num_queues; i++) {
16756 fpsb = fp->status_block;
16758 "----------------------------"
16760 "----------------------------\n", fp->index);
16762 /* Print the USTORM fields (HC_USTORM_SB_NUM_INDICES). */
16764 "0x%08X - USTORM Flags (F/W RESERVED)\n",
16765 fpsb->u_status_block.__flags);
16767 " 0x%02X - USTORM PCIe Function\n",
16768 fpsb->u_status_block.func);
16770 " 0x%02X - USTORM Status Block ID\n",
16771 fpsb->u_status_block.status_block_id);
16773 " 0x%04X - USTORM Status Block Index (Tag)\n",
16774 fpsb->u_status_block.status_block_index);
16776 " 0x%04X - USTORM [TOE_RX_CQ_CONS]\n",
16777 fpsb->u_status_block.index_values[HC_INDEX_U_TOE_RX_CQ_CONS]);
16779 " 0x%04X - USTORM [ETH_RX_CQ_CONS]\n",
16780 fpsb->u_status_block.index_values[HC_INDEX_U_ETH_RX_CQ_CONS]);
16782 " 0x%04X - USTORM [ETH_RX_BD_CONS]\n",
16783 fpsb->u_status_block.index_values[HC_INDEX_U_ETH_RX_BD_CONS]);
16785 " 0x%04X - USTORM [RESERVED]\n",
16786 fpsb->u_status_block.index_values[3]);
16788 /* Print the CSTORM fields (HC_CSTORM_SB_NUM_INDICES). */
16790 "0x%08X - CSTORM Flags (F/W RESERVED)\n",
16791 fpsb->c_status_block.__flags);
16793 " 0x%02X - CSTORM PCIe Function\n",
16794 fpsb->c_status_block.func);
16796 " 0x%02X - CSTORM Status Block ID\n",
16797 fpsb->c_status_block.status_block_id);
16799 " 0x%04X - CSTORM Status Block Index (Tag)\n",
16800 fpsb->c_status_block.status_block_index);
16802 " 0x%04X - CSTORM [TOE_TX_CQ_CONS]\n",
16803 fpsb->c_status_block.index_values[HC_INDEX_C_TOE_TX_CQ_CONS]);
16805 " 0x%04X - CSTORM [ETH_TX_CQ_CONS]\n",
16806 fpsb->c_status_block.index_values[HC_INDEX_C_ETH_TX_CQ_CONS]);
16808 " 0x%04X - CSTORM [ISCSI_EQ_CONS]\n",
16809 fpsb->c_status_block.index_values[HC_INDEX_C_ISCSI_EQ_CONS]);
16811 " 0x%04X - CSTORM [RESERVED]\n",
16812 fpsb->c_status_block.index_values[3]);
/* Default (slow path) status block. */
16816 "--------------------------"
16817 " Def Status Block "
16818 "--------------------------\n");
16820 /* Print attention information. */
16822 " 0x%02X - Status Block ID\n",
16823 def_sb->atten_status_block.status_block_id);
16825 "0x%08X - Attn Bits\n",
16826 def_sb->atten_status_block.attn_bits);
16828 "0x%08X - Attn Bits Ack\n",
16829 def_sb->atten_status_block.attn_bits_ack);
16831 " 0x%04X - Attn Block Index\n",
16832 le16toh(def_sb->atten_status_block.attn_bits_index));
16834 /* Print the USTORM fields (HC_USTORM_DEF_SB_NUM_INDICES). */
16836 " 0x%02X - USTORM Status Block ID\n",
16837 def_sb->u_def_status_block.status_block_id);
16839 " 0x%04X - USTORM Status Block Index\n",
16840 le16toh(def_sb->u_def_status_block.status_block_index));
16842 " 0x%04X - USTORM [ETH_RDMA_RX_CQ_CONS]\n",
16843 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS]));
16845 " 0x%04X - USTORM [ETH_ISCSI_RX_CQ_CONS]\n",
16846 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]));
16848 " 0x%04X - USTORM [ETH_RDMA_RX_BD_CONS]\n",
16849 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS]));
16851 " 0x%04X - USTORM [ETH_ISCSI_RX_BD_CONS]\n",
16852 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS]));
16854 /* Print the CSTORM fields (HC_CSTORM_DEF_SB_NUM_INDICES). */
16856 " 0x%02X - CSTORM Status Block ID\n",
16857 def_sb->c_def_status_block.status_block_id);
16859 " 0x%04X - CSTORM Status Block Index\n",
16860 le16toh(def_sb->c_def_status_block.status_block_index));
16862 " 0x%04X - CSTORM [RDMA_EQ_CONS]\n",
16863 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_EQ_CONS]));
16865 " 0x%04X - CSTORM [RDMA_NAL_PROD]\n",
16866 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_NAL_PROD]));
16868 " 0x%04X - CSTORM [ETH_FW_TX_CQ_CONS]\n",
16869 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS]));
16871 " 0x%04X - CSTORM [ETH_SLOW_PATH]\n",
16872 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_SLOW_PATH]));
16874 " 0x%04X - CSTORM [ETH_RDMA_CQ_CONS]\n",
16875 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS]));
16877 " 0x%04X - CSTORM [ETH_ISCSI_CQ_CONS]\n",
16878 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]));
16880 " 0x%04X - CSTORM [UNUSED]\n",
16881 le16toh(def_sb->c_def_status_block.index_values[6]));
16883 " 0x%04X - CSTORM [UNUSED]\n",
16884 le16toh(def_sb->c_def_status_block.index_values[7]));
16886 /* Print the TSTORM fields (HC_TSTORM_DEF_SB_NUM_INDICES). */
16888 " 0x%02X - TSTORM Status Block ID\n",
16889 def_sb->t_def_status_block.status_block_id);
16891 " 0x%04X - TSTORM Status Block Index\n",
16892 le16toh(def_sb->t_def_status_block.status_block_index));
16893 for (i = 0; i < HC_TSTORM_DEF_SB_NUM_INDICES; i++)
16895 " 0x%04X - TSTORM [UNUSED]\n",
16896 le16toh(def_sb->t_def_status_block.index_values[i]));
16898 /* Print the XSTORM fields (HC_XSTORM_DEF_SB_NUM_INDICES). */
16900 " 0x%02X - XSTORM Status Block ID\n",
16901 def_sb->x_def_status_block.status_block_id);
16903 " 0x%04X - XSTORM Status Block Index\n",
16904 le16toh(def_sb->x_def_status_block.status_block_index));
16905 for (i = 0; i < HC_XSTORM_DEF_SB_NUM_INDICES; i++)
16907 " 0x%04X - XSTORM [UNUSED]\n",
16908 le16toh(def_sb->x_def_status_block.index_values[i]));
16911 "----------------------------"
16913 "----------------------------\n");
16918 * Prints out the statistics block from host memory.
/*
 * NOTE(review): only the signature is visible in this extract; the body
 * (lines 16925+) is missing, so whether this is a stub or a real dump
 * cannot be determined from here.
 */
16924 void bxe_dump_stats_block(struct bxe_softc *sc)
16930 * Prints out a summary of the fastpath state.
/*
 * Debug-only: dump one fastpath queue's software state -- identifiers,
 * RX/TX ring producer/consumer indices, packet and mbuf counters, and
 * (when TPA is enabled) per-aggregation-queue TPA state.
 *
 * NOTE(review): lossy extract -- 'sc = fp->sc', many BXE_PRINTF( opener
 * lines and braces are missing. The E1 and non-E1 TPA loops visible here
 * are textually identical (both use ETH_MAX_AGGREGATION_QUEUES_E1); the
 * missing else-branch presumably used the E1H constant -- confirm.
 */
16936 void bxe_dump_fp_state(struct bxe_fastpath *fp)
16938 struct bxe_softc *sc;
16939 uint32_t val_hi, val_lo;
16944 "----------------------------"
16946 "----------------------------\n");
/* Split the fp pointer for 32-bit-friendly printing. */
16948 val_hi = U64_HI(fp);
16949 val_lo = U64_LO(fp);
16951 "0x%08X:%08X - (fp[%02d]) fastpath virtual address\n",
16952 val_hi, val_lo, fp->index);
16954 " %3d - (fp[%02d]->sb_id)\n",
16955 fp->sb_id, fp->index);
16957 " %3d - (fp[%02d]->cl_id)\n",
16958 fp->cl_id, fp->index);
16960 " 0x%08X - (fp[%02d]->state)\n",
16961 (uint32_t)fp->state, fp->index);
16963 /* Receive state. */
16965 " 0x%04X - (fp[%02d]->rx_bd_prod)\n",
16966 fp->rx_bd_prod, fp->index);
16968 " 0x%04X - (fp[%02d]->rx_bd_cons)\n",
16969 fp->rx_bd_cons, fp->index);
16971 " 0x%04X - (fp[%02d]->rx_cq_prod)\n",
16972 fp->rx_cq_prod, fp->index);
16974 " 0x%04X - (fp[%02d]->rx_cq_cons)\n",
16975 fp->rx_cq_cons, fp->index);
16977 " %16lu - (fp[%02d]->rx_pkts)\n",
16978 fp->rx_pkts, fp->index);
16980 " 0x%08X - (fp[%02d]->rx_mbuf_alloc)\n",
16981 fp->rx_mbuf_alloc, fp->index);
16983 " %16lu - (fp[%02d]->ipackets)\n",
16984 fp->ipackets, fp->index);
16986 " %16lu - (fp[%02d]->rx_soft_errors)\n",
16987 fp->rx_soft_errors, fp->index);
16989 /* Transmit state. */
16991 " 0x%04X - (fp[%02d]->tx_bd_used)\n",
16992 fp->tx_bd_used, fp->index);
16994 " 0x%04X - (fp[%02d]->tx_bd_prod)\n",
16995 fp->tx_bd_prod, fp->index);
16997 " 0x%04X - (fp[%02d]->tx_bd_cons)\n",
16998 fp->tx_bd_cons, fp->index);
17000 " 0x%04X - (fp[%02d]->tx_pkt_prod)\n",
17001 fp->tx_pkt_prod, fp->index);
17003 " 0x%04X - (fp[%02d]->tx_pkt_cons)\n",
17004 fp->tx_pkt_cons, fp->index);
17006 " %16lu - (fp[%02d]->tx_pkts)\n",
17007 fp->tx_pkts, fp->index);
17009 " 0x%08X - (fp[%02d]->tx_mbuf_alloc)\n",
17010 fp->tx_mbuf_alloc, fp->index);
17012 " %16lu - (fp[%02d]->opackets)\n",
17013 fp->opackets, fp->index);
17015 " %16lu - (fp[%02d]->tx_soft_errors)\n",
17016 fp->tx_soft_errors, fp->index);
/* TPA (LRO) state, only meaningful when TPA is enabled. */
17019 if (TPA_ENABLED(sc)) {
17021 " %16lu - (fp[%02d]->rx_tpa_pkts)\n",
17022 fp->rx_tpa_pkts, fp->index);
17024 " 0x%08X - (fp[%02d]->tpa_mbuf_alloc)\n",
17025 fp->tpa_mbuf_alloc, fp->index);
17027 " 0x%08X - (fp[%02d]->sge_mbuf_alloc)\n",
17028 fp->sge_mbuf_alloc, fp->index);
17030 if (CHIP_IS_E1(sc)) {
17031 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1; i++)
17033 " 0x%08X - (fp[%02d]->tpa_state[%02d])\n",
17034 (uint32_t)fp->tpa_state[i], fp->index, i);
17036 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1; i++)
17038 " 0x%08X - (fp[%02d]->tpa_state[%02d])\n",
17039 (uint32_t)fp->tpa_state[i], fp->index, i);
17044 "----------------------------"
17046 "----------------------------\n");
/*
 * Debug-only: dump the port-related softc fields (PMF flag, link config,
 * supported/advertised capabilities, stats offset). The "_locked" suffix
 * indicates the caller is expected to hold the appropriate lock.
 *
 * NOTE(review): lossy extract -- BXE_PRINTF( opener lines missing.
 */
17054 void bxe_dump_port_state_locked(struct bxe_softc *sc)
17058 "------------------------------"
17060 "------------------------------\n");
17063 " %2d - (port) pmf\n", sc->port.pmf);
17065 "0x%08X - (port) link_config\n", sc->port.link_config);
17067 "0x%08X - (port) supported\n", sc->port.supported);
17069 "0x%08X - (port) advertising\n", sc->port.advertising);
17071 "0x%08X - (port) port_stx\n", sc->port.port_stx);
17074 "----------------------------"
17076 "----------------------------\n");
/*
 * Debug-only: dump the current link state variables (MAC type, link/PHY
 * up flags, duplex, flow control, speed, autoneg, status). Caller is
 * expected to hold the appropriate lock (per the "_locked" suffix).
 *
 * NOTE(review): lossy extract -- 'break;' lines in the MAC-type switch
 * and several BXE_PRINTF( openers are missing from this view.
 */
17084 void bxe_dump_link_vars_state_locked(struct bxe_softc *sc)
17087 "---------------------------"
17088 " Link Vars State "
17089 "----------------------------\n");
/* Decode which MAC block is currently driving the link. */
17091 switch (sc->link_vars.mac_type) {
17092 case MAC_TYPE_NONE:
17093 BXE_PRINTF(" NONE");
17095 case MAC_TYPE_EMAC:
17096 BXE_PRINTF(" EMAC");
17098 case MAC_TYPE_BMAC:
17099 BXE_PRINTF(" BMAC");
17102 BXE_PRINTF(" UNKN");
17104 printf(" - (link_vars->mac_type)\n");
17107 " %2d - (link_vars->phy_link_up)\n",
17108 sc->link_vars.phy_link_up);
17110 " %2d - (link_vars->link_up)\n",
17111 sc->link_vars.link_up);
17113 " %2d - (link_vars->duplex)\n",
17114 sc->link_vars.duplex);
17116 " 0x%04X - (link_vars->flow_ctrl)\n",
17117 sc->link_vars.flow_ctrl);
17119 " 0x%04X - (link_vars->line_speed)\n",
17120 sc->link_vars.line_speed);
17122 "0x%08X - (link_vars->ieee_fc)\n",
17123 sc->link_vars.ieee_fc);
17125 "0x%08X - (link_vars->autoneg)\n",
17126 sc->link_vars.autoneg);
17128 "0x%08X - (link_vars->phy_flags)\n",
17129 sc->link_vars.phy_flags);
17131 "0x%08X - (link_vars->link_status)\n",
17132 sc->link_vars.link_status);
17135 "----------------------------"
17137 "----------------------------\n");
/*
 * Debug-only: dump the configured link parameters (port, loopback mode,
 * PHY address, requested duplex/flow-control/speed, shmem base, etc.).
 *
 * NOTE(review): lossy extract. Also note line 17174 prints
 * 'sc->port.ether_mtu' under a "(link_params->ether_mtu)" label --
 * label and field disagree; flagged, not changed.
 */
17147 void bxe_dump_link_params_state_locked(struct bxe_softc *sc)
17150 "--------------------------"
17151 " Link Params State "
17152 "---------------------------\n");
17155 " %2d - (link_params->port)\n",
17156 sc->link_params.port);
17158 " %2d - (link_params->loopback_mode)\n",
17159 sc->link_params.loopback_mode);
17161 " %3d - (link_params->phy_addr)\n",
17162 sc->link_params.phy_addr);
17164 " 0x%04X - (link_params->req_duplex)\n",
17165 sc->link_params.req_duplex);
17167 " 0x%04X - (link_params->req_flow_ctrl)\n",
17168 sc->link_params.req_flow_ctrl);
17170 " 0x%04X - (link_params->req_line_speed)\n",
17171 sc->link_params.req_line_speed);
17173 " %5d - (link_params->ether_mtu)\n",
17174 sc->port.ether_mtu);
17176 "0x%08X - (link_params->shmem_base) shared memory base address\n",
17177 sc->link_params.shmem_base);
17179 "0x%08X - (link_params->speed_cap_mask)\n",
17180 sc->link_params.speed_cap_mask);
17182 "0x%08X - (link_params->ext_phy_config)\n",
17183 sc->link_params.ext_phy_config);
17185 "0x%08X - (link_params->switch_cfg)\n",
17186 sc->link_params.switch_cfg);
17189 "----------------------------"
17191 "----------------------------\n");
17195 * Prints out a summary of the driver state.
/*
 * Debug-only: dump driver-wide softc state (key virtual addresses, queue
 * count, XAUI lane swaps, simulated-failure counters), then chain into
 * the port / link-params / link-vars dumps.
 *
 * NOTE(review): lossy extract -- several argument lines of the
 * BXE_PRINTF calls are missing from this view.
 */
17201 void bxe_dump_driver_state(struct bxe_softc *sc)
17203 uint32_t val_hi, val_lo;
17206 "-----------------------------"
17208 "-----------------------------\n");
/* Pointers are split hi:lo for 32-bit-friendly printing. */
17210 val_hi = U64_HI(sc);
17211 val_lo = U64_LO(sc);
17213 "0x%08X:%08X - (sc) driver softc structure virtual address\n",
17216 val_hi = U64_HI(sc->bxe_vhandle);
17217 val_lo = U64_LO(sc->bxe_vhandle);
17219 "0x%08X:%08X - (sc->bxe_vhandle) PCI BAR0 virtual address\n",
17222 val_hi = U64_HI(sc->bxe_db_vhandle);
17223 val_lo = U64_LO(sc->bxe_db_vhandle);
17225 "0x%08X:%08X - (sc->bxe_db_vhandle) PCI BAR2 virtual address\n",
17228 BXE_PRINTF(" 0x%08X - (sc->num_queues) Fastpath queues\n",
17230 BXE_PRINTF(" 0x%08X - (sc->rx_lane_swap) RX XAUI lane swap\n",
17232 BXE_PRINTF(" 0x%08X - (sc->tx_lane_swap) TX XAUI lane swap\n",
17234 BXE_PRINTF(" %16lu - (sc->debug_sim_mbuf_alloc_failed)\n",
17235 sc->debug_sim_mbuf_alloc_failed);
17236 BXE_PRINTF(" %16lu - (sc->debug_sim_mbuf_map_failed)\n",
17237 sc->debug_sim_mbuf_map_failed);
17240 "----------------------------"
17242 "----------------------------\n");
/* Follow up with the related per-subsystem dumps. */
17244 bxe_dump_port_state_locked(sc);
17245 bxe_dump_link_params_state_locked(sc);
17246 bxe_dump_link_vars_state_locked(sc);
17250 * Dump bootcode (MCP) debug buffer to the console.
/*
 * Debug-only: print the MCP (management firmware) circular debug buffer.
 * The "mark" word just below shmem_base locates the current write point;
 * the buffer is printed from mark to the end, then from the start back
 * up to mark, 8 words (32 bytes) at a time, byte-swapped with htonl so
 * the scratchpad text reads in natural order.
 *
 * NOTE(review): lossy extract -- data[8] NUL-termination lines are
 * missing from this view; without them the printf("%s") of 'data' would
 * rely on the firmware's own terminators. Confirm against the full file.
 */
17256 void bxe_dump_fw(struct bxe_softc *sc)
17258 uint32_t addr, mark, data[9], offset;
/* The mark word lives 0x800 bytes below the shared-memory base, +4. */
17261 addr = sc->common.shmem_base - 0x0800 + 4;
17262 mark = REG_RD(sc, addr);
17263 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
17266 "---------------------------"
17267 " MCP Debug Buffer "
17268 "---------------------------\n");
17270 /* Read from "mark" to the end of the buffer. */
17271 for (offset = mark; offset <= sc->common.shmem_base;
17272 offset += (0x8 * 4)) {
17273 for (word = 0; word < 8; word++)
17274 data[word] = htonl(REG_RD(sc, offset + 4 * word));
17276 printf("%s", (char *) data);
17279 /* Read from the start of the buffer to "mark". */
17280 for (offset = addr + 4; offset <= mark; offset += (0x8 * 4)) {
17281 for (word = 0; word < 8; word++)
17282 data[word] = htonl(REG_RD(sc, offset + 4 * word));
17284 printf("%s", (char *) data);
17288 "----------------------------"
17290 "----------------------------\n");
17294 * Decode firmware messages.
/*
 * Debug-only: decode a driver->MCP mailbox command header and/or an
 * MCP->driver response header into human-readable text, including the
 * sequence number carried in the low bits of each header.
 *
 * NOTE(review): lossy extract -- the printf( opener lines inside the
 * switch arms and all 'break;' lines are missing from this view.
 */
17300 bxe_decode_mb_msgs(struct bxe_softc *sc, uint32_t drv_mb_header,
17301 uint32_t fw_mb_header)
/* Driver-to-firmware command (if any). */
17304 if (drv_mb_header) {
17305 BXE_PRINTF("Driver message is ");
17306 switch (drv_mb_header & DRV_MSG_CODE_MASK) {
17307 case DRV_MSG_CODE_LOAD_REQ:
17309 "LOAD_REQ (0x%08X)",
17310 (uint32_t)DRV_MSG_CODE_LOAD_REQ);
17312 case DRV_MSG_CODE_LOAD_DONE:
17314 "LOAD_DONE (0x%08X)",
17315 (uint32_t)DRV_MSG_CODE_LOAD_DONE);
17317 case DRV_MSG_CODE_UNLOAD_REQ_WOL_EN:
17319 "UNLOAD_REQ_WOL_EN (0x%08X)",
17320 (uint32_t)DRV_MSG_CODE_UNLOAD_REQ_WOL_EN);
17322 case DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS:
17324 "UNLOAD_REQ_WOL_DIS (0x%08X)",
17325 (uint32_t)DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
17327 case DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP:
17329 "UNLOADREQ_WOL_MCP (0x%08X)",
17330 (uint32_t)DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
17332 case DRV_MSG_CODE_UNLOAD_DONE:
17334 "UNLOAD_DONE (0x%08X)",
17335 (uint32_t)DRV_MSG_CODE_UNLOAD_DONE);
17337 case DRV_MSG_CODE_DIAG_ENTER_REQ:
17339 "DIAG_ENTER_REQ (0x%08X)",
17340 (uint32_t)DRV_MSG_CODE_DIAG_ENTER_REQ);
17342 case DRV_MSG_CODE_DIAG_EXIT_REQ:
17344 "DIAG_EXIT_REQ (0x%08X)",
17345 (uint32_t)DRV_MSG_CODE_DIAG_EXIT_REQ);
17347 case DRV_MSG_CODE_VALIDATE_KEY:
17349 "CODE_VALIDITY_KEY (0x%08X)",
17350 (uint32_t)DRV_MSG_CODE_VALIDATE_KEY);
17352 case DRV_MSG_CODE_GET_CURR_KEY:
17354 "GET_CURR_KEY (0x%08X)",
17355 (uint32_t) DRV_MSG_CODE_GET_CURR_KEY);
17357 case DRV_MSG_CODE_GET_UPGRADE_KEY:
17359 "GET_UPGRADE_KEY (0x%08X)",
17360 (uint32_t)DRV_MSG_CODE_GET_UPGRADE_KEY);
17362 case DRV_MSG_CODE_GET_MANUF_KEY:
17364 "GET_MANUF_KEY (0x%08X)",
17365 (uint32_t)DRV_MSG_CODE_GET_MANUF_KEY);
17367 case DRV_MSG_CODE_LOAD_L2B_PRAM:
17369 "LOAD_L2B_PRAM (0x%08X)",
17370 (uint32_t)DRV_MSG_CODE_LOAD_L2B_PRAM);
/* BIOS-originated mailbox codes share the same command space. */
17372 case BIOS_MSG_CODE_LIC_CHALLENGE:
17374 "LIC_CHALLENGE (0x%08X)",
17375 (uint32_t)BIOS_MSG_CODE_LIC_CHALLENGE);
17377 case BIOS_MSG_CODE_LIC_RESPONSE:
17379 "LIC_RESPONSE (0x%08X)",
17380 (uint32_t)BIOS_MSG_CODE_LIC_RESPONSE);
17382 case BIOS_MSG_CODE_VIRT_MAC_PRIM:
17384 "VIRT_MAC_PRIM (0x%08X)",
17385 (uint32_t)BIOS_MSG_CODE_VIRT_MAC_PRIM);
17387 case BIOS_MSG_CODE_VIRT_MAC_ISCSI:
17389 "VIRT_MAC_ISCSI (0x%08X)",
17390 (uint32_t)BIOS_MSG_CODE_VIRT_MAC_ISCSI);
17394 "Unknown command (0x%08X)!",
17395 (drv_mb_header & DRV_MSG_CODE_MASK));
/* Low bits of the header carry a rolling sequence number. */
17398 printf(" (seq = 0x%04X)\n", (drv_mb_header &
17399 DRV_MSG_SEQ_NUMBER_MASK));
/* Firmware-to-driver response (if any). */
17402 if (fw_mb_header) {
17403 BXE_PRINTF("Firmware response is ");
17404 switch (fw_mb_header & FW_MSG_CODE_MASK) {
17405 case FW_MSG_CODE_DRV_LOAD_COMMON:
17407 "DRV_LOAD_COMMON (0x%08X)",
17408 (uint32_t)FW_MSG_CODE_DRV_LOAD_COMMON);
17410 case FW_MSG_CODE_DRV_LOAD_PORT:
17412 "DRV_LOAD_PORT (0x%08X)",
17413 (uint32_t)FW_MSG_CODE_DRV_LOAD_PORT);
17415 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
17417 "DRV_LOAD_FUNCTION (0x%08X)",
17418 (uint32_t)FW_MSG_CODE_DRV_LOAD_FUNCTION);
17420 case FW_MSG_CODE_DRV_LOAD_REFUSED:
17422 "DRV_LOAD_REFUSED (0x%08X)",
17423 (uint32_t)FW_MSG_CODE_DRV_LOAD_REFUSED);
17425 case FW_MSG_CODE_DRV_LOAD_DONE:
17427 "DRV_LOAD_DONE (0x%08X)",
17428 (uint32_t)FW_MSG_CODE_DRV_LOAD_DONE);
17430 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
17432 "DRV_UNLOAD_COMMON (0x%08X)",
17433 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_COMMON);
17435 case FW_MSG_CODE_DRV_UNLOAD_PORT:
17437 "DRV_UNLOAD_PORT (0x%08X)",
17438 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_PORT);
17440 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
17442 "DRV_UNLOAD_FUNCTION (0x%08X)",
17443 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
17445 case FW_MSG_CODE_DRV_UNLOAD_DONE:
17447 "DRV_UNLOAD_DONE (0x%08X)",
17448 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_DONE);
17450 case FW_MSG_CODE_DIAG_ENTER_DONE:
17452 "DIAG_ENTER_DONE (0x%08X)",
17453 (uint32_t)FW_MSG_CODE_DIAG_ENTER_DONE);
17455 case FW_MSG_CODE_DIAG_REFUSE:
17457 "DIAG_REFUSE (0x%08X)",
17458 (uint32_t)FW_MSG_CODE_DIAG_REFUSE);
17460 case FW_MSG_CODE_DIAG_EXIT_DONE:
17462 "DIAG_EXIT_DONE (0x%08X)",
17463 (uint32_t)FW_MSG_CODE_DIAG_EXIT_DONE);
17465 case FW_MSG_CODE_VALIDATE_KEY_SUCCESS:
17467 "VALIDATE_KEY_SUCCESS (0x%08X)",
17468 (uint32_t)FW_MSG_CODE_VALIDATE_KEY_SUCCESS);
17470 case FW_MSG_CODE_VALIDATE_KEY_FAILURE:
17472 "VALIDATE_KEY_FAILURE (0x%08X)",
17473 (uint32_t)FW_MSG_CODE_VALIDATE_KEY_FAILURE);
17475 case FW_MSG_CODE_GET_KEY_DONE:
17477 "GET_KEY_DONE (0x%08X)",
17478 (uint32_t)FW_MSG_CODE_GET_KEY_DONE);
17480 case FW_MSG_CODE_NO_KEY:
17483 (uint32_t)FW_MSG_CODE_NO_KEY);
17487 "unknown value (0x%08X)!",
17488 (fw_mb_header & FW_MSG_CODE_MASK));
17491 printf(" (seq = 0x%04X)\n", (fw_mb_header &
17492 FW_MSG_SEQ_NUMBER_MASK));
17497 * Prints a text string for the ramrod command.
/*
 * Debug-only: translate a ramrod (slow-path) command ID into its name.
 *
 * NOTE(review): lossy extract -- the switch(command) opener line and all
 * 'break;' lines are missing from this view.
 */
17503 bxe_decode_ramrod_cmd(struct bxe_softc *sc, int command)
17505 BXE_PRINTF("Ramrod command = ");
17508 case RAMROD_CMD_ID_ETH_PORT_SETUP:
17509 printf("ETH_PORT_SETUP\n");
17511 case RAMROD_CMD_ID_ETH_CLIENT_SETUP:
17512 printf("ETH_CLIENT_SETUP\n");
17514 case RAMROD_CMD_ID_ETH_STAT_QUERY:
17515 printf("ETH_STAT_QUERY\n");
17517 case RAMROD_CMD_ID_ETH_UPDATE:
17518 printf("ETH_UPDATE\n");
17520 case RAMROD_CMD_ID_ETH_HALT:
17521 printf("ETH_HALT\n");
17523 case RAMROD_CMD_ID_ETH_SET_MAC:
17524 printf("ETH_SET_MAC\n");
17526 case RAMROD_CMD_ID_ETH_CFC_DEL:
17527 printf("ETH_CFC_DEL\n");
17529 case RAMROD_CMD_ID_ETH_PORT_DEL:
17530 printf("ETH_PORT_DEL\n");
17532 case RAMROD_CMD_ID_ETH_FORWARD_SETUP:
17533 printf("ETH_FORWARD_SETUP\n");
/* default arm: unrecognized command ID. */
17536 printf("Unknown ramrod command!\n");
17542 * Prints out driver information and forces a kernel breakpoint.
17548 bxe_breakpoint(struct bxe_softc *sc)
17550 struct bxe_fastpath *fp;
17554 /* Unreachable code to silence the compiler about unused functions. */
17556 bxe_reg_read16(sc, PCICFG_OFFSET);
17557 bxe_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
17558 bxe_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
17559 bxe_dump_tx_chain(fp, 0, USABLE_TX_BD);
17560 bxe_dump_rx_cq_chain(fp, 0, USABLE_RCQ_ENTRIES);
17561 bxe_dump_rx_bd_chain(fp, 0, USABLE_RX_BD);
17562 bxe_dump_status_block(sc);
17563 bxe_dump_stats_block(sc);
17564 bxe_dump_fp_state(fp);
17565 bxe_dump_driver_state(sc);
17566 bxe_dump_hw_state(sc);
17571 * Do some device sanity checking. Run it twice in case
17572 * the hardware is still running so we can identify any
17573 * transient conditions.
17575 bxe_idle_chk(sc); bxe_idle_chk(sc);
17577 bxe_dump_driver_state(sc);
17579 for (i = 0; i < sc->num_queues; i++)
17580 bxe_dump_fp_state(&sc->fp[i]);
17582 bxe_dump_status_block(sc);
17585 /* Call the OS debugger. */