2 * Copyright (c) 2007-2011 Broadcom Corporation. All rights reserved.
4 * Gary Zambrano <zambrano@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written consent.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * The following controllers are supported by this driver:
42 * The following controllers are not supported by this driver:
43 * BCM57710 A0 (pre-production)
45 * External PHY References:
46 * ------------------------
47 * BCM8073 - Dual Port 10GBase-KR Ethernet PHY
48 * BCM8705 - 10Gb Ethernet Serial Transceiver
49 * BCM8706 - 10Gb Ethernet LRM PHY
50 * BCM8726 - Dual Port 10Gb Ethernet LRM PHY
51 * BCM8727 - Dual Port 10Gb Ethernet LRM PHY
52 * BCM8481 - Single Port 10GBase-T Ethernet PHY
53 * BCM84823 - Dual Port 10GBase-T Ethernet PHY
54 * SFX7101 - Solarflare 10GBase-T Ethernet PHY
59 #include "bxe_include.h"
63 #include "hw_dump_reg_st.h"
67 #include "bxe_self_test.h"
69 /* BXE Debug Options */
/*
 * Run-time debug logging level; BXE_WARN enables warnings and above.
 * NOTE(review): every surviving line in this dump carries a leading
 * numeral that appears to be a line number fused in from a numbered
 * paste - confirm against pristine source.
 */
71 uint32_t bxe_debug = BXE_WARN;
/*
 * The table below maps a simulated-failure control value to the
 * approximate probability that any single operation fails.
 */
74 /* 1 = 1 in 2,147,483,648 */
75 /* 256 = 1 in 8,388,608 */
76 /* 2048 = 1 in 1,048,576 */
77 /* 65536 = 1 in 32,768 */
78 /* 1048576 = 1 in 2,048 */
79 /* 268435456 = 1 in 8 */
80 /* 536870912 = 1 in 4 */
81 /* 1073741824 = 1 in 2 */
83 /* Controls how often to simulate an mbuf allocation failure. */
84 int bxe_debug_mbuf_allocation_failure = 0;
86 /* Controls how often to simulate a DMA mapping failure. */
87 int bxe_debug_dma_map_addr_failure = 0;
89 /* Controls how often to simulate a bootcode failure. */
90 int bxe_debug_bootcode_running_failure = 0;
/* MDIO register address used to select an indirect register bank. */
93 #define MDIO_INDIRECT_REG_ADDR 0x1f
/* Select the MDIO register bank prior to an indirect register access. */
94 #define MDIO_SET_REG_BANK(sc, reg_bank) \
95 bxe_mdio22_write(sc, MDIO_INDIRECT_REG_ADDR, reg_bank)
/* Timeout bound for MDIO accesses (units defined at the use sites). */
97 #define MDIO_ACCESS_TIMEOUT 1000
98 #define BMAC_CONTROL_RX_ENABLE 2
100 /* BXE Build Time Options */
101 /* #define BXE_NVRAM_WRITE 1 */
102 #define BXE_USE_DMAE 1
105 * PCI Device ID Table
106 * Used by bxe_probe() to identify the devices supported by this driver.
/* Maximum length of the device description string built in bxe_probe(). */
108 #define BXE_DEVDESC_MAX 64
/*
 * NOTE(review): the sentinel entry terminating this table and the
 * closing brace fall outside this dump - verify they are present in
 * the pristine source.
 */
110 static struct bxe_type bxe_devs[] = {
111 /* BCM57710 Controllers and OEM boards. */
112 { BRCM_VENDORID, BRCM_DEVICEID_BCM57710, PCI_ANY_ID, PCI_ANY_ID,
113 "Broadcom NetXtreme II BCM57710 10GbE" },
114 /* BCM57711 Controllers and OEM boards. */
115 { BRCM_VENDORID, BRCM_DEVICEID_BCM57711, PCI_ANY_ID, PCI_ANY_ID,
116 "Broadcom NetXtreme II BCM57711 10GbE" },
117 /* BCM57711E Controllers and OEM boards. */
118 { BRCM_VENDORID, BRCM_DEVICEID_BCM57711E, PCI_ANY_ID, PCI_ANY_ID,
119 "Broadcom NetXtreme II BCM57711E 10GbE" },
124 * FreeBSD device entry points.
126 static int bxe_probe(device_t);
127 static int bxe_attach(device_t);
128 static int bxe_detach(device_t);
129 static int bxe_shutdown(device_t);
132 * Driver local functions.
134 static void bxe_tunables_set(struct bxe_softc *);
135 static void bxe_print_adapter_info(struct bxe_softc *);
136 static void bxe_probe_pci_caps(struct bxe_softc *);
137 static void bxe_link_settings_supported(struct bxe_softc *, uint32_t);
138 static void bxe_link_settings_requested(struct bxe_softc *);
139 static int bxe_hwinfo_function_get(struct bxe_softc *);
140 static int bxe_hwinfo_port_get(struct bxe_softc *);
141 static int bxe_hwinfo_common_get(struct bxe_softc *);
142 static void bxe_undi_unload(struct bxe_softc *);
143 static int bxe_setup_leading(struct bxe_softc *);
144 static int bxe_stop_leading(struct bxe_softc *);
145 static int bxe_setup_multi(struct bxe_softc *, int);
146 static int bxe_stop_multi(struct bxe_softc *, int);
147 static int bxe_stop_locked(struct bxe_softc *, int);
148 static int bxe_alloc_buf_rings(struct bxe_softc *);
149 static void bxe_free_buf_rings(struct bxe_softc *);
150 static void bxe_init_locked(struct bxe_softc *, int);
151 static int bxe_wait_ramrod(struct bxe_softc *, int, int, int *, int);
152 static void bxe_init_str_wr(struct bxe_softc *, uint32_t, const uint32_t *,
154 static void bxe_init_ind_wr(struct bxe_softc *, uint32_t, const uint32_t *,
156 static void bxe_init_wr_64(struct bxe_softc *, uint32_t, const uint32_t *,
158 static void bxe_write_big_buf(struct bxe_softc *, uint32_t, uint32_t);
159 static void bxe_init_fill(struct bxe_softc *, uint32_t, int, uint32_t);
160 static void bxe_init_block(struct bxe_softc *, uint32_t, uint32_t);
161 static void bxe_init(void *);
162 static void bxe_release_resources(struct bxe_softc *);
163 static void bxe_reg_wr_ind(struct bxe_softc *, uint32_t, uint32_t);
164 static uint32_t bxe_reg_rd_ind(struct bxe_softc *, uint32_t);
165 static void bxe_post_dmae(struct bxe_softc *, struct dmae_command *, int);
166 static void bxe_wb_wr(struct bxe_softc *, int, uint32_t, uint32_t);
167 static __inline uint32_t bxe_reg_poll(struct bxe_softc *, uint32_t,
169 static int bxe_mc_assert(struct bxe_softc *);
170 static void bxe_panic_dump(struct bxe_softc *);
171 static void bxe_int_enable(struct bxe_softc *);
172 static void bxe_int_disable(struct bxe_softc *);
174 static int bxe_nvram_acquire_lock(struct bxe_softc *);
175 static int bxe_nvram_release_lock(struct bxe_softc *);
176 static void bxe_nvram_enable_access(struct bxe_softc *);
177 static void bxe_nvram_disable_access(struct bxe_softc *);
178 static int bxe_nvram_read_dword (struct bxe_softc *, uint32_t, uint32_t *,
180 static int bxe_nvram_read(struct bxe_softc *, uint32_t, uint8_t *, int);
182 #ifdef BXE_NVRAM_WRITE_SUPPORT
183 static int bxe_nvram_write_dword(struct bxe_softc *, uint32_t, uint32_t,
185 static int bxe_nvram_write1(struct bxe_softc *, uint32_t, uint8_t *, int);
186 static int bxe_nvram_write(struct bxe_softc *, uint32_t, uint8_t *, int);
189 static int bxe_nvram_test(struct bxe_softc *);
191 static __inline void bxe_ack_sb(struct bxe_softc *, uint8_t, uint8_t, uint16_t,
193 static __inline uint16_t bxe_update_fpsb_idx(struct bxe_fastpath *);
194 static uint16_t bxe_ack_int(struct bxe_softc *);
195 static void bxe_sp_event(struct bxe_fastpath *, union eth_rx_cqe *);
196 static int bxe_acquire_hw_lock(struct bxe_softc *, uint32_t);
197 static int bxe_release_hw_lock(struct bxe_softc *, uint32_t);
198 static void bxe_acquire_phy_lock(struct bxe_softc *);
199 static void bxe_release_phy_lock(struct bxe_softc *);
200 static void bxe_pmf_update(struct bxe_softc *);
201 static void bxe_init_port_minmax(struct bxe_softc *);
202 static void bxe_link_attn(struct bxe_softc *);
204 static int bxe_sp_post(struct bxe_softc *, int, int, uint32_t, uint32_t, int);
205 static int bxe_acquire_alr(struct bxe_softc *);
206 static void bxe_release_alr(struct bxe_softc *);
207 static uint16_t bxe_update_dsb_idx(struct bxe_softc *);
208 static void bxe_attn_int_asserted(struct bxe_softc *, uint32_t);
209 static __inline void bxe_attn_int_deasserted0(struct bxe_softc *, uint32_t);
210 static __inline void bxe_attn_int_deasserted1(struct bxe_softc *, uint32_t);
211 static __inline void bxe_attn_int_deasserted2(struct bxe_softc *, uint32_t);
212 static __inline void bxe_attn_int_deasserted3(struct bxe_softc *, uint32_t);
213 static void bxe_attn_int_deasserted(struct bxe_softc *, uint32_t);
214 static void bxe_attn_int(struct bxe_softc *);
216 static void bxe_stats_storm_post(struct bxe_softc *);
217 static void bxe_stats_init(struct bxe_softc *);
218 static void bxe_stats_hw_post(struct bxe_softc *);
219 static int bxe_stats_comp(struct bxe_softc *);
220 static void bxe_stats_pmf_update(struct bxe_softc *);
221 static void bxe_stats_port_base_init(struct bxe_softc *);
222 static void bxe_stats_port_init(struct bxe_softc *);
223 static void bxe_stats_func_base_init(struct bxe_softc *);
224 static void bxe_stats_func_init(struct bxe_softc *);
225 static void bxe_stats_start(struct bxe_softc *);
226 static void bxe_stats_pmf_start(struct bxe_softc *);
227 static void bxe_stats_restart(struct bxe_softc *);
228 static void bxe_stats_bmac_update(struct bxe_softc *);
229 static void bxe_stats_emac_update(struct bxe_softc *);
230 static int bxe_stats_hw_update(struct bxe_softc *);
231 static int bxe_stats_storm_update(struct bxe_softc *);
232 static void bxe_stats_func_base_update(struct bxe_softc *);
233 static void bxe_stats_update(struct bxe_softc *);
234 static void bxe_stats_port_stop(struct bxe_softc *);
235 static void bxe_stats_stop(struct bxe_softc *);
236 static void bxe_stats_do_nothing(struct bxe_softc *);
237 static void bxe_stats_handle(struct bxe_softc *, enum bxe_stats_event);
239 static int bxe_tx_encap(struct bxe_fastpath *, struct mbuf **);
240 static void bxe_tx_start(struct ifnet *);
241 static void bxe_tx_start_locked(struct ifnet *, struct bxe_fastpath *);
242 static int bxe_tx_mq_start(struct ifnet *, struct mbuf *);
243 static int bxe_tx_mq_start_locked(struct ifnet *,
244 struct bxe_fastpath *, struct mbuf *);
245 static void bxe_mq_flush(struct ifnet *ifp);
246 static int bxe_ioctl(struct ifnet *, u_long, caddr_t);
247 static __inline int bxe_has_rx_work(struct bxe_fastpath *);
248 static __inline int bxe_has_tx_work(struct bxe_fastpath *);
250 static void bxe_intr_legacy(void *);
251 static void bxe_task_sp(void *, int);
252 static void bxe_intr_sp(void *);
253 static void bxe_task_fp(void *, int);
254 static void bxe_intr_fp(void *);
255 static void bxe_zero_sb(struct bxe_softc *, int);
256 static void bxe_init_sb(struct bxe_softc *,
257 struct host_status_block *, bus_addr_t, int);
258 static void bxe_zero_def_sb(struct bxe_softc *);
259 static void bxe_init_def_sb(struct bxe_softc *,
260 struct host_def_status_block *, bus_addr_t, int);
261 static void bxe_update_coalesce(struct bxe_softc *);
262 static __inline void bxe_update_rx_prod(struct bxe_softc *,
263 struct bxe_fastpath *, uint16_t, uint16_t, uint16_t);
264 static void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *);
265 static __inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *);
266 static int bxe_alloc_tpa_mbuf(struct bxe_fastpath *, int);
267 static int bxe_fill_tpa_pool(struct bxe_fastpath *);
268 static void bxe_free_tpa_pool(struct bxe_fastpath *);
270 static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *, uint16_t);
271 static int bxe_fill_sg_chain(struct bxe_fastpath *);
272 static void bxe_free_sg_chain(struct bxe_fastpath *);
274 static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *, uint16_t);
275 static int bxe_fill_rx_bd_chain(struct bxe_fastpath *);
276 static void bxe_free_rx_bd_chain(struct bxe_fastpath *);
278 static void bxe_mutexes_alloc(struct bxe_softc *);
279 static void bxe_mutexes_free(struct bxe_softc *);
280 static void bxe_clear_rx_chains(struct bxe_softc *);
281 static int bxe_init_rx_chains(struct bxe_softc *);
282 static void bxe_clear_tx_chains(struct bxe_softc *);
283 static void bxe_init_tx_chains(struct bxe_softc *);
284 static void bxe_init_sp_ring(struct bxe_softc *);
285 static void bxe_init_context(struct bxe_softc *);
286 static void bxe_init_ind_table(struct bxe_softc *);
287 static void bxe_set_client_config(struct bxe_softc *);
288 static void bxe_set_storm_rx_mode(struct bxe_softc *);
289 static void bxe_init_internal_common(struct bxe_softc *);
290 static void bxe_init_internal_port(struct bxe_softc *);
292 static void bxe_init_internal_func(struct bxe_softc *);
293 static void bxe_init_internal(struct bxe_softc *, uint32_t);
294 static int bxe_init_nic(struct bxe_softc *, uint32_t);
295 static void bxe_lb_pckt(struct bxe_softc *);
296 static int bxe_int_mem_test(struct bxe_softc *);
297 static void bxe_enable_blocks_attention (struct bxe_softc *);
299 static void bxe_init_pxp(struct bxe_softc *);
300 static int bxe_init_common(struct bxe_softc *);
301 static int bxe_init_port(struct bxe_softc *);
302 static void bxe_ilt_wr(struct bxe_softc *, uint32_t, bus_addr_t);
303 static int bxe_init_func(struct bxe_softc *);
304 static int bxe_init_hw(struct bxe_softc *, uint32_t);
305 static int bxe_fw_command(struct bxe_softc *, uint32_t);
306 static void bxe_host_structures_free(struct bxe_softc *);
307 static void bxe_dma_map_addr(void *, bus_dma_segment_t *, int, int);
308 static int bxe_host_structures_alloc(device_t);
309 static void bxe_set_mac_addr_e1(struct bxe_softc *, int);
310 static void bxe_set_mac_addr_e1h(struct bxe_softc *, int);
311 static void bxe_set_rx_mode(struct bxe_softc *);
312 static void bxe_reset_func(struct bxe_softc *);
313 static void bxe_reset_port(struct bxe_softc *);
314 static void bxe_reset_common(struct bxe_softc *);
315 static void bxe_reset_chip(struct bxe_softc *, uint32_t);
316 static int bxe_ifmedia_upd(struct ifnet *);
317 static void bxe_ifmedia_status(struct ifnet *, struct ifmediareq *);
318 static __inline void bxe_update_last_max_sge(struct bxe_fastpath *, uint16_t);
319 static void bxe_update_sge_prod(struct bxe_fastpath *,
320 struct eth_fast_path_rx_cqe *);
321 static void bxe_tpa_start(struct bxe_fastpath *, uint16_t, uint16_t, uint16_t);
322 static int bxe_fill_frag_mbuf(struct bxe_softc *, struct bxe_fastpath *,
323 struct mbuf *, struct eth_fast_path_rx_cqe *, uint16_t);
324 static void bxe_tpa_stop(struct bxe_softc *, struct bxe_fastpath *, uint16_t,
325 int, int, union eth_rx_cqe *, uint16_t);
326 static void bxe_rxeof(struct bxe_fastpath *);
327 static void bxe_txeof(struct bxe_fastpath *);
328 static int bxe_watchdog(struct bxe_fastpath *fp);
329 static void bxe_tick(void *);
330 static void bxe_add_sysctls(struct bxe_softc *);
332 static void bxe_write_dmae_phys_len(struct bxe_softc *,
333 bus_addr_t, uint32_t, uint32_t);
335 void bxe_write_dmae(struct bxe_softc *, bus_addr_t, uint32_t, uint32_t);
336 void bxe_read_dmae(struct bxe_softc *, uint32_t, uint32_t);
337 int bxe_set_gpio(struct bxe_softc *, int, uint32_t, uint8_t);
338 int bxe_get_gpio(struct bxe_softc *, int, uint8_t);
339 int bxe_set_spio(struct bxe_softc *, int, uint32_t);
340 int bxe_set_gpio_int(struct bxe_softc *, int, uint32_t, uint8_t);
343 * BXE Debug Data Structure Dump Routines
347 static int bxe_sysctl_driver_state(SYSCTL_HANDLER_ARGS);
348 static int bxe_sysctl_hw_state(SYSCTL_HANDLER_ARGS);
349 static int bxe_sysctl_dump_fw(SYSCTL_HANDLER_ARGS);
350 static int bxe_sysctl_dump_rx_cq_chain(SYSCTL_HANDLER_ARGS);
351 static int bxe_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS);
352 static int bxe_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS);
353 static int bxe_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
354 static int bxe_sysctl_breakpoint(SYSCTL_HANDLER_ARGS);
355 static __noinline void bxe_validate_rx_packet(struct bxe_fastpath *,
356 uint16_t, union eth_rx_cqe *, struct mbuf *);
357 static void bxe_grcdump(struct bxe_softc *, int);
358 static __noinline void bxe_dump_enet(struct bxe_softc *,struct mbuf *);
359 static __noinline void bxe_dump_mbuf (struct bxe_softc *, struct mbuf *);
360 static __noinline void bxe_dump_tx_mbuf_chain(struct bxe_softc *, int, int);
361 static __noinline void bxe_dump_rx_mbuf_chain(struct bxe_softc *, int, int);
362 static __noinline void bxe_dump_tx_parsing_bd(struct bxe_fastpath *,int,
363 struct eth_tx_parse_bd *);
364 static __noinline void bxe_dump_txbd(struct bxe_fastpath *, int,
365 union eth_tx_bd_types *);
366 static __noinline void bxe_dump_rxbd(struct bxe_fastpath *, int,
368 static __noinline void bxe_dump_cqe(struct bxe_fastpath *,
369 int, union eth_rx_cqe *);
370 static __noinline void bxe_dump_tx_chain(struct bxe_fastpath *, int, int);
371 static __noinline void bxe_dump_rx_cq_chain(struct bxe_fastpath *, int, int);
372 static __noinline void bxe_dump_rx_bd_chain(struct bxe_fastpath *, int, int);
373 static __noinline void bxe_dump_status_block(struct bxe_softc *);
374 static __noinline void bxe_dump_stats_block(struct bxe_softc *);
375 static __noinline void bxe_dump_fp_state(struct bxe_fastpath *);
376 static __noinline void bxe_dump_port_state_locked(struct bxe_softc *);
377 static __noinline void bxe_dump_link_vars_state_locked(struct bxe_softc *);
378 static __noinline void bxe_dump_link_params_state_locked(struct bxe_softc *);
379 static __noinline void bxe_dump_driver_state(struct bxe_softc *);
380 static __noinline void bxe_dump_hw_state(struct bxe_softc *);
381 static __noinline void bxe_dump_fw(struct bxe_softc *);
382 static void bxe_decode_mb_msgs(struct bxe_softc *, uint32_t, uint32_t);
383 static void bxe_decode_ramrod_cmd(struct bxe_softc *, int);
384 static void bxe_breakpoint(struct bxe_softc *);
/* Driver version string reported in probe/attach messages. */
388 #define BXE_DRIVER_VERSION "1.5.52"
390 static void bxe_init_e1_firmware(struct bxe_softc *sc);
391 static void bxe_init_e1h_firmware(struct bxe_softc *sc);
394 * FreeBSD device dispatch table.
396 static device_method_t bxe_methods[] = {
397 /* Device interface (device_if.h) */
398 DEVMETHOD(device_probe, bxe_probe),
399 DEVMETHOD(device_attach, bxe_attach),
400 DEVMETHOD(device_detach, bxe_detach),
401 DEVMETHOD(device_shutdown, bxe_shutdown),
/*
 * NOTE(review): the method-table terminator and closing brace, and the
 * driver_t name/methods member initializers, are missing from this
 * dump - verify against pristine source.
 */
407 static driver_t bxe_driver = {
410 sizeof(struct bxe_softc)
413 static devclass_t bxe_devclass;
/* The module requires the PCI bus and Ethernet frameworks. */
415 MODULE_DEPEND(bxe, pci, 1, 1, 1);
416 MODULE_DEPEND(bxe, ether, 1, 1, 1);
417 DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
420 * Tunable device values
422 static SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
423 /* Allowable values are TRUE (1) or FALSE (0). */
425 static int bxe_dcc_enable = FALSE;
426 TUNABLE_INT("hw.bxe.dcc_enable", &bxe_dcc_enable);
427 SYSCTL_UINT(_hw_bxe, OID_AUTO, dcc_enable, CTLFLAG_RDTUN, &bxe_dcc_enable,
428 0, "dcc Enable/Disable");
430 /* Allowable values are TRUE (1) or FALSE (0). */
431 static int bxe_tso_enable = TRUE;
432 TUNABLE_INT("hw.bxe.tso_enable", &bxe_tso_enable);
433 SYSCTL_UINT(_hw_bxe, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bxe_tso_enable,
434 0, "TSO Enable/Disable");
436 /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ). */
437 static int bxe_int_mode = 2;
438 TUNABLE_INT("hw.bxe.int_mode", &bxe_int_mode);
439 SYSCTL_UINT(_hw_bxe, OID_AUTO, int_mode, CTLFLAG_RDTUN, &bxe_int_mode,
440 0, "Interrupt (MSI-X|MSI|INTx) mode");
443 * Specifies the number of queues that will be used when a multi-queue
444 * RSS mode is selected using bxe_multi_mode below.
446 * Allowable values are 0 (Auto) or 1 to MAX_CONTEXT (fixed queue number).
448 static int bxe_queue_count = 0;
449 TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
450 SYSCTL_UINT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &bxe_queue_count,
451 0, "Multi-Queue queue count");
454 * ETH_RSS_MODE_DISABLED (0)
455 * Disables all multi-queue/packet sorting algorithms. All
456 * received frames are routed to a single receive queue.
458 * ETH_RSS_MODE_REGULAR (1)
459 * The default mode which assigns incoming frames to receive
460 * queues according to RSS (i.e a 2-tuple match on the source/
461 * destination IP address or a 4-tuple match on the source/
462 * destination IP address and the source/destination TCP port).
465 static int bxe_multi_mode = ETH_RSS_MODE_REGULAR;
466 TUNABLE_INT("hw.bxe.multi_mode", &bxe_multi_mode);
467 SYSCTL_UINT(_hw_bxe, OID_AUTO, multi_mode, CTLFLAG_RDTUN, &bxe_multi_mode,
468 0, "Multi-Queue Mode");
471 * Host interrupt coalescing is controller by these values.
472 * The first frame always causes an interrupt but subsequent
473 * frames are coalesced until the RX/TX ticks timer value
474 * expires and another interrupt occurs. (Ticks are measured
477 static uint32_t bxe_rx_ticks = 25;
478 TUNABLE_INT("hw.bxe.rx_ticks", &bxe_rx_ticks);
479 SYSCTL_UINT(_hw_bxe, OID_AUTO, rx_ticks, CTLFLAG_RDTUN, &bxe_rx_ticks,
482 static uint32_t bxe_tx_ticks = 50;
483 TUNABLE_INT("hw.bxe.tx_ticks", &bxe_tx_ticks);
484 SYSCTL_UINT(_hw_bxe, OID_AUTO, tx_ticks, CTLFLAG_RDTUN, &bxe_tx_ticks,
485 0, "Transmit ticks");
488 * Allows the PCIe maximum read request size value to be manually
489 * set during initialization rather than automatically determined
492 * Allowable values are:
493 * -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB)
495 static int bxe_mrrs = -1;
496 TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
497 SYSCTL_UINT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, &bxe_mrrs,
498 0, "PCIe maximum read request size.");
502 * Allows setting the maximum number of received frames to process
503 * during an interrupt.
505 * Allowable values are:
506 * -1 (Unlimited), 0 (None), otherwise specifies the number of RX frames.
508 static int bxe_rx_limit = -1;
509 TUNABLE_INT("hw.bxe.rx_limit", &bxe_rx_limit);
510 SYSCTL_UINT(_hw_bxe, OID_AUTO, rx_limit, CTLFLAG_RDTUN, &bxe_rx_limit,
511 0, "Maximum received frames processed during an interrupt.");
514 * Allows setting the maximum number of transmit frames to process
515 * during an interrupt.
517 * Allowable values are:
518 * -1 (Unlimited), 0 (None), otherwise specifies the number of TX frames.
520 static int bxe_tx_limit = -1;
521 TUNABLE_INT("hw.bxe.tx_limit", &bxe_tx_limit);
522 SYSCTL_UINT(_hw_bxe, OID_AUTO, tx_limit, CTLFLAG_RDTUN, &bxe_tx_limit,
523 0, "Maximum transmit frames processed during an interrupt.");
530 /* 0 is common, 1 is port 0, 2 is port 1. */
531 static int load_count[3];
/*
 * NOTE(review): the variable this next comment documents (presumably a
 * 'nomcp' flag) is missing from this dump - verify against pristine
 * source.
 */
533 /* Tracks whether MCP firmware is running. */
538 * A debug version of the 32 bit OS register write function to
539 * capture/display values written to the controller.
545 bxe_reg_write32(struct bxe_softc *sc, bus_size_t offset, uint32_t val)
548 if ((offset % 4) != 0) {
549 DBPRINT(sc, BXE_WARN,
550 "%s(): Warning! Unaligned write to 0x%jX!\n", __FUNCTION__,
554 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
555 __FUNCTION__, (uintmax_t)offset, val);
557 bus_space_write_4(sc->bxe_btag, sc->bxe_bhandle, offset, val);
561 * A debug version of the 16 bit OS register write function to
562 * capture/display values written to the controller.
568 bxe_reg_write16(struct bxe_softc *sc, bus_size_t offset, uint16_t val)
571 if ((offset % 2) != 0) {
572 DBPRINT(sc, BXE_WARN,
573 "%s(): Warning! Unaligned write to 0x%jX!\n", __FUNCTION__,
577 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%04X\n",
578 __FUNCTION__, (uintmax_t)offset, val);
580 bus_space_write_2(sc->bxe_btag, sc->bxe_bhandle, offset, val);
584 * A debug version of the 8 bit OS register write function to
585 * capture/display values written to the controller.
591 bxe_reg_write8(struct bxe_softc *sc, bus_size_t offset, uint8_t val)
594 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%02X\n",
595 __FUNCTION__, (uintmax_t)offset, val);
597 bus_space_write_1(sc->bxe_btag, sc->bxe_bhandle, offset, val);
601 * A debug version of the 32 bit OS register read function to
602 * capture/display values read from the controller.
608 bxe_reg_read32(struct bxe_softc *sc, bus_size_t offset)
612 if ((offset % 4) != 0) {
613 DBPRINT(sc, BXE_WARN,
614 "%s(): Warning! Unaligned read from 0x%jX!\n",
615 __FUNCTION__, (uintmax_t)offset);
618 val = bus_space_read_4(sc->bxe_btag, sc->bxe_bhandle, offset);
620 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
621 __FUNCTION__, (uintmax_t)offset, val);
627 * A debug version of the 16 bit OS register read function to
628 * capture/display values read from the controller.
634 bxe_reg_read16(struct bxe_softc *sc, bus_size_t offset)
638 if ((offset % 2) != 0) {
639 DBPRINT(sc, BXE_WARN,
640 "%s(): Warning! Unaligned read from 0x%jX!\n",
641 __FUNCTION__, (uintmax_t)offset);
644 val = bus_space_read_2(sc->bxe_btag, sc->bxe_bhandle, offset);
646 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
647 __FUNCTION__, (uintmax_t)offset, val);
654 * A debug version of the 8 bit OS register write function to
655 * capture/display values written to the controller.
661 bxe_reg_read8(struct bxe_softc *sc, bus_size_t offset)
663 uint8_t val = bus_space_read_1(sc->bxe_btag, sc->bxe_bhandle, offset);
665 DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%02X\n",
666 __FUNCTION__, (uintmax_t)offset, val);
673 bxe_read_mf_cfg(struct bxe_softc *sc)
677 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
678 func = 2 * vn + BP_PORT(sc);
680 SHMEM_RD(sc,mf_cfg.func_mf_config[func].config);
686 bxe_e1h_disable(struct bxe_softc *sc)
691 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0);
692 sc->bxe_ifp->if_drv_flags = 0;
696 bxe_e1h_enable(struct bxe_softc *sc)
701 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
702 sc->bxe_ifp->if_drv_flags = IFF_DRV_RUNNING;
706 * Calculates the sum of vn_min_rates.
707 * It's needed for further normalizing of the min_rates.
709 * sum of vn_min_rates.
711 * 0 - if all the min_rates are 0. In the later case fainess
712 * algorithm should be deactivated. If not all min_rates are
713 * zero then those that are zeroes will be set to 1.
716 bxe_calc_vn_wsum(struct bxe_softc *sc)
718 uint32_t vn_cfg, vn_min_rate;
721 DBENTER(BXE_VERBOSE_LOAD);
725 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
726 vn_cfg = sc->mf_config[vn];
727 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
728 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
729 /* Skip hidden vns */
730 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
732 /* If min rate is zero - set it to 1. */
734 vn_min_rate = DEF_MIN_RATE;
738 sc->vn_wsum += vn_min_rate;
741 /* ... only if all min rates are zeros - disable fairness */
743 sc->cmng.flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
745 sc->cmng.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
747 DBEXIT(BXE_VERBOSE_LOAD);
/*
 * bxe_init_vn_minmax: program per-VN (virtual NIC) rate-shaping and
 * fairness parameters into XSTORM internal memory for function 'vn'.
 *
 * NOTE(review): this dump is missing lines (return type, braces, local
 * declarations of vn_cfg/func/i, the FUNC_HIDE body, the
 * vn_max_rate == 0 handling, and part of the credit-delta expression) -
 * verify against pristine source.
 */
756 bxe_init_vn_minmax(struct bxe_softc *sc, int vn)
758 struct rate_shaping_vars_per_vn m_rs_vn;
759 struct fairness_vars_per_vn m_fair_vn;
761 uint16_t vn_min_rate, vn_max_rate;
764 vn_cfg = sc->mf_config[vn];
765 func = 2 * vn + BP_PORT(sc);
767 DBENTER(BXE_VERBOSE_LOAD);
769 /* If function is hidden - set min and max to zeroes. */
770 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
/* Min bandwidth is stored in units of 100 Mbps; scale to Mbps. */
774 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
775 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
777 * If fairness is enabled (i.e. not all min rates are zero),
778 * and if the current min rate is zero, set it to 1.
779 * This is a requirement of the algorithm.
781 if (sc->vn_wsum && (vn_min_rate == 0))
782 vn_min_rate = DEF_MIN_RATE;
784 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
785 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
787 if (vn_max_rate == 0)
790 DBPRINT(sc, BXE_INFO_LOAD,
791 "%s(): func %d: vn_min_rate = %d, vn_max_rate = %d, wsum = %d.\n",
792 __FUNCTION__, func, vn_min_rate, vn_max_rate, sc->vn_wsum);
794 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
795 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
797 /* Global VNIC counter - maximal Mbps for this VNIC. */
798 m_rs_vn.vn_counter.rate = vn_max_rate;
800 /* Quota - number of bytes transmitted in this period. */
801 m_rs_vn.vn_counter.quota =
802 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
806 * Credit for each period of the fairness algorithm. The
807 * number of bytes in T_FAIR (the VNIC shares the port rate).
808 * vn_wsum should not be larger than 10000, thus
809 * T_FAIR_COEF / (8 * vn_wsum) will always be greater than zero.
811 m_fair_vn.vn_credit_delta =
812 max((uint32_t)(vn_min_rate * (T_FAIR_COEF /
814 (uint32_t)(sc->cmng.fair_vars.fair_threshold * 2));
/* Copy the computed structures word-by-word into XSTORM internal memory. */
819 /* Store it to internal memory */
820 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn) / 4; i++)
821 REG_WR(sc, BAR_XSTORM_INTMEM +
822 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + (i * 4),
823 ((uint32_t *)(&m_rs_vn))[i]);
825 for (i = 0; i < sizeof(struct fairness_vars_per_vn) / 4; i++)
826 REG_WR(sc, BAR_XSTORM_INTMEM +
827 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + (i * 4),
828 ((uint32_t *)(&m_fair_vn))[i]);
830 DBEXIT(BXE_VERBOSE_LOAD);
/*
 * bxe_congestionmgmt: (re)initialize congestion management - port
 * min/max, the VN weighted sum, and per-VN min/max rates - then enable
 * rate shaping in sc->cmng.flags.  'readshm' presumably selects whether
 * the MF config is re-read from shared memory first; the lines doing so
 * are missing from this dump.
 */
834 bxe_congestionmgmt(struct bxe_softc *sc, uint8_t readshm)
838 DBENTER(BXE_VERBOSE_LOAD);
840 /* Read mf conf from shmem. */
844 /* Init rate shaping and fairness contexts */
845 bxe_init_port_minmax(sc);
847 /* vn_weight_sum and enable fairness if not 0 */
848 bxe_calc_vn_wsum(sc);
850 /* calculate and set min-max rate for each vn */
851 for (vn = 0; vn < E1HVN_MAX; vn++)
852 bxe_init_vn_minmax(sc, vn);
854 /* Always enable rate shaping and fairness. */
855 sc->cmng.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
/*
 * NOTE(review): the branch selecting between the two messages below is
 * missing from this dump; presumably it is keyed on sc->vn_wsum - verify.
 */
857 DBPRINT(sc, BXE_VERBOSE_LOAD,
858 "%s(): Rate shaping set\n", __FUNCTION__);
861 DBPRINT(sc, BXE_INFO_LOAD, "%s(): All MIN values "
862 "are zeroes, fairness is disabled\n", __FUNCTION__);
864 DBEXIT(BXE_VERBOSE_LOAD);
/*
 * bxe_dcc_event: handle a DCC event from the management firmware -
 * enable/disable this PF and/or refresh bandwidth allocation - then
 * acknowledge the outcome to the MCP.
 *
 * NOTE(review): braces, local declarations (i, port), and the calls
 * presumably made to bxe_e1h_disable()/bxe_e1h_enable() in the two
 * branches are missing from this dump - verify against pristine source.
 */
868 bxe_dcc_event(struct bxe_softc *sc, uint32_t dcc_event)
872 DBENTER(BXE_VERBOSE_LOAD);
874 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
/* The MF config decides whether this function is administratively down. */
875 if (sc->mf_config[BP_E1HVN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
876 DBPRINT(sc, BXE_INFO_LOAD, "%s(): mf_cfg function "
877 "disabled\n", __FUNCTION__);
878 sc->state = BXE_STATE_DISABLED;
881 DBPRINT(sc, BXE_INFO_LOAD, "%s(): mf_cfg function "
882 "enabled\n", __FUNCTION__);
883 sc->state = BXE_STATE_OPEN;
/* Mark this sub-event as handled. */
886 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
888 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
/* Recompute congestion management and copy it to XSTORM memory. */
890 bxe_congestionmgmt(sc, TRUE);
891 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
892 REG_WR(sc, BAR_XSTORM_INTMEM +
893 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
894 ((uint32_t *)(&sc->cmng))[i]);
895 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
/* Any event bits still set were not handled - report failure. */
898 /* Report results to MCP */
900 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE);
902 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK);
904 DBEXIT(BXE_VERBOSE_LOAD);
908 * Device probe function.
910 * Compares the device to the driver's list of supported devices and
911 * reports back to the OS whether this is the right driver for the device.
914 * BUS_PROBE_DEFAULT on success, positive value on failure.
917 bxe_probe(device_t dev)
919 struct bxe_softc *sc;
/*
 * NOTE(review): declarations of the table cursor 't' (walks bxe_devs),
 * 'descbuf', the braces, and the loop increment are missing from this
 * dump - verify against pristine source.
 */
922 uint16_t did, sdid, svid, vid;
924 sc = device_get_softc(dev);
928 /* Get the data for the device to be probed. */
929 vid = pci_get_vendor(dev);
930 did = pci_get_device(dev);
931 svid = pci_get_subvendor(dev);
932 sdid = pci_get_subdevice(dev);
/* NOTE(review): "%s();" below looks like it was meant to be "%s():". */
934 DBPRINT(sc, BXE_VERBOSE_LOAD,
935 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
936 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
938 /* Look through the list of known devices for a match. */
939 while (t->bxe_name != NULL) {
/* PCI_ANY_ID subvendor/subdevice entries act as wildcards. */
940 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
941 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
942 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
943 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
/* NOTE(review): the NULL check on descbuf is missing from this dump. */
947 /* Print out the device identity. */
948 snprintf(descbuf, BXE_DEVDESC_MAX,
949 "%s (%c%d) BXE v:%s\n", t->bxe_name,
950 (((pci_read_config(dev, PCIR_REVID, 4) &
952 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
955 device_set_desc_copy(dev, descbuf);
956 free(descbuf, M_TEMP);
957 return (BUS_PROBE_DEFAULT);
/*
 * bxe_print_adapter_info: print a one-time console summary of chip,
 * bus, feature-flag, queue, buffer-descriptor, and firmware/bootcode
 * information for this adapter.
 */
966 * Prints useful adapter info.
971 /* ToDo: Create a sysctl for this info. */
973 bxe_print_adapter_info(struct bxe_softc *sc)
977 DBENTER(BXE_EXTREME_LOAD);
979 /* Hardware chip info. */
980 BXE_PRINTF("ASIC (0x%08X); ", sc->common.chip_id);
/* Revision letter and metal revision are decoded from chip ID fields. */
981 printf("Rev (%c%d); ", (CHIP_REV(sc) >> 12) + 'A',
982 (CHIP_METAL(sc) >> 4));
/*
 * NOTE(review): the switch cases printing the known PCIe link speeds
 * are missing from this dump; only the default case survives.
 */
985 printf("Bus (PCIe x%d, ", sc->pcie_link_width);
986 switch (sc->pcie_link_speed) {
994 printf("Unknown link speed");
997 /* Device features. */
998 printf("); Flags (");
1000 /* Miscellaneous flags. */
1001 if (sc->msi_count > 0)
1004 if (sc->msix_count > 0) {
/* 'i' counts flags printed so far, so entries are '|' separated. */
1005 if (i > 0) printf("|");
1006 printf("MSI-X"); i++;
1009 if (TPA_ENABLED(sc)) {
1010 if (i > 0) printf("|");
1014 printf("); Queues (");
1015 switch (sc->multi_mode) {
1016 case ETH_RSS_MODE_DISABLED:
1019 case ETH_RSS_MODE_REGULAR:
1020 printf("RSS:%d", sc->num_queues);
1027 printf("); BD's (RX:%d,TX:%d",
1028 (int) USABLE_RX_BD, (int) USABLE_TX_BD);
1030 /* Firmware versions and device features. */
/*
 * STORM firmware version is compiled in; bootcode version is unpacked
 * from the 24 bits of sc->common.bc_ver (major.minor.revision).
 */
1031 printf("); Firmware (%d.%d.%d); Bootcode (%d.%d.%d)\n",
1032 BCM_5710_FW_MAJOR_VERSION,
1033 BCM_5710_FW_MINOR_VERSION,
1034 BCM_5710_FW_REVISION_VERSION,
1035 (int)((sc->common.bc_ver & 0xff0000) >> 16),
1036 (int)((sc->common.bc_ver & 0x00ff00) >> 8),
1037 (int)((sc->common.bc_ver & 0x0000ff)));
1039 DBEXIT(BXE_EXTREME_LOAD);
1043 * Release any interrupts allocated by the driver.
/*
 * Releases exactly one of the three interrupt configurations set up by
 * bxe_interrupt_alloc(): MSI-X, MSI, or the legacy INTx line.
 */
1049 bxe_interrupt_free(struct bxe_softc *sc)
1054 DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
1058 if (sc->msix_count > 0) {
1059 /* Free MSI-X resources. */
/* Release each vector's IRQ resource, then the MSI-X allocation itself. */
1061 for (i = 0; i < sc->msix_count; i++) {
1062 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
1063 BXE_VERBOSE_INTR), "%s(): Releasing MSI-X[%d] "
1064 "vector.\n", __FUNCTION__, i);
1065 if (sc->bxe_msix_res[i] && sc->bxe_msix_rid[i])
1066 bus_release_resource(dev, SYS_RES_IRQ,
1067 sc->bxe_msix_rid[i], sc->bxe_msix_res[i]);
1070 pci_release_msi(dev);
1072 } else if (sc->msi_count > 0) {
1073 /* Free MSI resources. */
1075 for (i = 0; i < sc->msi_count; i++) {
1076 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
1077 BXE_VERBOSE_INTR), "%s(): Releasing MSI[%d] "
1078 "vector.\n", __FUNCTION__, i);
1079 if (sc->bxe_msi_res[i] && sc->bxe_msi_rid[i])
1080 bus_release_resource(dev, SYS_RES_IRQ,
1081 sc->bxe_msi_rid[i], sc->bxe_msi_res[i]);
1084 pci_release_msi(dev);
1087 /* Free legacy interrupt resources. */
1089 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
1090 BXE_VERBOSE_INTR), "%s(): Releasing legacy interrupt.\n",
1092 if (sc->bxe_irq_res != NULL)
1093 bus_release_resource(dev, SYS_RES_IRQ,
1094 sc->bxe_irq_rid, sc->bxe_irq_res);
1097 DBEXIT(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
1101 * This function determines and allocates the appropriate
1102 * interrupt based on system capabilites and user request.
1104 * The user may force a particular interrupt mode, specify
1105 * the number of receive queues, specify the method for
1106 * distribuitng received frames to receive queues, or use
1107 * the default settings which will automatically select the
1108 * best supported combination. In addition, the OS may or
1109 * may not support certain combinations of these settings.
1110 * This routine attempts to reconcile the settings requested
1111 * by the user with the capabilites available from the system
1112 * to select the optimal combination of features.
1115 * 0 = Success, !0 = Failure.
1118 bxe_interrupt_alloc(struct bxe_softc *sc)
1121 int error, i, rid, rc;
1122 int msi_count, msi_required, msi_allocated;
1123 int msix_count, msix_required, msix_allocated;
1125 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1129 msi_count = msi_required = msi_allocated = 0;
1130 msix_count = msix_required = msix_allocated = 0;
1132 /* Get the number of available MSI/MSI-X interrupts from the OS. */
/* int_mode: 0 = INTx only, 1 = MSI allowed, >1 = MSI-X allowed. */
1133 if (sc->int_mode > 0) {
1134 if (sc->bxe_cap_flags & BXE_MSIX_CAPABLE_FLAG)
1135 msix_count = pci_msix_count(dev);
1137 if (sc->bxe_cap_flags & BXE_MSI_CAPABLE_FLAG)
1138 msi_count = pci_msi_count(dev);
1140 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1141 "%s(): %d MSI and %d MSI-X vectors available.\n",
1142 __FUNCTION__, msi_count, msix_count);
1145 /* Try allocating MSI-X interrupt resources. */
1146 if ((sc->bxe_cap_flags & BXE_MSIX_CAPABLE_FLAG) &&
1147 (sc->int_mode > 1) && (msix_count > 0) &&
1148 (msix_count >= sc->num_queues)) {
1149 /* Ask for the necessary number of MSI-X vectors. */
/* One slowpath vector plus one fastpath vector per RX queue. */
1150 if (sc->num_queues == 1)
1151 msix_allocated = msix_required = 2;
1153 msix_allocated = msix_required = sc->num_queues + 1;
1155 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1156 "%s(): Requesting %d MSI-X vectors.\n",
1157 __FUNCTION__, msix_required);
1159 /* BSD resource identifier */
1161 error = pci_alloc_msix(dev, &msix_allocated);
1163 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1164 "%s(): Required/Allocated (%d/%d) MSI-X vector(s).\n",
1165 __FUNCTION__, msix_required, msix_allocated);
1167 /* Make sure we got all the interrupts we asked for. */
1168 if (msix_allocated >= msix_required) {
1169 sc->msix_count = msix_required;
1172 /* Allocate the MSI-X vectors. */
1173 for (i = 0; i < msix_required; i++) {
/* NOTE(review): `rid` base value is set on a line the listing dropped. */
1174 sc->bxe_msix_rid[i] = rid + i +
1176 sc->bxe_msix_res[i] =
1177 bus_alloc_resource_any(dev,
1178 SYS_RES_IRQ, &sc->bxe_msix_rid[i],
1180 /* Report any IRQ allocation errors. */
1181 if (sc->bxe_msix_res[i] == NULL) {
1183 "%s(%d): Failed to map MSI-X[%d] vector!\n",
1184 __FILE__, __LINE__, (3));
1186 goto bxe_interrupt_alloc_exit;
/* MSI-X allocation failed or was short; fall back to MSI below. */
1191 DBPRINT(sc, BXE_WARN,
1192 "%s(): MSI-X allocation failed!\n",
1195 /* Release any resources acquired. */
1196 pci_release_msi(dev);
1197 sc->msix_count = msix_count = 0;
1199 /* We'll try MSI next. */
1205 /* Try allocating MSI vector resources. */
1206 if ((sc->bxe_cap_flags & BXE_MSI_CAPABLE_FLAG) &&
1207 (sc->int_mode > 0) && (msi_count > 0) &&
1208 (msi_count >= sc->num_queues)) {
1209 /* Ask for the necessary number of MSI vectors. */
1210 if (sc->num_queues == 1)
1211 msi_required = msi_allocated = 1;
1213 msi_required = msi_allocated = BXE_MSI_VECTOR_COUNT;
1215 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1216 "%s(): Requesting %d MSI vectors.\n", __FUNCTION__,
1220 error = pci_alloc_msi(dev, &msi_allocated);
1222 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1223 "%s(): Required/Allocated (%d/%d) MSI vector(s).\n",
1224 __FUNCTION__, msi_required, msi_allocated);
1227 * Make sure we got all the vectors we asked for.
1229 * FreeBSD always gives 8 even if we ask for less.
/*
 * NOTE(review): the comparison below is msi_required >= msi_allocated,
 * the reverse of the MSI-X check above — consistent with the comment
 * that the OS may hand back more vectors than requested.
 */
1231 if (msi_required >= msi_allocated) {
1232 sc->msi_count = msi_required;
1233 /* Allocate the MSI vectors. */
1234 for (i = 0; i < msi_required; i++) {
1235 sc->bxe_msi_rid[i] = i + rid;
1236 sc->bxe_msi_res[i] =
1237 bus_alloc_resource_any(dev,
1238 SYS_RES_IRQ, &sc->bxe_msi_rid[i],
1240 /* Report any IRQ allocation errors. */
1241 if (sc->bxe_msi_res[i] == NULL) {
1243 "%s(%d): Failed to map MSI vector (%d)!\n",
1244 __FILE__, __LINE__, (i));
1246 goto bxe_interrupt_alloc_exit;
1252 DBPRINT(sc, BXE_WARN, "%s(): MSI allocation failed!\n",
1255 /* Release any resources acquired. */
1256 pci_release_msi(dev);
1257 sc->msi_count = msi_count = 0;
1259 /* We'll try INTx next. */
1264 /* Try allocating INTx resources. */
/* NOTE(review): reached when int_mode forces INTx (the gapped listing
 * hides whether fallthrough from failed MSI also lands here). */
1265 if (sc->int_mode == 0) {
/* Multi-queue RSS is impossible on a single shared line. */
1267 sc->multi_mode = ETH_RSS_MODE_DISABLED;
1269 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1270 "%s(): Requesting legacy INTx interrupt.\n",
1274 sc->bxe_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1275 RF_SHAREABLE | RF_ACTIVE);
1276 /* Report any IRQ allocation errors. */
1277 if (sc->bxe_irq_res == NULL) {
1278 BXE_PRINTF("%s(%d): PCI map interrupt failed!\n",
1279 __FILE__, __LINE__);
1281 goto bxe_interrupt_alloc_exit;
1283 sc->bxe_irq_rid = rid;
1286 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1287 "%s(): Actual: int_mode = %d, multi_mode = %d, num_queues = %d\n",
1288 __FUNCTION__, sc->int_mode, sc->multi_mode, sc->num_queues);
1290 bxe_interrupt_alloc_exit:
1291 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1296 * This function releases taskqueues.
/*
 * Drains and frees the per-queue fastpath taskqueues and the slowpath
 * taskqueue, then tears down whichever interrupt handlers (MSI-X, MSI,
 * or legacy INTx) were installed by bxe_interrupt_attach().
 */
1302 bxe_interrupt_detach(struct bxe_softc *sc)
1305 struct bxe_fastpath *fp;
1310 DBENTER(BXE_VERBOSE_UNLOAD);
1315 /* Free the OS taskqueue resources. */
1316 for (i = 0; i < sc->num_queues; i++) {
1319 if (fp->tq != NULL) {
/* Drain before free so no task runs against a destroyed queue. */
1320 taskqueue_drain(fp->tq, &fp->task);
1321 taskqueue_free(fp->tq);
1325 if (sc->tq != NULL) {
1326 taskqueue_drain(sc->tq, &sc->task);
1327 taskqueue_free(sc->tq);
1331 /* Release interrupt resources. */
1332 if (sc->msix_count > 0) {
1333 for (i = 0; i < sc->msix_count; i++) {
1334 if (sc->bxe_msix_tag[i] && sc->bxe_msix_res[i])
1335 bus_teardown_intr(dev, sc->bxe_msix_res[i],
1336 sc->bxe_msix_tag[i]);
1338 } else if (sc->msi_count > 0) {
1339 for (i = 0; i < sc->msi_count; i++) {
1340 if (sc->bxe_msi_tag[i] && sc->bxe_msi_res[i])
1341 bus_teardown_intr(dev, sc->bxe_msi_res[i],
1342 sc->bxe_msi_tag[i]);
1345 if (sc->bxe_irq_tag != NULL)
1346 bus_teardown_intr(dev, sc->bxe_irq_res,
1350 DBEXIT(BXE_VERBOSE_UNLOAD);
1354 * This function enables interrupts and attachs to the ISR.
1356 * When using multiple MSI/MSI-X vectors the first vector
1357 * is used for slowpath operations while all remaining
1358 * vectors are used for fastpath operations. If only a
1359 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
1360 * ISR must look for both slowpath and fastpath completions.
1363 * 0 = Success, !0 = Failure.
1366 bxe_interrupt_attach(struct bxe_softc *sc)
1368 struct bxe_fastpath *fp;
1371 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1376 /* Setup the slowpath deferred task queue. */
1377 TASK_INIT(&sc->task, 0, bxe_task_sp, sc);
1378 sc->tq = taskqueue_create_fast("bxe_spq", M_NOWAIT,
1379 taskqueue_thread_enqueue, &sc->tq);
1380 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s spq",
1381 device_get_nameunit(sc->dev));
1384 /* Setup interrupt handlers. */
1385 if (sc->msix_count > 0) {
1386 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1387 "%s(): Enabling slowpath MSI-X[0] vector.\n",__FUNCTION__);
1389 * Setup the interrupt handler. Note that we pass the
1390 * driver instance to the interrupt handler for the
1393 rc = bus_setup_intr(sc->dev, sc->bxe_msix_res[0],
1394 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_sp,
1395 sc, &sc->bxe_msix_tag[0]);
1399 "%s(%d): Failed to allocate MSI-X[0] vector!\n",
1400 __FILE__, __LINE__);
1401 goto bxe_interrupt_attach_exit;
/* bus_describe_intr(9) only exists from FreeBSD 8.0-RELEASE-p4 on. */
1404 #if __FreeBSD_version >= 800504
1405 bus_describe_intr(sc->dev, sc->bxe_msix_res[0],
1406 sc->bxe_msix_tag[0], "sp");
1409 /* Now initialize the fastpath vectors. */
1410 for (i = 0; i < (sc->num_queues); i++) {
1412 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1413 "%s(): Enabling MSI-X[%d] vector.\n",
1414 __FUNCTION__, i + 1);
1416 * Setup the interrupt handler. Note that we pass the
1417 * fastpath context to the interrupt handler in this
1418 * case. Also the first msix_res was used by the sp.
1420 rc = bus_setup_intr(sc->dev, sc->bxe_msix_res[i + 1],
1421 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_fp,
1422 fp, &sc->bxe_msix_tag[i + 1]);
1426 "%s(%d): Failed to allocate MSI-X[%d] vector!\n",
1427 __FILE__, __LINE__, (i + 1));
1428 goto bxe_interrupt_attach_exit;
1431 #if __FreeBSD_version >= 800504
1432 bus_describe_intr(sc->dev, sc->bxe_msix_res[i + 1],
1433 sc->bxe_msix_tag[i + 1], "fp[%02d]", i);
1436 /* Bind the fastpath instance to a CPU. */
/* Pin queue i's vector to CPU i only when actually multi-queue. */
1437 if (sc->num_queues > 1) {
1438 bus_bind_intr(sc->dev,
1439 sc->bxe_msix_res[i + 1], i);
/* Per-queue deferred work context for the fastpath handler. */
1443 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
1444 fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
1445 taskqueue_thread_enqueue, &fp->tq);
1446 taskqueue_start_threads(&fp->tq, 1, PI_NET, "%s fpq",
1447 device_get_nameunit(sc->dev));
1449 fp->state = BXE_FP_STATE_IRQ;
1451 } else if (sc->msi_count > 0) {
1452 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1453 "%s(): Enabling slowpath MSI[0] vector.\n",
1456 * Setup the interrupt handler. Note that we pass the driver
1457 * instance to the interrupt handler for the slowpath.
1459 rc = bus_setup_intr(sc->dev,sc->bxe_msi_res[0],
1460 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_sp,
1461 sc, &sc->bxe_msi_tag[0]);
1465 "%s(%d): Failed to allocate MSI[0] vector!\n",
1466 __FILE__, __LINE__);
1467 goto bxe_interrupt_attach_exit;
1470 #if __FreeBSD_version >= 800504
1471 bus_describe_intr(sc->dev, sc->bxe_msi_res[0],
1472 sc->bxe_msi_tag[0], "sp");
1475 /* Now initialize the fastpath vectors. */
1476 for (i = 0; i < (sc->num_queues); i++) {
1479 (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1480 "%s(): Enabling MSI[%d] vector.\n",
1481 __FUNCTION__, i + 1);
1483 * Setup the interrupt handler. Note that we pass the
1484 * fastpath context to the interrupt handler in this
1487 rc = bus_setup_intr(sc->dev, sc->bxe_msi_res[i + 1],
1488 INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_fp,
1489 fp, &sc->bxe_msi_tag[i + 1]);
1493 "%s(%d): Failed to allocate MSI[%d] vector!\n",
1494 __FILE__, __LINE__, (i + 1));
1495 goto bxe_interrupt_attach_exit;
1498 #if __FreeBSD_version >= 800504
1499 bus_describe_intr(sc->dev, sc->bxe_msi_res[i + 1],
1500 sc->bxe_msi_tag[i + 1], "fp[%02d]", i);
1504 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
1505 fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
1506 taskqueue_thread_enqueue, &fp->tq);
1507 taskqueue_start_threads(&fp->tq, 1, PI_NET, "%s fpq",
1508 device_get_nameunit(sc->dev));
/* Legacy INTx: one shared handler covers slowpath and fastpath. */
1516 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1517 "%s(): Enabling INTx interrupts.\n", __FUNCTION__);
1520 * Setup the interrupt handler. Note that we pass the
1521 * driver instance to the interrupt handler which
1522 * will handle both the slowpath and fastpath.
1524 rc = bus_setup_intr(sc->dev,sc->bxe_irq_res, INTR_TYPE_NET |
1525 INTR_MPSAFE, NULL, bxe_intr_legacy, sc, &sc->bxe_irq_tag);
1528 BXE_PRINTF("%s(%d): Failed to allocate interrupt!\n",
1529 __FILE__, __LINE__);
1530 goto bxe_interrupt_attach_exit;
/* NOTE(review): `fp` here relies on initialization hidden by the
 * listing gaps — presumably fp = &sc->fp[0]; confirm in full source. */
1533 TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
1534 fp->tq = taskqueue_create_fast("bxe_fpq",
1535 M_NOWAIT, taskqueue_thread_enqueue, &fp->tq);
1536 taskqueue_start_threads(&fp->tq, 1,
1537 PI_NET, "%s fpq", device_get_nameunit(sc->dev));
1541 bxe_interrupt_attach_exit:
1542 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
1548 * PCI Capabilities Probe Function.
1550 * Walks the PCI capabiites list for the device to find what features are
1551 * supported. These capabilites may be enabled/disabled by firmware so it's
1552 * best to walk the list rather than hard code any values.
1558 bxe_probe_pci_caps(struct bxe_softc *sc)
1562 uint16_t link_status;
1565 DBENTER(BXE_EXTREME_LOAD);
1567 /* Check if PCI Power Management capability is enabled. */
1568 if (pci_find_cap(dev, PCIY_PMG, &reg) == 0) {
1570 DBPRINT(sc, BXE_EXTREME_LOAD,
1571 "%s(): Found PM capability at 0x%04X\n",
1577 /* Check if PCIe capability is enabled. */
1578 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
/* 0x12 = PCIe Link Status register offset within the capability. */
1580 link_status = pci_read_config(dev, reg + 0x12, 2);
1582 DBPRINT(sc, BXE_EXTREME_LOAD,
1583 "%s(): Found PCIe capability at 0x%04X\n",
1586 /* Handle PCIe 2.0 workarounds for the 57710. */
1587 if (CHIP_IS_E1(sc)) {
1588 /* Workaround for 57710 errata E4_57710_27462. */
/* Link speed is read from a chip register instead of config space. */
1589 sc->pcie_link_speed =
1590 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
1592 /* Workaround for 57710 errata E4_57710_27488. */
/* At Gen2 the reported width is double the true width; halve it. */
1593 sc->pcie_link_width = (link_status >> 4) & 0x3f;
1594 if (sc->pcie_link_speed > 1)
1595 sc->pcie_link_width =
1596 ((link_status >> 4) & 0x3f) >> 1;
/* Non-E1 chips: decode speed/width straight from Link Status. */
1600 sc->pcie_link_speed = link_status & 0xf;
1601 sc->pcie_link_width = (link_status >> 4) & 0x3f;
1605 sc->bxe_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
1611 /* Check if MSI capability is enabled. */
1612 if (pci_find_cap(dev, PCIY_MSI, &reg) == 0) {
1614 DBPRINT(sc, BXE_EXTREME_LOAD,
1615 "%s(): Found MSI capability at 0x%04X\n",
1617 sc->bxe_cap_flags |= BXE_MSI_CAPABLE_FLAG;
1621 /* Check if MSI-X capability is enabled. */
1622 if (pci_find_cap(dev, PCIY_MSIX, &reg) == 0) {
1624 DBPRINT(sc, BXE_EXTREME_LOAD,
1625 "%s(): Found MSI-X capability at 0x%04X\n",
1627 sc->bxe_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
1631 DBEXIT(BXE_EXTREME_LOAD);
1635 * Setup firmware pointers for BCM57710.
/*
 * Points the softc's firmware accessors at the statically linked E1
 * (57710) init ops, init data, and per-STORM interrupt-table/PRAM images.
 * No return value; assignments only.
 */
1641 bxe_init_e1_firmware(struct bxe_softc *sc)
1643 INIT_OPS(sc) = (struct raw_op *)init_ops_e1;
1644 INIT_DATA(sc) = (const uint32_t *)init_data_e1;
1645 INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1;
1646 INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1;
1647 INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1;
1648 INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1;
1649 INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1;
1650 INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1;
1651 INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1;
1652 INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1;
1653 INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1;
1657 * Setup firmware pointers for BCM57711.
/*
 * E1H (57711) counterpart of bxe_init_e1_firmware(): wires the softc's
 * firmware accessors to the *_e1h images. Assignments only.
 */
1663 bxe_init_e1h_firmware(struct bxe_softc *sc)
1665 INIT_OPS(sc) = (struct raw_op *)init_ops_e1h;
1666 INIT_DATA(sc) = (const uint32_t *)init_data_e1h;
1667 INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1h;
1668 INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1h;
1669 INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1h;
1670 INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1h;
1671 INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1h;
1672 INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1h;
1673 INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1h;
1674 INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1h;
1675 INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1h;
1679 * Sets up pointers for loading controller firmware.
1682 * 0 = Success, !0 = Failure
1685 bxe_init_firmware(struct bxe_softc *sc)
/* Dispatch on chip family; unknown silicon is a hard failure. */
1692 bxe_init_e1_firmware(sc);
1693 else if (CHIP_IS_E1H(sc))
1694 bxe_init_e1h_firmware(sc);
1696 BXE_PRINTF("%s(%d): No firmware to support chip revision!\n",
1697 __FILE__, __LINE__);
/*
 * Captures the loader-tunable knobs (interrupt mode, multi-queue mode,
 * TSO, coalescing ticks, MRRS, DCC) into the softc and sanity-checks
 * the combinations before interrupt allocation runs.
 */
1705 bxe_tunables_set(struct bxe_softc *sc)
1708 * Get our starting point for interrupt mode/number of queues.
1709 * We will progressively step down from MSI-X to MSI to INTx
1710 * and reduce the number of receive queues as necessary to
1711 * match the system capabilities.
1713 sc->multi_mode = bxe_multi_mode;
1714 sc->int_mode = bxe_int_mode;
1715 sc->tso_enable = bxe_tso_enable;
1718 * Verify the Priority -> Receive Queue mappings.
1720 if (sc->int_mode > 0) {
1721 /* Multi-queue modes require MSI/MSI-X. */
1722 switch (sc->multi_mode) {
1723 case ETH_RSS_MODE_DISABLED:
1724 /* No multi-queue mode requested. */
1727 case ETH_RSS_MODE_REGULAR:
1728 if (sc->int_mode > 1) {
1730 * Assume we can use MSI-X
1731 * (max of 16 receive queues).
/* Queue count: explicit tunable wins, else one queue per CPU,
 * clamped to the hardware context limit. */
1733 sc->num_queues = min((bxe_queue_count ?
1734 bxe_queue_count : mp_ncpus), MAX_CONTEXT);
1737 * Assume we can use MSI
1738 * (max of 7 receive queues).
1740 sc->num_queues = min((bxe_queue_count ?
1741 bxe_queue_count : mp_ncpus),
1742 BXE_MSI_VECTOR_COUNT - 1);
1747 "%s(%d): Unsupported multi_mode parameter (%d), "
1748 "disabling multi-queue support!\n", __FILE__,
1749 __LINE__, sc->multi_mode);
1750 sc->multi_mode = ETH_RSS_MODE_DISABLED;
1755 /* User has forced INTx mode. */
1756 sc->multi_mode = ETH_RSS_MODE_DISABLED;
1760 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
1761 "%s(): Requested: int_mode = %d, multi_mode = %d num_queues = %d\n",
1762 __FUNCTION__, sc->int_mode, sc->multi_mode, sc->num_queues);
1764 sc->stats_enable = TRUE;
1766 /* Select the host coalescing tick count values (limit values). */
/* Out-of-range tick tunables fall back to documented defaults. */
1767 if (bxe_tx_ticks > 100) {
1768 BXE_PRINTF("%s(%d): bxe_tx_ticks too large "
1769 "(%d), setting default value of 50.\n",
1770 __FILE__, __LINE__, bxe_tx_ticks);
1773 sc->tx_ticks = bxe_tx_ticks;
1775 if (bxe_rx_ticks > 100) {
1776 BXE_PRINTF("%s(%d): bxe_rx_ticks too large "
1777 "(%d), setting default value of 25.\n",
1778 __FILE__, __LINE__, bxe_rx_ticks);
1781 sc->rx_ticks = bxe_rx_ticks;
1783 /* Select the PCIe maximum read request size (MRRS). */
1787 sc->mrrs = bxe_mrrs;
1789 /* Check for DCC support. */
1790 if (bxe_dcc_enable == FALSE)
1791 sc->dcc_enable = FALSE;
1793 sc->dcc_enable = TRUE;
1798 * Allocates PCI resources from OS.
1801 * 0 = Success, !0 = Failure
1804 bxe_pci_resources_alloc(struct bxe_softc *sc)
1808 DBENTER(BXE_VERBOSE_LOAD);
1811 * Allocate PCI memory resources for BAR0.
1812 * This includes device registers and internal
/* NOTE(review): the `rid = PCIR_BAR(0)` assignment is hidden by a
 * listing gap; bxe_pci_resources_free() releases with PCIR_BAR(0). */
1816 sc->bxe_res = bus_alloc_resource_any(sc->dev,
1817 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1818 if (sc->bxe_res == NULL) {
1819 BXE_PRINTF("%s(%d):PCI BAR0 memory allocation failed\n",
1820 __FILE__, __LINE__);
1822 goto bxe_pci_resources_alloc_exit;
1825 /* Get OS resource handles for BAR0 memory. */
1826 sc->bxe_btag = rman_get_bustag(sc->bxe_res);
1827 sc->bxe_bhandle = rman_get_bushandle(sc->bxe_res);
1828 sc->bxe_vhandle = (vm_offset_t) rman_get_virtual(sc->bxe_res);
1831 * Allocate PCI memory resources for BAR2.
1832 * Doorbell (DB) memory.
1835 sc->bxe_db_res = bus_alloc_resource_any(sc->dev,
1836 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1837 if (sc->bxe_db_res == NULL) {
1838 BXE_PRINTF("%s(%d): PCI BAR2 memory allocation failed\n",
1839 __FILE__, __LINE__);
1841 goto bxe_pci_resources_alloc_exit;
1844 /* Get OS resource handles for BAR2 memory. */
1845 sc->bxe_db_btag = rman_get_bustag(sc->bxe_db_res);
1846 sc->bxe_db_bhandle = rman_get_bushandle(sc->bxe_db_res);
1847 sc->bxe_db_vhandle = (vm_offset_t) rman_get_virtual(sc->bxe_db_res);
1849 bxe_pci_resources_alloc_exit:
1850 DBEXIT(BXE_VERBOSE_LOAD);
1856 * Frees PCI resources allocated in bxe_pci_resources_alloc().
1862 bxe_pci_resources_free(struct bxe_softc *sc)
1864 DBENTER(BXE_VERBOSE_UNLOAD);
1866 /* Release the PCIe BAR0 mapped memory. */
1867 if (sc->bxe_res != NULL) {
1868 bus_release_resource(sc->dev, SYS_RES_MEMORY,
1869 PCIR_BAR(0), sc->bxe_res);
1872 /* Release the PCIe BAR2 (doorbell) mapped memory. */
1873 if (sc->bxe_db_res != NULL) {
1874 bus_release_resource(sc->dev, SYS_RES_MEMORY,
1875 PCIR_BAR(2), sc->bxe_db_res);
/*
 * NOTE(review): entry was already traced above; this final trace should
 * almost certainly be DBEXIT(BXE_VERBOSE_UNLOAD), not DBENTER — as
 * written the function logs "enter" twice and never logs "exit".
 * Confirm against the DBENTER/DBEXIT macro definitions before fixing.
 */
1878 DBENTER(BXE_VERBOSE_UNLOAD);
1883 * Determines the media reported to the OS by examining
1884 * the installed PHY type.
1887 * 0 = Success, !0 = Failure
1890 bxe_media_detect(struct bxe_softc *sc)
1896 /* Identify supported media based on the PHY type. */
/* The external PHY type is packed into link_params.ext_phy_config. */
1897 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
1898 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1899 DBPRINT(sc, BXE_INFO_LOAD,
1900 "%s(): Found 10GBase-CX4 media.\n", __FUNCTION__);
1901 sc->media = IFM_10G_CX4;
1903 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
1904 /* Technically 10GBase-KR but report as 10GBase-SR*/
1905 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
1906 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
1907 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
1908 DBPRINT(sc, BXE_INFO_LOAD,
1909 "%s(): Found 10GBase-SR media.\n", __FUNCTION__);
1910 sc->media = IFM_10G_SR;
1912 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1913 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
1914 DBPRINT(sc, BXE_INFO_LOAD,
1915 "%s(): Found 10Gb twinax media.\n", __FUNCTION__);
1916 sc->media = IFM_10G_TWINAX;
1918 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
1919 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1920 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
1921 DBPRINT(sc, BXE_INFO_LOAD,
1922 "%s(): Found 10GBase-T media.\n", __FUNCTION__);
1923 sc->media = IFM_10G_T;
/* Failure/not-connected PHY types fall to the error path (the default
 * case and return statement were dropped by the listing). */
1925 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
1926 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
1937 * Device attach function.
1939 * Allocates device resources, performs secondary chip identification,
1940 * resets and initializes the hardware, and initializes driver instance
1944 * 0 = Success, Positive value on failure.
1947 bxe_attach(device_t dev)
1949 struct bxe_softc *sc;
1953 sc = device_get_softc(dev);
1954 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET);
1957 sc->bxe_unit = device_get_unit(dev);
1958 sc->bxe_func = pci_get_function(dev);
1960 sc->state = BXE_STATE_CLOSED;
1963 DBPRINT(sc, BXE_FATAL, "%s(): ************************\n",
1965 DBPRINT(sc, BXE_FATAL, "%s(): ** Debug mode enabled **\n",
1967 DBPRINT(sc, BXE_FATAL, "%s(): ************************\n",
1969 DBPRINT(sc, BXE_FATAL, "%s(): sc vaddr = 0x%08X:%08X\n",
1970 __FUNCTION__, (uint32_t) U64_HI(sc), (uint32_t) U64_LO(sc));
1972 /* Get the user configurable values for driver load. */
1973 bxe_tunables_set(sc);
1975 bxe_mutexes_alloc(sc);
1977 /* Prepare tick routine. */
1978 callout_init_mtx(&sc->bxe_tick_callout, &sc->bxe_core_mtx, 0);
1980 /* Enable bus master capability */
1981 pci_enable_busmaster(dev);
1983 /* Enable PCI BAR mapped memory for register access. */
1984 rc = bxe_pci_resources_alloc(sc);
1986 BXE_PRINTF("%s(%d): Error allocating PCI resources!\n",
1987 __FILE__, __LINE__);
1988 goto bxe_attach_fail;
1991 /* Put indirect address registers into a sane state. */
1992 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
1993 PCICFG_VENDOR_ID_OFFSET, 4);
/* Clear the per-port PXP2 PGL address registers. */
1994 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(sc) * 16, 0);
1995 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(sc) * 16, 0);
1996 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(sc) * 16, 0);
1997 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(sc) * 16, 0);
1999 /* Get hardware info from shared memory and validate data. */
2000 rc = bxe_hwinfo_function_get(sc);
2002 DBPRINT(sc, BXE_WARN,
2003 "%s(): Failed to get hardware info!\n", __FUNCTION__);
2004 goto bxe_attach_fail;
2007 /* Setup supported media options. */
2008 rc = bxe_media_detect(sc);
2010 BXE_PRINTF("%s(%d): Unknown media (PHY) type!\n",
2011 __FILE__, __LINE__);
2012 goto bxe_attach_fail;
2015 /* Interface entrypoint for media type/status reporting. */
2016 ifmedia_init(&sc->bxe_ifmedia,
2017 IFM_IMASK, bxe_ifmedia_upd, bxe_ifmedia_status);
2019 /* Default interface values. */
2020 ifmedia_add(&sc->bxe_ifmedia,
2021 IFM_ETHER | sc->media | IFM_FDX, 0, NULL);
2022 ifmedia_add(&sc->bxe_ifmedia,
2023 IFM_ETHER | IFM_AUTO, 0, NULL);
2024 ifmedia_set(&sc->bxe_ifmedia,
2025 IFM_ETHER | IFM_AUTO);
2026 sc->bxe_ifmedia.ifm_media =
2027 sc->bxe_ifmedia.ifm_cur->ifm_media;
2029 /* Setup firmware arrays (firmware load comes later). */
2030 rc = bxe_init_firmware(sc);
2032 BXE_PRINTF("%s(%d): Error preparing firmware load!\n",
2033 __FILE__, __LINE__);
2034 goto bxe_attach_fail;
2038 /* Allocate a memory buffer for grcdump output.*/
2039 sc->grcdump_buffer = malloc(BXE_GRCDUMP_BUF_SIZE, M_TEMP, M_NOWAIT);
2040 if (sc->grcdump_buffer == NULL) {
/* NOTE(review): grcdump allocation failure is reported but the
 * failure-path statement is hidden by a listing gap. */
2041 BXE_PRINTF("%s(%d): Failed to allocate grcdump memory "
2042 "buffer!\n", __FILE__, __LINE__);
2047 /* Check that NVRAM contents are valid.*/
2048 rc = bxe_nvram_test(sc);
2050 BXE_PRINTF("%s(%d): Failed NVRAM test!\n",
2051 __FILE__, __LINE__);
2052 goto bxe_attach_fail;
2055 /* Allocate the appropriate interrupts.*/
2056 rc = bxe_interrupt_alloc(sc);
2058 BXE_PRINTF("%s(%d): Interrupt allocation failed!\n",
2059 __FILE__, __LINE__);
2060 goto bxe_attach_fail;
2063 /* Useful for accessing unconfigured devices (i.e. factory diags).*/
2065 sc->bxe_flags |= BXE_NO_MCP_FLAG;
2067 /* If bootcode is not running only initialize port 0. */
2068 if (nomcp && BP_PORT(sc)) {
2070 "%s(%d): Second device disabled (no bootcode), "
2071 "exiting...\n", __FILE__, __LINE__);
2073 goto bxe_attach_fail;
2076 /* Check if PXE/UNDI is still active and unload it. */
2078 bxe_undi_unload(sc);
2081 * Select the RX and TX ring sizes. The actual
2082 * ring size for TX is complicated by the fact
2083 * that a single TX frame may be broken up into
2084 * many buffer descriptors (tx_start_bd,
2085 * tx_parse_bd, tx_data_bd). In the best case,
2086 * there are always at least two BD's required
2087 * so we'll assume the best case here.
2089 sc->tx_ring_size = (USABLE_TX_BD >> 1);
2090 sc->rx_ring_size = USABLE_RX_BD;
2092 /* Assume receive IP/TCP/UDP checksum is enabled. */
2093 /* ToDo: Change when IOCTL changes checksum offload? */
2099 /* Assume a standard 1500 byte MTU size for mbuf allocations. */
2100 sc->mbuf_alloc_size = MCLBYTES;
2102 /* Allocate DMA memory resources. */
2103 rc = bxe_host_structures_alloc(sc->dev);
2105 BXE_PRINTF("%s(%d): DMA memory allocation failed!\n",
2106 __FILE__, __LINE__);
2107 goto bxe_attach_fail;
2110 /* Allocate a FreeBSD ifnet structure. */
2111 ifp = sc->bxe_ifp = if_alloc(IFT_ETHER);
2113 BXE_PRINTF("%s(%d): Interface allocation failed!\n",
2114 __FILE__, __LINE__);
2116 goto bxe_attach_fail;
2119 /* Initialize the FreeBSD ifnet interface. */
2121 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2123 /* Written by driver before attach, read-only afterwards. */
2124 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2126 /* Driver entrypoints from the network interface. */
2127 ifp->if_ioctl = bxe_ioctl;
2128 ifp->if_start = bxe_tx_start;
/* Multiqueue transmit entry points exist only on FreeBSD 8+. */
2129 #if __FreeBSD_version >= 800000
2130 ifp->if_transmit = bxe_tx_mq_start;
2131 ifp->if_qflush = bxe_mq_flush;
2138 ifp->if_init = bxe_init;
2139 ifp->if_mtu = ETHERMTU;
2140 ifp->if_hwassist = BXE_IF_HWASSIST;
2141 ifp->if_capabilities = BXE_IF_CAPABILITIES;
2142 /* TPA not enabled by default. */
2143 ifp->if_capenable = BXE_IF_CAPABILITIES & ~IFCAP_LRO;
2144 ifp->if_baudrate = IF_Gbps(10UL);
2146 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
2148 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2149 IFQ_SET_READY(&ifp->if_snd);
2151 /* Attach to the Ethernet interface list. */
2152 ether_ifattach(ifp, sc->link_params.mac_addr);
2154 /* Attach the interrupts to the interrupt handlers. */
2155 rc = bxe_interrupt_attach(sc);
2157 BXE_PRINTF("%s(%d): Interrupt allocation failed!\n",
2158 __FILE__, __LINE__);
2159 goto bxe_attach_fail;
2162 /* Print important adapter info for the user. */
2163 bxe_print_adapter_info(sc);
2165 /* Add the supported sysctls to the kernel. */
2166 bxe_add_sysctls(sc);
2172 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET);
2178 * Supported link settings.
2180 * Examines hardware configuration present in NVRAM and
2181 * determines the link settings that are supported between
2182 * the external PHY and the switch.
2188 * Sets sc->port.supported
2189 * Sets sc->link_params.phy_addr
2192 bxe_link_settings_supported(struct bxe_softc *sc, uint32_t switch_cfg)
2194 uint32_t ext_phy_type;
2197 DBENTER(BXE_VERBOSE_PHY);
2198 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): switch_cfg = 0x%08X\n",
2199 __FUNCTION__, switch_cfg);
2202 /* Get the link settings supported by the external PHY. */
2203 switch (switch_cfg) {
2206 SERDES_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
2208 DBPRINT(sc, BXE_VERBOSE_PHY,
2209 "%s(): 1G switch w/ ext_phy_type = "
2210 "0x%08X\n", __FUNCTION__, ext_phy_type);
2212 switch (ext_phy_type) {
2213 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2214 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): 1G Direct.\n",
2217 sc->port.supported |=
2218 (SUPPORTED_10baseT_Half |
2219 SUPPORTED_10baseT_Full |
2220 SUPPORTED_100baseT_Half |
2221 SUPPORTED_100baseT_Full |
2222 SUPPORTED_1000baseT_Full |
2223 SUPPORTED_2500baseX_Full |
2228 SUPPORTED_Asym_Pause);
2231 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2232 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): 1G 5482\n",
2235 sc->port.supported |=
2236 (SUPPORTED_10baseT_Half |
2237 SUPPORTED_10baseT_Full |
2238 SUPPORTED_100baseT_Half |
2239 SUPPORTED_100baseT_Full |
2240 SUPPORTED_1000baseT_Full |
2245 SUPPORTED_Asym_Pause);
2250 "%s(%d): Bad NVRAM 1Gb PHY configuration data "
2251 "(ext_phy_config=0x%08X).\n",
2253 sc->link_params.ext_phy_config);
2254 goto bxe_link_settings_supported_exit;
2258 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + (port * 0x10));
2260 DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): phy_addr = 0x%08X\n",
2261 __FUNCTION__, sc->port.phy_addr);
2264 case SWITCH_CFG_10G:
2266 XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
2269 sc, BXE_VERBOSE_PHY,
2270 "%s(): 10G switch w/ ext_phy_type = 0x%08X\n",
2271 __FUNCTION__, ext_phy_type);
2273 switch (ext_phy_type) {
2274 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2275 DBPRINT(sc, BXE_VERBOSE_PHY,
2276 "%s(): 10G switch w/ direct connect.\n",
2279 sc->port.supported |=
2280 (SUPPORTED_10baseT_Half |
2281 SUPPORTED_10baseT_Full |
2282 SUPPORTED_100baseT_Half |
2283 SUPPORTED_100baseT_Full |
2284 SUPPORTED_1000baseT_Full |
2285 SUPPORTED_2500baseX_Full |
2286 SUPPORTED_10000baseT_Full |
2291 SUPPORTED_Asym_Pause);
2294 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2295 DBPRINT(sc, BXE_VERBOSE_PHY,
2296 "ext_phy_type 0x%x (8072)\n",ext_phy_type);
2298 sc->port.supported |=
2299 (SUPPORTED_10000baseT_Full |
2300 SUPPORTED_1000baseT_Full |
2304 SUPPORTED_Asym_Pause);
2307 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
2309 BXE_VERBOSE_PHY,"ext_phy_type 0x%x (8073)\n",
2312 sc->port.supported |=
2313 (SUPPORTED_10000baseT_Full |
2314 SUPPORTED_2500baseX_Full |
2315 SUPPORTED_1000baseT_Full |
2319 SUPPORTED_Asym_Pause);
2322 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2323 DBPRINT(sc, BXE_VERBOSE_PHY,
2324 "%s(): 10G switch w/ 8705.\n",__FUNCTION__);
2326 sc->port.supported |=
2327 (SUPPORTED_10000baseT_Full |
2330 SUPPORTED_Asym_Pause);
2333 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2334 DBPRINT(sc, BXE_VERBOSE_PHY,
2335 "%s(): 10G switch w/ 8706.\n",
2338 sc->port.supported |=
2339 (SUPPORTED_10000baseT_Full |
2340 SUPPORTED_1000baseT_Full |
2343 SUPPORTED_Asym_Pause);
2346 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
2347 DBPRINT(sc, BXE_VERBOSE_PHY,
2348 "%s(): 10G switch w/ 8726.\n",
2351 sc->port.supported |=
2352 (SUPPORTED_10000baseT_Full |
2355 SUPPORTED_Asym_Pause);
2358 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2359 DBPRINT(sc, BXE_VERBOSE_PHY,"ext_phy_type 0x%x (8727)\n",
2362 sc->port.supported |=
2363 (SUPPORTED_10000baseT_Full |
2364 SUPPORTED_1000baseT_Full |
2368 SUPPORTED_Asym_Pause);
2371 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2372 DBPRINT(sc, BXE_VERBOSE_PHY,
2373 "%s(): 10G switch w/ SFX7101.\n",
2376 sc->port.supported |=
2377 (SUPPORTED_10000baseT_Full |
2381 SUPPORTED_Asym_Pause);
2384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
2385 DBPRINT(sc, BXE_VERBOSE_PHY,
2386 "ext_phy_type 0x%x (BCM8481)\n",
2389 sc->port.supported |=
2390 (SUPPORTED_10baseT_Half |
2391 SUPPORTED_10baseT_Full |
2392 SUPPORTED_100baseT_Half |
2393 SUPPORTED_100baseT_Full |
2394 SUPPORTED_1000baseT_Full |
2395 SUPPORTED_10000baseT_Full |
2399 SUPPORTED_Asym_Pause);
2402 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2403 DBPRINT(sc, BXE_WARN,
2404 "%s(): 10G XGXS PHY failure detected.\n",
2409 "%s(%d): Bad NVRAM 10Gb PHY configuration data "
2410 "(ext_phy_config=0x%08X).\n",
2412 sc->link_params.ext_phy_config);
2413 goto bxe_link_settings_supported_exit;
2417 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR +(port * 0x18));
2421 DBPRINT(sc, BXE_WARN, "%s(): BAD switch configuration "
2422 "(link_config = 0x%08X)\n", __FUNCTION__,
2423 sc->port.link_config);
2424 goto bxe_link_settings_supported_exit;
2427 sc->link_params.phy_addr = sc->port.phy_addr;
2429 /* Mask out unsupported speeds according to NVRAM. */
2430 if ((sc->link_params.speed_cap_mask &
2431 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) == 0)
2432 sc->port.supported &= ~SUPPORTED_10baseT_Half;
2434 if ((sc->link_params.speed_cap_mask &
2435 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) == 0)
2436 sc->port.supported &= ~SUPPORTED_10baseT_Full;
2438 if ((sc->link_params.speed_cap_mask &
2439 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) == 0)
2440 sc->port.supported &= ~SUPPORTED_100baseT_Half;
2442 if ((sc->link_params.speed_cap_mask &
2443 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) == 0)
2444 sc->port.supported &= ~SUPPORTED_100baseT_Full;
2446 if ((sc->link_params.speed_cap_mask &
2447 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) == 0)
2448 sc->port.supported &= ~(SUPPORTED_1000baseT_Half |
2449 SUPPORTED_1000baseT_Full);
2451 if ((sc->link_params.speed_cap_mask &
2452 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) == 0)
2453 sc->port.supported &= ~SUPPORTED_2500baseX_Full;
2455 if ((sc->link_params.speed_cap_mask &
2456 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) == 0)
2457 sc->port.supported &= ~SUPPORTED_10000baseT_Full;
2459 DBPRINT(sc, BXE_VERBOSE_PHY,
2460 "%s(): Supported link settings = 0x%b\n", __FUNCTION__,
2461 sc->port.supported, BXE_SUPPORTED_PRINTFB);
2463 bxe_link_settings_supported_exit:
2465 DBEXIT(BXE_VERBOSE_PHY);
2469 * Requested link settings.
/*
 * Translate the NVRAM link_config word into the driver's requested link
 * settings (req_line_speed, req_duplex, req_flow_ctrl, advertising mask).
 * Each requested speed is validated against sc->port.supported; invalid
 * NVRAM configurations are logged and the function bails out.
 * NOTE(review): the visible text of this chunk is elided in places
 * (original line numbers jump), so comments cover only the shown code.
 */
2475 bxe_link_settings_requested(struct bxe_softc *sc)
2477 uint32_t ext_phy_type;
2478 DBENTER(BXE_VERBOSE_PHY);
/* Default to full duplex; half-duplex cases override this below. */
2480 sc->link_params.req_duplex = MEDIUM_FULL_DUPLEX;
2482 switch (sc->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
2484 case PORT_FEATURE_LINK_SPEED_AUTO:
2485 if (sc->port.supported & SUPPORTED_Autoneg) {
2486 sc->link_params.req_line_speed |= SPEED_AUTO_NEG;
2487 sc->port.advertising = sc->port.supported;
2489 ext_phy_type = XGXS_EXT_PHY_TYPE(
2490 sc->link_params.ext_phy_config);
/* BCM8705/8706 external PHYs cannot autonegotiate; force 10G. */
2492 if ((ext_phy_type ==
2493 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
2495 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
2496 /* Force 10G, no autonegotiation. */
2497 sc->link_params.req_line_speed = SPEED_10000;
2498 sc->port.advertising =
2499 ADVERTISED_10000baseT_Full |
2504 DBPRINT(sc, BXE_FATAL,
2505 "%s(): NVRAM config error. Invalid "
2506 "link_config (0x%08X) - Autoneg not supported!\n",
2507 __FUNCTION__, sc->port.link_config);
2508 goto bxe_link_settings_requested_exit;
2511 case PORT_FEATURE_LINK_SPEED_10M_FULL:
2512 if (sc->port.supported & SUPPORTED_10baseT_Full) {
2513 sc->link_params.req_line_speed = SPEED_10;
2514 sc->port.advertising = ADVERTISED_10baseT_Full |
2517 DBPRINT(sc, BXE_FATAL,
2518 "%s(): NVRAM config error. Invalid "
2519 "link_config (0x%08X) - speed_cap_mask 0x%08X\n",
2520 __FUNCTION__, sc->port.link_config,
2521 sc->link_params.speed_cap_mask);
2522 goto bxe_link_settings_requested_exit;
2525 case PORT_FEATURE_LINK_SPEED_10M_HALF:
2526 if (sc->port.supported & SUPPORTED_10baseT_Half) {
2527 sc->link_params.req_line_speed = SPEED_10;
2528 sc->link_params.req_duplex = MEDIUM_HALF_DUPLEX;
2529 sc->port.advertising = ADVERTISED_10baseT_Half |
2532 DBPRINT(sc, BXE_FATAL,
2533 "%s(): NVRAM config error. Invalid "
2534 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2535 __FUNCTION__, sc->port.link_config,
2536 sc->link_params.speed_cap_mask);
2537 goto bxe_link_settings_requested_exit;
2540 case PORT_FEATURE_LINK_SPEED_100M_FULL:
2541 if (sc->port.supported & SUPPORTED_100baseT_Full) {
2542 sc->link_params.req_line_speed = SPEED_100;
2543 sc->port.advertising = ADVERTISED_100baseT_Full |
2546 DBPRINT(sc, BXE_FATAL,
2547 "%s(): NVRAM config error. Invalid "
2548 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2549 __FUNCTION__, sc->port.link_config,
2550 sc->link_params.speed_cap_mask);
2551 goto bxe_link_settings_requested_exit;
2554 case PORT_FEATURE_LINK_SPEED_100M_HALF:
2555 if (sc->port.supported & SUPPORTED_100baseT_Half) {
2556 sc->link_params.req_line_speed = SPEED_100;
2557 sc->link_params.req_duplex = MEDIUM_HALF_DUPLEX;
2558 sc->port.advertising = ADVERTISED_100baseT_Half |
2561 DBPRINT(sc, BXE_FATAL,
2562 "%s(): NVRAM config error. Invalid "
2563 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2564 __FUNCTION__, sc->port.link_config,
2565 sc->link_params.speed_cap_mask);
2566 goto bxe_link_settings_requested_exit;
2569 case PORT_FEATURE_LINK_SPEED_1G:
2570 if (sc->port.supported & SUPPORTED_1000baseT_Full) {
2571 sc->link_params.req_line_speed = SPEED_1000;
2572 sc->port.advertising = ADVERTISED_1000baseT_Full |
2575 DBPRINT(sc, BXE_FATAL,
2576 "%s(): NVRAM config error. Invalid "
2577 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2578 __FUNCTION__, sc->port.link_config,
2579 sc->link_params.speed_cap_mask);
2580 goto bxe_link_settings_requested_exit;
2583 case PORT_FEATURE_LINK_SPEED_2_5G:
2584 if (sc->port.supported & SUPPORTED_2500baseX_Full) {
2585 sc->link_params.req_line_speed = SPEED_2500;
2586 sc->port.advertising = ADVERTISED_2500baseX_Full |
2589 DBPRINT(sc, BXE_FATAL,
2590 "%s(): NVRAM config error. Invalid "
2591 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2592 __FUNCTION__, sc->port.link_config,
2593 sc->link_params.speed_cap_mask);
2594 goto bxe_link_settings_requested_exit;
/* All three 10G media variants share the same request handling. */
2597 case PORT_FEATURE_LINK_SPEED_10G_CX4:
2598 case PORT_FEATURE_LINK_SPEED_10G_KX4:
2599 case PORT_FEATURE_LINK_SPEED_10G_KR:
2600 if (sc->port.supported & SUPPORTED_10000baseT_Full) {
2601 sc->link_params.req_line_speed = SPEED_10000;
2602 sc->port.advertising = ADVERTISED_10000baseT_Full |
2605 DBPRINT(sc, BXE_FATAL,
2606 "%s(): NVRAM config error. Invalid "
2607 "link_config (0x%08X) - speed_cap_mask = 0x%08X\n",
2608 __FUNCTION__, sc->port.link_config,
2609 sc->link_params.speed_cap_mask);
2610 goto bxe_link_settings_requested_exit;
/* Unknown speed selection: log it and advertise everything supported. */
2614 DBPRINT(sc, BXE_FATAL, "%s(): NVRAM config error. BAD link "
2615 "speed - link_config = 0x%08X\n", __FUNCTION__,
2616 sc->port.link_config);
2617 sc->link_params.req_line_speed = 0;
2618 sc->port.advertising = sc->port.supported;
2622 DBPRINT(sc, BXE_VERBOSE_PHY,
2623 "%s(): req_line_speed = %d, req_duplex = %d\n",
2624 __FUNCTION__, sc->link_params.req_line_speed,
2625 sc->link_params.req_duplex);
/* Flow control comes straight from NVRAM ... */
2627 sc->link_params.req_flow_ctrl =
2628 sc->port.link_config & PORT_FEATURE_FLOW_CONTROL_MASK;
/* ... but AUTO flow control is meaningless without autoneg support. */
2630 if ((sc->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
2631 !(sc->port.supported & SUPPORTED_Autoneg))
2632 sc->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
2634 DBPRINT(sc, BXE_VERBOSE_PHY,
2635 "%s(): req_flow_ctrl = 0x%08X, advertising = 0x%08X\n",
2636 __FUNCTION__, sc->link_params.req_flow_ctrl,
2637 sc->port.advertising);
2639 bxe_link_settings_requested_exit:
2641 DBEXIT(BXE_VERBOSE_PHY);
2646 * Get function specific hardware configuration.
2648 * Multiple function devices such as the BCM57711E have configuration
2649 * information that is specific to each PCIe function of the controller.
2650 * The number of PCIe functions is not necessarily the same as the number
2651 * of Ethernet ports supported by the device.
2654 * 0 = Success, !0 = Failure
/*
 * Gather PCIe-function-specific configuration from bootcode shared memory:
 * common and port config first, then multi-function (E1H) outer-VLAN tag,
 * firmware sequence number, and the per-function factory MAC address.
 * NOTE(review): lines are elided in this chunk (original numbering jumps);
 * comments describe only the visible code.
 */
2657 bxe_hwinfo_function_get(struct bxe_softc *sc)
2659 uint32_t mac_hi, mac_lo, val;
2662 DBENTER(BXE_VERBOSE_LOAD);
2667 /* Get the common hardware configuration first. */
2668 bxe_hwinfo_common_get(sc);
2670 /* Assume no outer VLAN/multi-function support. */
2671 sc->e1hov = sc->e1hmf = 0;
2673 /* Get config info for mf enabled devices. */
2674 if (CHIP_IS_E1H(sc)) {
2675 sc->mf_config[BP_E1HVN(sc)] =
2676 SHMEM_RD(sc, mf_cfg.func_mf_config[func].config);
/* A non-default E1HOV tag indicates multi-function mode. */
2677 val = (SHMEM_RD(sc, mf_cfg.func_mf_config[func].e1hov_tag) &
2678 FUNC_MF_CFG_E1HOV_TAG_MASK);
2679 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2680 sc->e1hov = (uint16_t) val;
2685 goto bxe_hwinfo_function_get_exit;
2691 bxe_hwinfo_port_get(sc);
/* Seed the driver/bootcode mailbox sequence number for this function. */
2692 sc->fw_seq = SHMEM_RD(sc, func_mb[func].drv_mb_header) &
2693 DRV_MSG_SEQ_NUMBER_MASK;
2698 * Fetch the factory configured MAC address for multi function
2699 * devices. If this is not a multi-function device then the MAC
2700 * address was already read in the bxe_hwinfo_port_get() routine.
2701 * The MAC addresses used by the port are not the same as the MAC
2702 * addressed used by the function.
2705 mac_hi = SHMEM_RD(sc, mf_cfg.func_mf_config[func].mac_upper);
2706 mac_lo = SHMEM_RD(sc, mf_cfg.func_mf_config[func].mac_lower);
2708 if ((mac_lo == 0) && (mac_hi == 0)) {
2709 BXE_PRINTF("%s(%d): Invalid Ethernet address!\n",
2710 __FILE__, __LINE__);
/* Unpack the 16+32-bit shmem words into the 6-byte MAC address. */
2713 sc->link_params.mac_addr[0] = (u_char)(mac_hi >> 8);
2714 sc->link_params.mac_addr[1] = (u_char)(mac_hi);
2715 sc->link_params.mac_addr[2] = (u_char)(mac_lo >> 24);
2716 sc->link_params.mac_addr[3] = (u_char)(mac_lo >> 16);
2717 sc->link_params.mac_addr[4] = (u_char)(mac_lo >> 8);
2718 sc->link_params.mac_addr[5] = (u_char)(mac_lo);
2723 bxe_hwinfo_function_get_exit:
2724 DBEXIT(BXE_VERBOSE_LOAD);
2730 * Get port specific hardware configuration.
2732 * Multiple port devices such as the BCM57710 have configuration
2733 * information that is specific to each Ethernet port of the
2734 * controller. This function reads that configuration
2735 * information from the bootcode's shared memory and saves it
2739 * 0 = Success, !0 = Failure
/*
 * Read per-port hardware configuration from bootcode shared memory:
 * lane/external-PHY config, speed capability mask, link_config, XGXS
 * preemphasis values, and the port MAC address. Also derives the
 * supported/requested link settings via the two helpers below.
 * NOTE(review): lines are elided in this chunk; comments cover only the
 * visible code.
 */
2742 bxe_hwinfo_port_get(struct bxe_softc *sc)
2745 uint32_t val, mac_hi, mac_lo;
2747 DBENTER(BXE_VERBOSE_LOAD);
2751 sc->link_params.sc = sc;
2752 sc->link_params.port = port;
2754 /* Fetch several configuration values from bootcode shared memory. */
2755 sc->link_params.lane_config =
2756 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
2757 sc->link_params.ext_phy_config =
2758 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
/* Treat the 8727_NOC variant as a regular 8727 but remember the NOC flag. */
2760 if (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config) ==
2761 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
2762 sc->link_params.ext_phy_config &=
2763 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2764 sc->link_params.ext_phy_config |=
2765 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
2766 sc->link_params.feature_config_flags |=
2767 FEATURE_CONFIG_BCM8727_NOC;
2770 sc->link_params.speed_cap_mask =
2771 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
2772 sc->port.link_config =
2773 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
2776 /* Read the XGXS RX/TX preemphasis values. */
/* Each 32-bit shmem word holds two 16-bit preemphasis entries. */
2777 for (i = 0; i < 2; i++) {
2779 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
2780 sc->link_params.xgxs_config_rx[i << 1] = ((val >> 16) & 0xffff);
2781 sc->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
2784 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
2785 sc->link_params.xgxs_config_tx[i << 1] = ((val >> 16) & 0xffff);
2786 sc->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
2789 /* Fetch the device configured link settings. */
2790 sc->link_params.switch_cfg = sc->port.link_config &
2791 PORT_FEATURE_CONNECTED_SWITCH_MASK;
2793 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
2794 bxe_link_settings_requested(sc);
2796 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
2797 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
2799 if (mac_lo == 0 && mac_hi == 0) {
2800 BXE_PRINTF("%s(%d): No Ethernet address programmed on the "
2801 "controller!\n", __FILE__, __LINE__);
/* Unpack the 16+32-bit shmem words into the 6-byte MAC address. */
2804 sc->link_params.mac_addr[0] = (u_char)(mac_hi >> 8);
2805 sc->link_params.mac_addr[1] = (u_char)(mac_hi);
2806 sc->link_params.mac_addr[2] = (u_char)(mac_lo >> 24);
2807 sc->link_params.mac_addr[3] = (u_char)(mac_lo >> 16);
2808 sc->link_params.mac_addr[4] = (u_char)(mac_lo >> 8);
2809 sc->link_params.mac_addr[5] = (u_char)(mac_lo);
2812 DBEXIT(BXE_VERBOSE_LOAD);
2818 * Get common hardware configuration.
2820 * Multiple port devices such as the BCM57710 have configuration
2821 * information that is shared between all ports of the Ethernet
2822 * controller. This function reads that configuration
2823 * information from the bootcode's shared memory and saves it
2827 * 0 = Success, !0 = Failure
/*
 * Read configuration shared by all ports of the controller: chip id,
 * single/dual-port strapping, PCI capabilities, NVRAM size, shared memory
 * base, shmem validity, LED mode, preemphasis override, WoL capability
 * and bootcode version.
 * NOTE(review): lines are elided in this chunk; comments cover only the
 * visible code.
 */
2830 bxe_hwinfo_common_get(struct bxe_softc *sc)
2835 DBENTER(BXE_VERBOSE_LOAD);
2838 /* Get the chip revision. */
/* chip_id layout: num[31:16] | rev[15:12] | metal[11:4] | bond[3:0]. */
2839 sc->common.chip_id = sc->link_params.chip_id =
2840 ((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
2841 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0x000f) << 12) |
2842 ((REG_RD(sc, MISC_REG_CHIP_METAL) & 0xff) << 4) |
2843 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf));
2845 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): chip_id = 0x%08X.\n",
2846 __FUNCTION__, sc->common.chip_id);
/* Detect single-port devices from strap register 0x2874 / chip id bit 0. */
2848 val = (REG_RD(sc, 0x2874) & 0x55);
2849 if ((sc->common.chip_id & 0x1) ||
2850 (CHIP_IS_E1(sc) && val) || (CHIP_IS_E1H(sc) && (val == 0x55))) {
2851 sc->bxe_flags |= BXE_ONE_PORT_FLAG;
2852 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): Single port device.\n",
2856 /* Identify enabled PCI capabilites (PCIe, MSI-X, etc.). */
2857 bxe_probe_pci_caps(sc);
2859 /* Get the NVRAM size. */
2860 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
2861 sc->common.flash_size = (NVRAM_1MB_SIZE <<
2862 (val & MCPR_NVM_CFG4_FLASH_SIZE));
2864 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): flash_size = 0x%08x (%dKB)\n",
2865 __FUNCTION__, sc->common.flash_size,(sc->common.flash_size >> 10));
2867 /* Find the shared memory base address. */
2868 sc->common.shmem_base = sc->link_params.shmem_base =
2869 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
2870 sc->common.shmem2_base = REG_RD(sc, MISC_REG_GENERIC_CR_0);
2871 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): shmem_base = 0x%08X\n",
2872 __FUNCTION__, sc->common.shmem_base);
2874 /* Make sure the shared memory address is valid. */
/* An out-of-range base means the MCP (bootcode) never initialized shmem. */
2875 if (!sc->common.shmem_base ||
2876 (sc->common.shmem_base < 0xA0000) ||
2877 (sc->common.shmem_base > 0xC0000)) {
2879 BXE_PRINTF("%s(%d): MCP is not active!\n",
2880 __FILE__, __LINE__);
2881 /* ToDo: Remove the NOMCP support. */
2882 sc->bxe_flags |= BXE_NO_MCP_FLAG;
2884 goto bxe_hwinfo_common_get_exit;
2887 /* Make sure the shared memory contents are valid. */
2888 val = SHMEM_RD(sc, validity_map[BP_PORT(sc)]);
2889 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
2890 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
2891 BXE_PRINTF("%s(%d): Invalid NVRAM! Bad validity "
2892 "signature.\n", __FILE__, __LINE__);
2894 goto bxe_hwinfo_common_get_exit;
2897 /* Read the device configuration from shared memory. */
2898 sc->common.hw_config =
2899 SHMEM_RD(sc, dev_info.shared_hw_config.config);
2900 sc->link_params.hw_led_mode = ((sc->common.hw_config &
2901 SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT);
2903 /* Check if we need to override the preemphasis values. */
2904 sc->link_params.feature_config_flags = 0;
2905 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
2906 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
2907 sc->link_params.feature_config_flags |=
2908 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
2910 sc->link_params.feature_config_flags &=
2911 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
2913 /* In multifunction mode, we can't support WoL on a VN. */
2914 if (BP_E1HVN(sc) == 0) {
2915 val = REG_RD(sc, PCICFG_OFFSET + PCICFG_PM_CAPABILITY);
2916 sc->bxe_flags |= (val & PCICFG_PM_CAPABILITY_PME_IN_D3_COLD) ?
2917 0 : BXE_NO_WOL_FLAG;
2919 sc->bxe_flags |= BXE_NO_WOL_FLAG;
2921 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): %sWoL capable\n", __FUNCTION__,
2922 (sc->bxe_flags & BXE_NO_WOL_FLAG) ? "Not " : "");
2924 /* Check bootcode version */
2925 sc->common.bc_ver = ((SHMEM_RD(sc, dev_info.bc_rev)) >> 8);
2926 if (sc->common.bc_ver < MIN_BXE_BC_VER) {
2927 BXE_PRINTF("%s(%d): Warning: This driver needs bootcode "
2928 "0x%08X but found 0x%08X, please upgrade!\n",
2929 __FILE__, __LINE__, MIN_BXE_BC_VER, sc->common.bc_ver);
2931 goto bxe_hwinfo_common_get_exit;
2934 bxe_hwinfo_common_get_exit:
2935 DBEXIT(BXE_VERBOSE_LOAD);
2941 * Remove traces of PXE boot by forcing UNDI driver unload.
/*
 * Force-unload a resident UNDI (PXE boot) driver so this driver starts
 * from clean hardware: detect UNDI via DORQ_REG_NORM_CID_OFST, ask the
 * MCP to unload both ports, quiesce HC/NIG/AEU, then reset the chip while
 * preserving the NIG port-swap strapping.
 * NOTE(review): lines are elided in this chunk; the exact register/reset
 * sequence is order-sensitive — do not reorder.
 */
2947 bxe_undi_unload(struct bxe_softc *sc)
2949 uint32_t reset_code, swap_en, swap_val, val;
2952 DBENTER(BXE_VERBOSE_LOAD);
2954 /* Check if there is any driver already loaded */
2955 val = REG_RD(sc, MISC_REG_UNPREPARED);
2958 /* Check if it is the UNDI driver. */
2959 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_UNDI);
2960 val = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
2962 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
2965 DBPRINT(sc, BXE_WARN,
2966 "%s(): UNDI is active! Resetting the device.\n",
2969 /* Clear the UNDI indication. */
2970 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
2972 /* Try to unload UNDI on port 0. */
2974 sc->fw_seq = (SHMEM_RD(sc,
2975 func_mb[sc->bxe_func].drv_mb_header) &
2976 DRV_MSG_SEQ_NUMBER_MASK);
2977 reset_code = bxe_fw_command(sc, reset_code);
2979 /* Check if UNDI is active on port 1. */
2980 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
2982 /* Send "done" for previous unload. */
2983 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
2985 /* Now unload on port 1. */
2987 sc->fw_seq = (SHMEM_RD(sc,
2988 func_mb[sc->bxe_func].drv_mb_header) &
2989 DRV_MSG_SEQ_NUMBER_MASK);
2991 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
2992 bxe_fw_command(sc, reset_code);
2995 /* It's now safe to release the lock. */
2996 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_UNDI);
/* Quiesce the host coalescing block and NIG RX classification. */
2998 REG_WR(sc, (BP_PORT(sc) ? HC_REG_CONFIG_1 :
2999 HC_REG_CONFIG_0), 0x1000);
3001 REG_WR(sc, (BP_PORT(sc) ?
3002 NIG_REG_LLH1_BRB1_DRV_MASK :
3003 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
3005 REG_WR(sc, (BP_PORT(sc) ?
3006 NIG_REG_LLH1_BRB1_NOT_MCP :
3007 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* Mask attention interrupts for this function before the reset. */
3010 REG_WR(sc, (BP_PORT(sc) ?
3011 MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3012 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
3016 /* Save NIG port swap information. */
3017 swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
3018 swap_en = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
3020 /* Reset the controller. */
3021 REG_WR(sc, GRCBASE_MISC +
3022 MISC_REGISTERS_RESET_REG_1_CLEAR, 0xd3ffffff);
3023 REG_WR(sc, GRCBASE_MISC +
3024 MISC_REGISTERS_RESET_REG_2_CLEAR, 0x00001403);
3026 /* Take the NIG out of reset and restore swap values.*/
3027 REG_WR(sc, GRCBASE_MISC +
3028 MISC_REGISTERS_RESET_REG_1_SET,
3029 MISC_REGISTERS_RESET_REG_1_RST_NIG);
3030 REG_WR(sc, NIG_REG_PORT_SWAP, swap_val);
3031 REG_WR(sc, NIG_REG_STRAP_OVERRIDE, swap_en);
3033 /* Send completion message to the MCP. */
3034 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
3037 * Restore our function and firmware sequence counter.
3039 sc->bxe_func = func;
3040 sc->fw_seq = (SHMEM_RD(sc,
3041 func_mb[sc->bxe_func].drv_mb_header) &
3042 DRV_MSG_SEQ_NUMBER_MASK);
3044 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_UNDI);
3047 DBEXIT(BXE_VERBOSE_LOAD);
3052 * Device detach function.
3054 * Stops the controller, resets the controller, and releases resources.
3057 * 0 on success, !0 = failure.
/*
 * Device detach entry point: refuse to detach while VLANs are attached,
 * stop the controller if running, then free grcdump buffer, interrupts,
 * the network interface, driver resources, PCI resources and mutexes.
 * NOTE(review): lines are elided in this chunk; comments cover only the
 * visible code.
 */
3060 bxe_detach(device_t dev)
3062 struct bxe_softc *sc;
3066 sc = device_get_softc(dev);
3067 DBENTER(BXE_INFO_UNLOAD);
/* Detaching with active VLAN interfaces would strand them. */
3072 if (ifp != NULL && ifp->if_vlantrunk != NULL) {
3073 BXE_PRINTF("%s(%d): Cannot detach while VLANs are in use.\n",
3074 __FILE__, __LINE__);
3076 goto bxe_detach_exit;
3079 /* Stop and reset the controller if it was open. */
3080 if (sc->state != BXE_STATE_CLOSED) {
3082 rc = bxe_stop_locked(sc, UNLOAD_CLOSE);
3083 BXE_CORE_UNLOCK(sc);
3087 /* Free memory buffer for grcdump output.*/
3088 if (sc->grcdump_buffer != NULL)
3089 free(sc->grcdump_buffer, M_TEMP);
3092 /* Clean-up any remaining interrupt resources. */
3093 bxe_interrupt_detach(sc);
3094 bxe_interrupt_free(sc);
3096 /* Release the network interface. */
3098 ether_ifdetach(ifp);
3099 ifmedia_removeall(&sc->bxe_ifmedia);
3101 /* Release all remaining resources. */
3102 bxe_release_resources(sc);
3104 /* Free all PCI resources. */
3105 bxe_pci_resources_free(sc);
3106 pci_disable_busmaster(dev);
3108 bxe_mutexes_free(sc);
3111 DBEXIT(BXE_INFO_UNLOAD);
3117 * Setup a leading connection for the controller.
3120 * 0 = Success, !0 = Failure.
/*
 * Open the leading (fp[0]) connection: re-arm its IGU status block,
 * post a PORT_SETUP ramrod on the slowpath queue, and poll for the
 * ramrod completion (sc->state transitioning to BXE_STATE_OPEN).
 */
3123 bxe_setup_leading(struct bxe_softc *sc)
3127 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
3129 DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): Setup leading connection "
3130 "on fp[00].\n", __FUNCTION__);
3132 /* Reset IGU state for the leading connection. */
3133 bxe_ack_sb(sc, sc->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
3135 /* Post a PORT_SETUP ramrod and wait for completion. */
3136 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
3138 /* Wait for the ramrod to complete on the leading connection. */
3139 rc = bxe_wait_ramrod(sc, BXE_STATE_OPEN, 0, &(sc->state), 1);
3141 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
3147 * Stop the leading connection on the controller.
/*
 * Close the leading (fp[0]) connection: ETH_HALT ramrod, wait for the
 * HALTED state, then PORT_DELETE ramrod detected by watching the default
 * status block slowpath producer index advance.
 * NOTE(review): lines are elided in this chunk; comments cover only the
 * visible code.
 */
3153 bxe_stop_leading(struct bxe_softc *sc)
3155 uint16_t dsb_sp_prod_idx;
3158 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
3159 BXE_VERBOSE_UNLOAD), "%s(): Stop client connection "
3160 "on fp[00].\n", __FUNCTION__);
3162 /* Send the ETH_HALT ramrod. */
3163 sc->fp[0].state = BXE_FP_STATE_HALTING;
3164 bxe_sp_post(sc,RAMROD_CMD_ID_ETH_HALT, 0, 0, sc->fp[0].cl_id, 0);
3166 /* Poll for the ETH_HALT ramrod on the leading connection. */
3167 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED,
3168 0, &(sc->fp[0].state), 1);
3170 DBPRINT(sc, BXE_FATAL, "%s(): Timeout waiting for "
3171 "STATE_HALTED ramrod completion!\n", __FUNCTION__);
3172 goto bxe_stop_leading_exit;
3175 /* Get the default status block SP producer index. */
3176 dsb_sp_prod_idx = *sc->dsb_sp_prod;
3178 /* After HALT we send PORT_DELETE ramrod. */
3179 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
3181 /* Be patient but don't wait forever. */
/* PORT_DEL completion shows up as a bump of the saved producer index. */
3183 while (dsb_sp_prod_idx == *sc->dsb_sp_prod) {
3185 DBPRINT(sc, BXE_FATAL, "%s(): Timeout waiting for "
3186 "PORT_DEL ramrod completion!\n", __FUNCTION__);
3195 /* Update the adapter and connection states. */
3196 sc->state = BXE_STATE_CLOSING_WAIT4_UNLOAD;
3197 sc->fp[0].state = BXE_FP_STATE_CLOSED;
3199 bxe_stop_leading_exit:
3204 * Setup a client connection when using multi-queue/RSS.
/*
 * Open a non-leading client connection fp[index] (multi-queue/RSS):
 * re-arm its IGU status block, post a CLIENT_SETUP ramrod, and wait
 * for the fastpath state to reach BXE_FP_STATE_OPEN.
 */
3210 bxe_setup_multi(struct bxe_softc *sc, int index)
3212 struct bxe_fastpath *fp;
3215 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
3216 BXE_VERBOSE_UNLOAD), "%s(): Setup client connection "
3217 "on fp[%02d].\n", __FUNCTION__, index);
3219 fp = &sc->fp[index];
3220 /* Reset IGU state. */
3221 bxe_ack_sb(sc, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
3223 /* Post a CLIENT_SETUP ramrod. */
3224 fp->state = BXE_FP_STATE_OPENING;
3225 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, fp->cl_id, 0);
3227 /* Wait for the ramrod to complete. */
3228 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_OPEN, index, &fp->state, 1);
3234 * Stop a client connection.
3236 * Stops an individual client connection on the device. Use
3237 * bxe_stop_leading() for the first/default connection.
3240 * 0 = Success, !0 = Failure.
/*
 * Close a non-leading client connection fp[index]: ETH_HALT ramrod, wait
 * for HALTED, then CFC_DEL ramrod and wait for the CLOSED state.
 * The leading connection uses bxe_stop_leading() instead.
 */
3243 bxe_stop_multi(struct bxe_softc *sc, int index)
3245 struct bxe_fastpath *fp;
3248 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
3249 BXE_VERBOSE_UNLOAD), "%s(): Stop client connection "
3250 "on fp[%02d].\n", __FUNCTION__, index);
3252 fp = &sc->fp[index];
3254 /* Halt the client connection. */
3255 fp->state = BXE_FP_STATE_HALTING;
3256 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
3258 /* Wait for the HALT ramrod completion. */
3259 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED, index, &fp->state, 1);
3261 BXE_PRINTF("%s(%d): fp[%02d] client ramrod halt failed!\n",
3262 __FILE__, __LINE__, index);
3263 goto bxe_stop_multi_exit;
3265 /* Delete the CFC entry. */
3266 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
3268 /* Poll for the DELETE ramrod completion. */
3269 rc = bxe_wait_ramrod(sc, BXE_FP_STATE_CLOSED, index, &fp->state, 1);
3271 bxe_stop_multi_exit:
3276 * Hardware lock for shared, dual-port PHYs.
/*
 * Take the MDIO hardware lock when the external PHY is one of the
 * dual-port types (8072/8073/8726/8727) that is shared between ports;
 * other PHY types need no lock.
 */
3282 bxe_acquire_phy_lock(struct bxe_softc *sc)
3284 uint32_t ext_phy_type;
3286 DBENTER(BXE_VERBOSE_PHY);
3288 ext_phy_type = XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
3289 switch(ext_phy_type){
3290 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3291 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3292 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3293 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3294 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
3299 DBEXIT(BXE_VERBOSE_PHY);
3303 * Hardware unlock for shared, dual-port PHYs.
/*
 * Counterpart to bxe_acquire_phy_lock(): drop the MDIO hardware lock for
 * the same set of shared dual-port PHY types (8072/8073/8726/8727).
 */
3309 bxe_release_phy_lock(struct bxe_softc *sc)
3311 uint32_t ext_phy_type;
3313 DBENTER(BXE_VERBOSE_PHY);
3314 ext_phy_type = XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config);
3315 switch(ext_phy_type){
3316 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3318 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3319 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3320 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
3326 DBEXIT(BXE_VERBOSE_PHY);
/*
 * Reset the PHY link under the PHY hardware lock via the common
 * bxe_link_reset() code; skipped (with a warning) when the bootcode
 * is not running.
 */
3335 bxe__link_reset(struct bxe_softc *sc)
3337 DBENTER(BXE_VERBOSE_PHY);
3340 bxe_acquire_phy_lock(sc);
3341 bxe_link_reset(&sc->link_params, &sc->link_vars, 1);
3342 bxe_release_phy_lock(sc);
3344 DBPRINT(sc, BXE_WARN,
3345 "%s(): Bootcode is not running, not resetting link!\n",
3349 DBEXIT(BXE_VERBOSE_PHY);
3353 * Stop the controller.
3356 * 0 = Success, !0 = Failure
/*
 * Stop the controller (caller holds the core lock): halt the tick/RX/
 * stats, drain TX and slowpath work, disable interrupts, clear MAC/CAM
 * entries, program WoL per unload_mode, tear down all client connections,
 * negotiate the unload reset_code with the MCP, reset the chip and free
 * the RX/TX chains.
 * NOTE(review): lines are elided in this chunk; the teardown ordering is
 * significant — do not reorder visible statements.
 */
3359 bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
3362 struct mac_configuration_cmd *config;
3363 struct bxe_fastpath *fp;
3364 uint32_t reset_code;
3365 uint32_t emac_base, val;
3366 uint8_t entry, *mac_addr;
3367 int count, i, port, rc;
3369 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
3373 rc = reset_code = 0;
3375 BXE_CORE_LOCK_ASSERT(sc);
3377 /* Stop the periodic tick. */
3378 callout_stop(&sc->bxe_tick_callout);
3380 sc->state = BXE_STATE_CLOSING_WAIT4_HALT;
3382 /* Prevent any further RX traffic. */
3383 sc->rx_mode = BXE_RX_MODE_NONE;
3384 bxe_set_storm_rx_mode(sc);
3386 /* Tell the stack the driver is stopped and TX queue is full. */
3388 ifp->if_drv_flags = 0;
3390 /* Tell the bootcode to stop watching for a heartbeat. */
3391 SHMEM_WR(sc, func_mb[BP_FUNC(sc)].drv_pulse_mb,
3392 (DRV_PULSE_ALWAYS_ALIVE | sc->fw_drv_pulse_wr_seq));
3394 /* Stop the statistics updates. */
3395 bxe_stats_handle(sc, STATS_EVENT_STOP);
3397 /* Wait until all TX fastpath tasks have completed. */
3398 for (i = 0; i < sc->num_queues; i++) {
3401 if (fp == NULL || fp->tx_pkt_cons_sb == NULL)
3405 while (bxe_has_tx_work(fp)) {
3411 "%s(%d): Timeout wating for fp[%02d] transmits to complete!\n",
3412 __FILE__, __LINE__, i);
3421 /* Wait until all slowpath tasks have completed. */
3423 while ((sc->spq_left != MAX_SPQ_PENDING) && count--)
3426 /* Disable Interrupts */
3427 bxe_int_disable(sc);
3430 /* Clear the MAC addresses. */
/* E1 uses a CAM-invalidate ramrod; E1H clears the MC hash registers. */
3431 if (CHIP_IS_E1(sc)) {
3432 config = BXE_SP(sc, mcast_config);
3433 bxe_set_mac_addr_e1(sc, 0);
3435 for (i = 0; i < config->hdr.length; i++)
3436 CAM_INVALIDATE(&config->config_table[i]);
3438 config->hdr.length = i;
3439 config->hdr.offset = BXE_MAX_MULTICAST * (1 + port);
3440 config->hdr.client_id = BP_CL_ID(sc);
3441 config->hdr.reserved1 = 0;
3443 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
3444 U64_HI(BXE_SP_MAPPING(sc, mcast_config)),
3445 U64_LO(BXE_SP_MAPPING(sc, mcast_config)), 0);
3447 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0);
3448 bxe_set_mac_addr_e1h(sc, 0);
3449 for (i = 0; i < MC_HASH_SIZE; i++)
3450 REG_WR(sc, MC_HASH_OFFSET(sc, i), 0);
3451 REG_WR(sc, MISC_REG_E1HMF_MODE, 0);
3454 /* Determine if any WoL settings needed. */
3455 if (unload_mode == UNLOAD_NORMAL)
3456 /* Driver initiatied WoL is disabled. */
3457 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3458 else if (sc->bxe_flags & BXE_NO_WOL_FLAG) {
3459 /* Driver initiated WoL is disabled, use OOB WoL settings. */
3460 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
3461 if (CHIP_IS_E1H(sc))
3462 REG_WR(sc, MISC_REG_E1HMF_MODE, 0);
3463 } else if (sc->wol) {
/* Program the EMAC MAC-match registers so WoL frames are recognized. */
3464 emac_base = BP_PORT(sc) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
3465 mac_addr = sc->link_params.mac_addr;
3466 entry = (BP_E1HVN(sc) + 1) * 8;
3467 val = (mac_addr[0] << 8) | mac_addr[1];
3468 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val);
3469 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3470 (mac_addr[4] << 8) | mac_addr[5];
3471 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
3472 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
3475 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3478 /* Stop all non-leading client connections. */
3479 for (i = 1; i < sc->num_queues; i++) {
3480 if (bxe_stop_multi(sc, i)){
3481 goto bxe_stop_locked_exit;
3485 /* Stop the leading client connection. */
3486 rc = bxe_stop_leading(sc);
3489 bxe_stop_locked_exit:
/* Without MCP, decide the unload scope from the local load counters. */
3491 DBPRINT(sc, BXE_INFO,
3492 "%s(): Old No MCP load counts: %d, %d, %d\n",
3493 __FUNCTION__, load_count[0], load_count[1], load_count[2]);
3496 load_count[1 + port]--;
3497 DBPRINT(sc, BXE_INFO,
3498 "%s(): New No MCP load counts: %d, %d, %d\n",
3499 __FUNCTION__, load_count[0], load_count[1], load_count[2]);
3501 if (load_count[0] == 0)
3502 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
3503 else if (load_count[1 + BP_PORT(sc)] == 0)
3504 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
3506 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
3508 /* Tell MCP driver unload is complete. */
3509 reset_code = bxe_fw_command(sc, reset_code);
/* Only reset the link when the whole port/device is going down. */
3512 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
3513 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
3514 bxe__link_reset(sc);
3518 /* Reset the chip */
3519 bxe_reset_chip(sc, reset_code);
3523 /* Report UNLOAD_DONE to MCP */
3525 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
3528 /* Free RX chains and buffers. */
3529 bxe_clear_rx_chains(sc);
3531 /* Free TX chains and buffers. */
3532 bxe_clear_tx_chains(sc);
3534 sc->state = BXE_STATE_CLOSED;
3538 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET |BXE_INFO_UNLOAD);
3543 * Device shutdown function.
3545 * Stops and resets the controller.
3548 * 0 = Success, !0 = Failure
/*
 * Device shutdown method: gracefully stop and reset the controller.
 * NOTE(review): excerpt is discontinuous — the BXE_CORE_LOCK() pairing the
 * BXE_CORE_UNLOCK() below is presumably on an elided line; confirm in full file.
 */
3551 bxe_shutdown(device_t dev)
3553 struct bxe_softc *sc;
3555 sc = device_get_softc(dev);
3556 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
/* UNLOAD_NORMAL: full stop with MCP-driven unload handshake. */
3559 bxe_stop_locked(sc, UNLOAD_NORMAL);
3560 BXE_CORE_UNLOCK(sc);
3562 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
3567 * Prints out link speed and duplex setting to console.
/*
 * Report link state (up/down, speed, duplex, flow control) to the console
 * and push the change to the network stack via if_link_state_change().
 */
3573 bxe_link_report(struct bxe_softc *sc)
3575 uint32_t line_speed;
3576 uint16_t vn_max_rate;
3578 DBENTER(BXE_VERBOSE_PHY);
3580 if (sc->link_vars.link_up) {
3581 /* Report the link status change to OS. */
3582 if (sc->state == BXE_STATE_OPEN)
3583 if_link_state_change(sc->bxe_ifp, LINK_STATE_UP);
3585 line_speed = sc->link_vars.line_speed;
/*
 * Clamp reported speed to the per-VN max bandwidth (units of 100 Mbps in
 * mf_config). NOTE(review): presumably guarded by an elided IS_E1HMF()
 * check — confirm against the full source.
 */
3588 vn_max_rate = ((sc->mf_config[BP_E1HVN(sc)] &
3589 FUNC_MF_CFG_MAX_BW_MASK) >>
3590 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
3591 if (vn_max_rate < line_speed)
3592 line_speed = vn_max_rate;
3595 BXE_PRINTF("Link is up, %d Mbps, ", line_speed);
3597 if (sc->link_vars.duplex == MEDIUM_FULL_DUPLEX)
3598 printf("full duplex");
3600 printf("half duplex");
3602 if (sc->link_vars.flow_ctrl) {
3603 if (sc->link_vars.flow_ctrl & FLOW_CTRL_RX) {
3604 printf(", receive ");
3605 if (sc->link_vars.flow_ctrl & FLOW_CTRL_TX)
3606 printf("& transmit ");
3608 printf(", transmit ");
3609 printf("flow control ON");
3613 /* Report the link down */
3614 BXE_PRINTF("Link is down\n");
3615 if_link_state_change(sc->bxe_ifp, LINK_STATE_DOWN);
3618 DBEXIT(BXE_VERBOSE_PHY);
/*
 * Refresh cached link state from the PHY layer, kick the statistics state
 * machine accordingly, and report the result. No-op unless the interface
 * is open and statistics are enabled.
 */
3627 bxe__link_status_update(struct bxe_softc *sc)
3629 DBENTER(BXE_VERBOSE_PHY);
3631 if (sc->stats_enable == FALSE || sc->state != BXE_STATE_OPEN)
3634 bxe_link_status_update(&sc->link_params, &sc->link_vars);
3636 if (sc->link_vars.link_up)
3637 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
3639 bxe_stats_handle(sc, STATS_EVENT_STOP);
/* Re-read multi-function config (bandwidth limits may have changed). */
3641 bxe_read_mf_cfg(sc);
3643 /* Indicate link status. */
3644 bxe_link_report(sc);
3646 DBEXIT(BXE_VERBOSE_PHY);
3650 * Calculate flow control to advertise during autonegotiation.
/*
 * Translate the negotiated IEEE pause bits into the ADVERTISED_* flow
 * control flags advertised during autonegotiation.
 */
3656 bxe_calc_fc_adv(struct bxe_softc *sc)
3658 DBENTER(BXE_EXTREME_PHY);
3660 switch (sc->link_vars.ieee_fc &
3661 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
3663 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
3664 sc->port.advertising &= ~(ADVERTISED_Asym_Pause |
3668 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
3669 sc->port.advertising |= (ADVERTISED_Asym_Pause |
3673 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
3674 sc->port.advertising |= ADVERTISED_Asym_Pause;
/* default: advertise no pause capability (elided lines in excerpt). */
3678 sc->port.advertising &= ~(ADVERTISED_Asym_Pause |
3683 DBEXIT(BXE_EXTREME_PHY);
/*
 * First-time PHY bring-up: choose requested flow-control mode, run the
 * common PHY init under the PHY lock, then compute advertised pause bits
 * and report link if it came up. Skipped when bootcode is not running.
 */
3694 bxe_initial_phy_init(struct bxe_softc *sc)
3698 DBENTER(BXE_VERBOSE_PHY);
3704 * It is recommended to turn off RX flow control for 5771x
3705 * when using jumbo frames for better performance.
/* TX-only flow control for jumbo mbufs on non-MF; otherwise both ways. */
3707 if (!IS_E1HMF(sc) && (sc->mbuf_alloc_size > 5000))
3708 sc->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
3710 sc->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
3712 bxe_acquire_phy_lock(sc);
3713 rc = bxe_phy_init(&sc->link_params, &sc->link_vars);
3714 bxe_release_phy_lock(sc);
3716 bxe_calc_fc_adv(sc);
3718 if (sc->link_vars.link_up) {
3719 bxe_stats_handle(sc,STATS_EVENT_LINK_UP);
3720 bxe_link_report(sc);
3724 DBPRINT(sc, BXE_FATAL, "%s(): Bootcode is not running, "
3725 "not initializing link!\n", __FUNCTION__);
3729 DBEXIT(BXE_VERBOSE_PHY);
3734 #if __FreeBSD_version >= 800000
3736 * Allocate buffer rings used for multiqueue.
3739 * 0 = Success, !0 = Failure.
/*
 * Allocate one buf_ring per fastpath queue for multiqueue TX (FreeBSD 8+).
 * Returns 0 on success; jumps to the exit label on the first allocation
 * failure (elided return-code handling in this excerpt).
 */
3742 bxe_alloc_buf_rings(struct bxe_softc *sc)
3744 struct bxe_fastpath *fp;
3747 DBENTER(BXE_VERBOSE_LOAD);
3750 for (i = 0; i < sc->num_queues; i++) {
/* M_NOWAIT: called in a context where sleeping is not allowed. */
3754 fp->br = buf_ring_alloc(BXE_BR_SIZE,
3755 M_DEVBUF, M_NOWAIT, &fp->mtx);
3756 if (fp->br == NULL) {
3758 goto bxe_alloc_buf_rings_exit;
3761 BXE_PRINTF("%s(%d): Bug!\n", __FILE__, __LINE__);
3764 bxe_alloc_buf_rings_exit:
3765 DBEXIT(BXE_VERBOSE_LOAD);
3770 * Releases buffer rings used for multiqueue.
/* Release all per-queue buf_rings allocated by bxe_alloc_buf_rings(). */
3776 bxe_free_buf_rings(struct bxe_softc *sc)
3778 struct bxe_fastpath *fp;
3781 DBENTER(BXE_VERBOSE_UNLOAD);
3783 for (i = 0; i < sc->num_queues; i++) {
3787 buf_ring_free(fp->br, M_DEVBUF);
3791 DBEXIT(BXE_VERBOSE_UNLOAD);
3797 * Handles controller initialization.
3799 * Must be called from a locked routine. Since this code
3800 * may be called from the OS it does not provide a return
3801 * error value and must clean-up it's own mess.
/*
 * Core controller initialization (must hold the core lock).
 *
 * Sequence: negotiate load type with the MCP (or emulate it via the
 * load_count[] array when no MCP is present), init hardware and NIC
 * internals, allocate buf rings, issue the PORT_SETUP/CLIENT_SETUP
 * ramrods, program the MAC address, bring up the PHY, and start the
 * periodic tick. On failure, unwinds through the numbered failure
 * labels in reverse order of setup.
 */
3807 bxe_init_locked(struct bxe_softc *sc, int load_mode)
3813 DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET);
3815 BXE_CORE_LOCK_ASSERT(sc);
3818 /* Skip if we're in panic mode. */
3820 DBPRINT(sc, BXE_WARN, "%s(): Panic mode enabled, exiting!\n",
3822 goto bxe_init_locked_exit;
3825 /* Check if the driver is still running and bail out if it is. */
3826 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3827 DBPRINT(sc, BXE_WARN,
3828 "%s(): Init called while driver is running!\n",
3830 goto bxe_init_locked_exit;
3834 * Send LOAD_REQUEST command to MCP.
3835 * The MCP will return the type of LOAD
3836 * the driver should perform.
3837 * - If it is the first port to be initialized
3838 * then all common blocks should be initialized.
3839 * - If it is not the first port to be initialized
3840 * then don't do the common block initialization.
3842 sc->state = BXE_STATE_OPENING_WAIT4_LOAD;
3847 DBPRINT(sc, BXE_INFO,
3848 "%s(): Old No MCP load counts: %d, %d, %d\n",
3850 load_count[0], load_count[1], load_count[2]);
/* load_count[0] = total loads; [1 + port] = per-port loads (no-MCP path). */
3853 load_count[1 + port]++;
3855 DBPRINT(sc, BXE_INFO,
3856 "%s(): New No MCP load counts: %d, %d, %d\n",
3858 load_count[0], load_count[1], load_count[2]);
3860 /* No MCP to tell us what to do. */
3861 if (load_count[0] == 1)
3862 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
3863 else if (load_count[1 + port] == 1)
3864 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
3866 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
3869 /* Ask the MCP what type of initialization we need to do. */
3870 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ);
3872 if ((load_code == 0) ||
3873 (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)) {
3874 BXE_PRINTF("%s(%d): Bootcode refused load request.!\n",
3875 __FILE__, __LINE__);
3876 goto bxe_init_locked_failed1;
3880 /* Keep track of whether we are controlling the port. */
3881 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
3882 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
3887 /* Block any interrupts until we're ready. */
3890 /* Initialize hardware. */
3891 error = bxe_init_hw(sc, load_code);
3893 BXE_PRINTF("%s(%d): Hardware initialization failed, "
3894 "aborting!\n", __FILE__, __LINE__);
3895 goto bxe_init_locked_failed1;
3898 /* Calculate and save the Ethernet MTU size. */
/* +4 covers a second VLAN tag's worth of slack beyond CRC (double-tag). */
3899 sc->port.ether_mtu = ifp->if_mtu + ETHER_HDR_LEN +
3900 (ETHER_VLAN_ENCAP_LEN * 2) + ETHER_CRC_LEN + 4;
3902 DBPRINT(sc, BXE_INFO, "%s(): Setting MTU = %d\n",
3903 __FUNCTION__, sc->port.ether_mtu);
3905 /* Setup the mbuf allocation size for RX frames. */
3906 if (sc->port.ether_mtu <= MCLBYTES)
3907 sc->mbuf_alloc_size = MCLBYTES;
3908 else if (sc->port.ether_mtu <= PAGE_SIZE)
3909 sc->mbuf_alloc_size = PAGE_SIZE;
3911 sc->mbuf_alloc_size = MJUM9BYTES;
3913 DBPRINT(sc, BXE_INFO, "%s(): mbuf_alloc_size = %d, "
3914 "max_frame_size = %d\n", __FUNCTION__,
3915 sc->mbuf_alloc_size, sc->port.ether_mtu);
3917 /* Setup NIC internals and enable interrupts. */
3918 error = bxe_init_nic(sc, load_code);
3920 BXE_PRINTF("%s(%d): NIC initialization failed, "
3921 "aborting!\n", __FILE__, __LINE__);
3922 goto bxe_init_locked_failed1;
3925 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
3926 (sc->common.shmem2_base)){
3927 if (sc->dcc_enable == TRUE) {
/* NOTE(review): "Enabing" is a typo for "Enabling" in this user-visible
 * message; fix would change a runtime string (left as-is here). */
3928 BXE_PRINTF("Enabing DCC support\n");
3929 SHMEM2_WR(sc, dcc_support,
3930 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
3931 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
3935 #if __FreeBSD_version >= 800000
3936 /* Allocate buffer rings for multiqueue operation. */
3937 error = bxe_alloc_buf_rings(sc);
3939 BXE_PRINTF("%s(%d): Buffer ring initialization failed, "
3940 "aborting!\n", __FILE__, __LINE__);
3941 goto bxe_init_locked_failed1;
3945 /* Tell MCP that driver load is done. */
3947 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE);
3949 BXE_PRINTF("%s(%d): Driver load failed! No MCP "
3950 "response to LOAD_DONE!\n", __FILE__, __LINE__);
3951 goto bxe_init_locked_failed2;
3955 sc->state = BXE_STATE_OPENING_WAIT4_PORT;
3957 /* Enable ISR for PORT_SETUP ramrod. */
3960 /* Setup the leading connection for the controller. */
3961 error = bxe_setup_leading(sc);
3963 DBPRINT(sc, BXE_FATAL, "%s(): Initial PORT_SETUP ramrod "
3964 "failed. State is not OPEN!\n", __FUNCTION__);
3965 goto bxe_init_locked_failed3;
3968 if (CHIP_IS_E1H(sc)) {
3969 if (sc->mf_config[BP_E1HVN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
3970 BXE_PRINTF("Multi-function mode is disabled\n");
3971 /* sc->state = BXE_STATE_DISABLED; */
3974 /* Setup additional client connections for RSS/multi-queue */
3975 if (sc->state == BXE_STATE_OPEN) {
3976 for (i = 1; i < sc->num_queues; i++) {
3977 if (bxe_setup_multi(sc, i)) {
3978 DBPRINT(sc, BXE_FATAL,
3979 "%s(): fp[%02d] CLIENT_SETUP ramrod failed! State not OPEN!\n",
3981 goto bxe_init_locked_failed4;
3990 /* Initialize statistics. */
3994 /* Load our MAC address. */
3995 bcopy(IF_LLADDR(sc->bxe_ifp), sc->link_params.mac_addr, ETHER_ADDR_LEN);
3998 bxe_set_mac_addr_e1(sc, 1);
4000 bxe_set_mac_addr_e1h(sc, 1);
4004 /* Perform PHY initialization for the primary port. */
4006 bxe_initial_phy_init(sc);
4010 /* Start fastpath. */
4011 switch (load_mode) {
4014 /* Initialize the receive filters. */
4015 bxe_set_rx_mode(sc);
4019 /* Initialize the receive filters. */
4020 bxe_set_rx_mode(sc);
4021 sc->state = BXE_STATE_DIAG;
4025 DBPRINT(sc, BXE_WARN, "%s(): Unknown load mode (%d)!\n",
4026 __FUNCTION__, load_mode);
4031 bxe__link_status_update(sc);
4034 /* Tell the stack the driver is running. */
4035 ifp->if_drv_flags = IFF_DRV_RUNNING;
4037 /* Schedule our periodic timer tick. */
4038 callout_reset(&sc->bxe_tick_callout, hz, bxe_tick, sc);
4039 /* Everything went OK, go ahead and exit. */
4040 goto bxe_init_locked_exit;
/* ---- Failure unwinding: labels run in reverse order of setup. ---- */
4042 bxe_init_locked_failed4:
4043 /* Try and gracefully shutdown the device because of a failure. */
4044 for (i = 1; i < sc->num_queues; i++)
4045 bxe_stop_multi(sc, i);
4047 bxe_init_locked_failed3:
4048 bxe_stop_leading(sc);
4049 bxe_stats_handle(sc, STATS_EVENT_STOP);
4051 bxe_init_locked_failed2:
4052 bxe_int_disable(sc);
4054 bxe_init_locked_failed1:
/* Complete the MCP unload handshake so firmware state stays consistent. */
4056 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE);
4057 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
4058 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
4062 #if __FreeBSD_version >= 800000
4063 bxe_free_buf_rings(sc);
4066 DBPRINT(sc, BXE_WARN, "%s(): Initialization failed!\n", __FUNCTION__);
4068 bxe_init_locked_exit:
4069 DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET);
4073 * Ramrod wait function.
4075 * Waits for a ramrod command to complete.
4078 * 0 = Success, !0 = Failure
/*
 * Wait (or poll) for a ramrod command to complete by watching *state_p
 * reach the expected state. In poll mode, manually services the RX ring
 * so bxe_sp_event() can advance the state. Returns 0 on success,
 * non-zero on timeout (elided lines in this excerpt).
 */
4081 bxe_wait_ramrod(struct bxe_softc *sc, int state, int idx, int *state_p,
4086 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
4088 DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): %s for state 0x%08X on "
4089 "fp[%02d], currently 0x%08X.\n", __FUNCTION__,
4090 poll ? "Polling" : "Waiting", state, idx, *state_p);
4096 /* Manually check for the completion. */
4100 * Some commands don't use the leading client
4104 bxe_rxeof(&sc->fp[idx]);
4107 /* State may be changed by bxe_sp_event(). */
4109 if (*state_p == state)
4110 goto bxe_wait_ramrod_exit;
4114 /* Pause 1ms before checking again. */
4118 /* We timed out polling for a completion. */
4119 DBPRINT(sc, BXE_FATAL, "%s(): Timeout %s for state 0x%08X on fp[%02d]. "
4120 "Got 0x%x instead\n", __FUNCTION__, poll ? "polling" : "waiting",
4121 state, idx, *state_p);
4125 bxe_wait_ramrod_exit:
4127 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
/*
 * DMAE write of an arbitrary length: splits the transfer into chunks of
 * at most DMAE_LEN32_WR_MAX dwords, then writes the remainder.
 * 'offset' is in bytes (dwords * 4); 'len' counts dwords.
 */
4136 bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr,
4137 uint32_t addr, uint32_t len)
4139 int dmae_wr_max, offset;
4140 DBENTER(BXE_INSANE_REGS);
4142 dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
4144 while (len > dmae_wr_max) {
4145 bxe_write_dmae(sc, phys_addr + offset, addr + offset,
4147 offset += dmae_wr_max * 4;
4150 bxe_write_dmae(sc, phys_addr + offset, addr + offset, len);
4151 DBEXIT(BXE_INSANE_REGS);
/* Convenience wrapper: string-write 'len' dwords into a GRC block register
 * range; 'reg_off' is a dword offset from 'reg' within GRCBASE_<block>. */
4157 #define INIT_MEM_WR(block, reg, part, hw, data, reg_off, len) \
4158 bxe_init_str_wr(sc, GRCBASE_##block + reg + reg_off * 4, data, len)
4162 * Write a block of data to a range of registers.
/* Write 'len' dwords to consecutive registers starting at 'addr' using
 * direct (memory-mapped) access. */
4168 bxe_init_str_wr(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4172 for (i = 0; i < len; i++)
4173 REG_WR(sc, addr + i * 4, data[i]);
4177 * Write a block of data to a range of registers using indirect access.
/* Write 'len' dwords to consecutive registers starting at 'addr' using
 * indirect (PCI config-space) access — usable before DMAE is ready. */
4183 bxe_init_ind_wr(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4187 for (i = 0; i < len; i++)
4188 REG_WR_IND(sc, addr + i * 4, data[i]);
/*
 * Flush the staging buffer (sc->gz) to device memory: via DMAE when
 * available, otherwise falling back to direct string writes.
 */
4197 bxe_write_big_buf(struct bxe_softc *sc, uint32_t addr, uint32_t len)
4199 DBENTER(BXE_INSANE_REGS);
4202 bxe_write_dmae_phys_len(sc, sc->gz_dma.paddr, addr, len);
4204 bxe_init_str_wr(sc, addr, sc->gz, len);
4206 bxe_init_str_wr(sc, addr, sc->gz, len);
4209 DBEXIT(BXE_INSANE_REGS);
4213 * Fill areas of device memory with the specified value.
4215 * Generally used to clear a small area of device memory prior to writing
4216 * firmware to STORM memory or writing STORM firmware to device memory.
/*
 * Fill 'len' dwords of device memory at 'addr' with the byte value 'fill',
 * staging chunks of up to BXE_FW_BUF_SIZE bytes through sc->gz.
 * 'leftovers' is the chunk size in dwords; 'length' is in bytes.
 */
4222 bxe_init_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len)
4224 uint32_t cur_len, i, leftovers, length;
4226 DBENTER(BXE_VERBOSE_LOAD);
4228 length = (((len * 4) > BXE_FW_BUF_SIZE) ? BXE_FW_BUF_SIZE : (len * 4));
4229 leftovers = length / 4;
4230 memset(sc->gz, fill, length);
4232 for (i = 0; i < len; i += leftovers) {
4233 cur_len = min(leftovers, len - i);
4234 bxe_write_big_buf(sc, addr + i * 4, cur_len);
4237 DBEXIT(BXE_VERBOSE_LOAD);
/*
 * Replicate a 64-bit value (blob stores low dword first, then high) across
 * the staging buffer, then stream it to the device in chunks.
 */
4246 bxe_init_wr_64(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4249 uint64_t data64, *pdata;
4250 uint32_t buf_len32, cur_len, len;
4253 DBENTER(BXE_INSANE_REGS);
4255 buf_len32 = BXE_FW_BUF_SIZE / 4;
4257 /* 64 bit value is in a blob: first low DWORD, then high DWORD. */
4258 data64 = HILO_U64((*(data + 1)), (*data));
4259 len64 = min((uint32_t)(BXE_FW_BUF_SIZE / 8), len64);
4260 for (i = 0; i < len64; i++) {
4261 pdata = ((uint64_t *)(sc->gz)) + i;
4265 for (i = 0; i < len; i += buf_len32) {
4266 cur_len = min(buf_len32, len - i);
4267 bxe_write_big_buf(sc, addr + i*4, cur_len);
4270 DBEXIT(BXE_INSANE_REGS);
4275 * There are different blobs for each PRAM section. In addition, each
4276 * blob write operation is divided into multiple, smaller write
4277 * operations in order to decrease the amount of physically contiguous
4278 * buffer memory needed. Thus, when we select a blob, the address may
4279 * be with some offset from the beginning of PRAM section. The same
4280 * holds for the INT_TABLE sections.
/* Guard macros: expand to an 'if' that is true when 'addr' falls within an
 * interrupt-table (1 KiB) or PRAM (256 KiB) window starting at 'base'. */
4283 #define IF_IS_INT_TABLE_ADDR(base, addr) \
4284 if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
4286 #define IF_IS_PRAM_ADDR(base, addr) \
4287 if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
/*
 * Map a device address to the firmware blob that should be written there:
 * per-STORM interrupt tables and PRAM images each live in their own blob.
 * Falls through to the caller-supplied 'data' when no window matches.
 */
4295 static const uint8_t *
4296 bxe_sel_blob(struct bxe_softc *sc, uint32_t addr, const uint8_t *data)
4299 IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
4300 data = INIT_TSEM_INT_TABLE_DATA(sc);
4302 IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
4303 data = INIT_CSEM_INT_TABLE_DATA(sc);
4305 IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
4306 data = INIT_USEM_INT_TABLE_DATA(sc);
4308 IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
4309 data = INIT_XSEM_INT_TABLE_DATA(sc);
4311 IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
4312 data = INIT_TSEM_PRAM_DATA(sc);
4314 IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
4315 data = INIT_CSEM_PRAM_DATA(sc);
4317 IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
4318 data = INIT_USEM_PRAM_DATA(sc);
4320 IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
4321 data = INIT_XSEM_PRAM_DATA(sc);
/* Wide-bus variant of bxe_write_big_buf(): DMAE when ready, otherwise
 * indirect register writes. */
4328 bxe_write_big_buf_wb(struct bxe_softc *sc, uint32_t addr, uint32_t len)
4331 bxe_write_dmae_phys_len(sc, sc->gz_dma.paddr, addr, len);
4333 bxe_init_ind_wr(sc, addr, sc->gz, len);
/* Stage 'len32' dwords from 'data' into the shared buffer and push them to
 * the device via a wide-bus DMAE write. ('le32_swap' handling elided here.) */
4337 #define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \
4339 memcpy(sc->gz, data, (len32)*4); \
4340 bxe_write_big_buf_wb(sc, addr, len32); \
/*
 * Wide-bus init write: substitute the proper firmware blob for 'data' when
 * 'addr' targets an INT_TABLE/PRAM window, then DMAE or indirect-write it.
 * NOTE(review): 'old_data' is compared below but its assignment
 * (presumably 'old_data = data;' before bxe_sel_blob) is on an elided
 * line in this excerpt — confirm in the full source.
 */
4350 bxe_init_wr_wb(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
4353 const uint32_t *old_data;
4355 DBENTER(BXE_INSANE_REGS);
4357 data = (const uint32_t *)bxe_sel_blob(sc, addr, (const uint8_t *)data);
4358 if (sc->dmae_ready) {
4359 if (old_data != data)
4360 VIRT_WR_DMAE_LEN(sc, data, addr, len, 1);
4362 VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);
4364 bxe_init_ind_wr(sc, addr, data, len);
4366 DBEXIT(BXE_INSANE_REGS);
/* Placeholder for zipped (compressed) firmware writes — intentionally
 * unimplemented; logs the request parameters and does nothing else. */
4370 bxe_init_wr_zp(struct bxe_softc *sc, uint32_t addr, uint32_t len,
4373 BXE_PRINTF("%s(%d): Compressed FW is not supported yet. "
4374 "ERROR: address:0x%x len:0x%x blob_offset:0x%x\n",
4375 __FILE__, __LINE__, addr, len, blob_off);
4379 * Initialize blocks of the device.
4381 * This routine basically performs bulk register programming for different
4382 * blocks within the controller. The file bxe_init_values.h contains a
4383 * series of register access operations (read, write, fill, etc.) as well
4384 * as a BLOB of data to initialize multiple blocks within the controller.
4385 * Block initialization may be supported by all controllers or by specific
/*
 * Execute the init-ops program for one (block, stage) pair: walk the op
 * table between the stage's START and END offsets and dispatch each op
 * (plain write, string write, wide-bus, indirect, fill, zipped, 64-bit).
 * Ops tagged for a different hardware variant are skipped.
 */
4392 bxe_init_block(struct bxe_softc *sc, uint32_t block, uint32_t stage)
4395 const uint32_t *data, *data_base;
4396 uint32_t i, op_type, addr, len;
4397 uint16_t op_end, op_start;
4400 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
4402 op_start = INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
4404 op_end = INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
4405 /* If empty block */
4406 if (op_start == op_end)
4411 data_base = INIT_DATA(sc);
4413 for (i = op_start; i < op_end; i++) {
4415 op = (union init_op *)&(INIT_OPS(sc)[i]);
/* All op variants share the str_wr header layout (type/offset/len/data). */
4417 op_type = op->str_wr.op;
4418 addr = op->str_wr.offset;
4419 len = op->str_wr.data_len;
4420 data = data_base + op->str_wr.data_off;
4422 /* HW/EMUL specific */
4423 if ((op_type > OP_WB) && (op_type == hw_wr))
4431 REG_WR(sc, addr, op->write.val);
4434 bxe_init_str_wr(sc, addr, data, len);
4437 bxe_init_wr_wb(sc, addr, data, len);
4440 bxe_init_ind_wr(sc, addr, data, len);
4443 bxe_init_fill(sc, addr, 0, op->zero.len);
4446 bxe_init_wr_zp(sc, addr, len, op->str_wr.data_off);
4449 bxe_init_wr_64(sc, addr, data, len);
4452 /* happens whenever an op is of a diff HW */
4457 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
4461 * Handles controller initialization when called from an unlocked routine.
4462 * ifconfig calls this function.
/* NOTE(review): function signature elided in this excerpt — presumably the
 * unlocked bxe_init() entry point invoked by ifconfig; it takes the core
 * lock around bxe_init_locked(). Confirm against the full source. */
4470 struct bxe_softc *sc;
4475 bxe_init_locked(sc, LOAD_NORMAL);
4476 BXE_CORE_UNLOCK(sc);
4480 * Release all resources used by the driver.
4482 * Releases all resources acquired by the driver including interrupts,
4483 * interrupt handler, interfaces, mutexes, and DMA memory.
/*
 * Tear-down helper: free the ifnet, host DMA structures, and (FreeBSD 8+)
 * the multiqueue buf_rings. Interrupt/mutex release is on elided lines.
 */
4489 bxe_release_resources(struct bxe_softc *sc)
4493 DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
4497 /* Release the FreeBSD interface. */
4498 if (sc->bxe_ifp != NULL)
4499 if_free(sc->bxe_ifp);
4501 /* Free the DMA resources. */
4502 bxe_host_structures_free(sc);
4504 #if __FreeBSD_version >= 800000
4505 /* Free multiqueue buffer rings. */
4506 bxe_free_buf_rings(sc);
4513 * Indirect register write.
4515 * Writes NetXtreme II registers using an index/data register pair in PCI
4516 * configuration space. Using this mechanism avoids issues with posted
4517 * writes but is much slower than memory-mapped I/O.
/*
 * Indirect register write via the PCI config-space GRC address/data pair.
 * Slower than MMIO but immune to posted-write ordering issues.
 */
4523 bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t offset, uint32_t val)
4525 DBPRINT(sc, BXE_INSANE_REGS, "%s(); offset = 0x%08X, val = 0x%08X\n",
4526 __FUNCTION__, offset, val);
4528 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, offset, 4);
4529 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
4531 /* Return to a safe address. */
4532 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
4533 PCICFG_VENDOR_ID_OFFSET, 4);
4538 * Indirect register read.
4540 * Reads NetXtreme II registers using an index/data register pair in PCI
4541 * configuration space. Using this mechanism avoids issues with posted
4542 * reads but is much slower than memory-mapped I/O.
4545 * The value of the register.
/*
 * Indirect register read via the PCI config-space GRC address/data pair.
 * Returns the register value; parks the window at a safe offset afterward.
 */
4548 bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t offset)
4552 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, offset, 4);
4553 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
4555 /* Return to a safe address. */
4556 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
4557 PCICFG_VENDOR_ID_OFFSET, 4);
4559 DBPRINT(sc, BXE_INSANE_REGS, "%s(); offset = 0x%08X, val = 0x%08X\n",
4560 __FUNCTION__, offset, val);
/* "GO" doorbell registers for the 16 DMAE command mailboxes, indexed by
 * command slot. */
4566 static uint32_t dmae_reg_go_c[] = {
4567 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
4568 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
4569 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
4570 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
4575 * Copy DMAE command into memory and start the command.
/*
 * Copy a DMAE command into the command mailbox for slot 'idx', one dword
 * at a time, then ring that slot's GO register to start the transfer.
 * NOTE(review): the debug message reads like a command value but prints
 * the register offset (cmd_offset + i * 4) — verify intent in full source.
 */
4581 bxe_post_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int idx)
4583 uint32_t cmd_offset;
4585 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
4587 for (i = 0; i < (sizeof(struct dmae_command) / 4); i++) {
4588 REG_WR(sc, cmd_offset + i * 4, *(((uint32_t *)dmae) + i));
4589 DBPRINT(sc, BXE_INSANE_REGS, "%s(): DMAE cmd[%d].%d : 0x%08X\n",
4590 __FUNCTION__, idx, i, cmd_offset + i * 4);
4593 /* Kick off the command. */
4594 REG_WR(sc, dmae_reg_go_c[idx], 1);
4599 * Perform a DMAE write to device memory.
4601 * Some of the registers on the 577XX controller are 128bits wide. It is
4602 * required that when accessing those registers that they be written
4603 * atomically and that no intervening bus acceses to the device occur.
4604 * This could be handled by a lock held across all driver instances for
4605 * the device or it can be handled by performing a DMA operation when
4606 * writing to the device. This code implements the latter.
/*
 * Atomic DMAE write: PCI(host) -> GRC(device). Builds a dmae_command with
 * a PCI completion word, posts it, and spins until the completion value
 * appears in wb_comp (up to ~200 ms). Falls back to indirect writes when
 * DMAE is not yet initialized.
 */
4612 bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr, uint32_t dst_addr,
4615 struct dmae_command dmae;
4616 uint32_t *data, *wb_comp;
4619 DBENTER(BXE_INSANE_REGS);
4621 DBPRINT(sc, BXE_EXTREME_REGS,
4622 "%s(): host addr = 0x%jX, device addr = 0x%08X, length = %d.\n",
4623 __FUNCTION__, (uintmax_t)dma_addr, dst_addr, (int)len32);
4625 wb_comp = BXE_SP(sc, wb_comp);
4626 /* Fall back to indirect access if DMAE is not ready. */
4627 if (!sc->dmae_ready) {
4628 data = BXE_SP(sc, wb_data[0]);
4630 DBPRINT(sc, BXE_WARN, "%s(): DMAE not ready, "
4631 "using indirect.\n", __FUNCTION__);
4633 bxe_init_ind_wr(sc, dst_addr, data, len32);
4634 goto bxe_write_dmae_exit;
4637 memset(&dmae, 0, sizeof(struct dmae_command));
/* Endianness bits differ per host byte order (elided #if/#else branches). */
4639 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4640 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4641 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4643 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4645 DMAE_CMD_ENDIANITY_DW_SWAP |
4647 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4648 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
4649 dmae.src_addr_lo = U64_LO(dma_addr);
4650 dmae.src_addr_hi = U64_HI(dma_addr);
/* GRC destination addresses are dword-granular, hence >> 2. */
4651 dmae.dst_addr_lo = dst_addr >> 2;
4652 dmae.dst_addr_hi = 0;
4654 dmae.comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
4655 dmae.comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
4656 dmae.comp_val = BXE_WB_COMP_VAL;
4662 bxe_post_dmae(sc, &dmae, INIT_DMAE_C(sc));
4666 /* Wait up to 200ms. */
4668 while (*wb_comp != BXE_WB_COMP_VAL) {
4670 DBPRINT(sc, BXE_FATAL,
4671 "%s(): DMAE timeout (dst_addr = 0x%08X, len = %d)!\n",
4672 __FUNCTION__, dst_addr, len32);
4679 BXE_DMAE_UNLOCK(sc);
4681 bxe_write_dmae_exit:
4682 DBEXIT(BXE_INSANE_REGS);
4687 * Perform a DMAE read from to device memory.
4689 * Some of the registers on the 577XX controller are 128bits wide. It is
4690 * required that when accessing those registers that they be read
4691 * atomically and that no intervening bus acceses to the device occur.
4692 * This could be handled by a lock held across all driver instances for
4693 * the device or it can be handled by performing a DMA operation when
4694 * reading from the device. This code implements the latter.
/*
 * Atomic DMAE read: GRC(device) -> PCI(host wb_data buffer). Mirror of
 * bxe_write_dmae(); falls back to per-dword indirect reads when DMAE is
 * not ready, and spins on wb_comp for completion.
 */
4700 bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr,
4703 struct dmae_command dmae;
4704 uint32_t *data, *wb_comp;
4707 DBENTER(BXE_INSANE_REGS);
4709 wb_comp = BXE_SP(sc, wb_comp);
4710 /* Fall back to indirect access if DMAE is not ready. */
4711 if (!sc->dmae_ready) {
4712 data = BXE_SP(sc, wb_data[0]);
4714 DBPRINT(sc, BXE_WARN, "%s(): DMAE not ready, "
4715 "using indirect.\n", __FUNCTION__);
4717 for (i = 0; i < len32; i++)
4718 data[i] = bxe_reg_rd_ind(sc, src_addr + i * 4);
4720 goto bxe_read_dmae_exit;
4723 memset(&dmae, 0, sizeof(struct dmae_command));
/* Endianness bits differ per host byte order (elided #if/#else branches). */
4725 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4726 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4727 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4729 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4731 DMAE_CMD_ENDIANITY_DW_SWAP |
4733 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4734 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
4736 dmae.src_addr_lo = src_addr >> 2;
4737 dmae.src_addr_hi = 0;
4738 dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
4739 dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
4741 dmae.comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
4742 dmae.comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
4743 dmae.comp_val = BXE_WB_COMP_VAL;
/* Clear the landing buffer so stale data is never mistaken for results. */
4747 memset(BXE_SP(sc, wb_data[0]), 0, sizeof(uint32_t) * 4);
4750 bxe_post_dmae(sc, &dmae, INIT_DMAE_C(sc));
4755 while (*wb_comp != BXE_WB_COMP_VAL) {
4757 DBPRINT(sc, BXE_FATAL,
4758 "%s(): DMAE timeout (src_addr = 0x%08X, len = %d)!\n",
4759 __FUNCTION__, src_addr, len32);
4766 BXE_DMAE_UNLOCK(sc);
4769 DBEXIT(BXE_INSANE_REGS);
4773 * DMAE write wrapper.
/* Wide-bus write wrapper: write a 64-bit value (high dword first in the
 * scratch array) to 'reg' via a 2-dword DMAE transfer. */
4779 bxe_wb_wr(struct bxe_softc *sc, int reg, uint32_t val_hi, uint32_t val_lo)
4781 uint32_t wb_write[2];
4783 wb_write[0] = val_hi;
4784 wb_write[1] = val_lo;
4785 REG_WR_DMAE(sc, reg, wb_write, 2);
4791 * Poll a register waiting for a value.
4794 * The last read register value.
/* Poll 'reg' until it reads 'expected' (retry/timeout loop elided in this
 * excerpt); returns the last value read. */
4797 uint32_t bxe_reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected,
4803 val = REG_RD(sc, reg);
4804 if (val == expected)
4816 * Microcode assert display.
4818 * This function walks through each STORM processor and prints out a
4819 * listing of all asserts currently in effect. Useful for post-mortem
4823 * The number of asserts detected.
/*
 * Post-mortem helper: dump every active microcode assert from the X/T/C/U
 * STORM processors. Each STORM section is scanned identically: read the
 * assert-list index, then print 16-byte assert records until an invalid
 * opcode marks the end. Returns the number of asserts found (count
 * accumulation lines elided in this excerpt).
 */
4826 bxe_mc_assert(struct bxe_softc *sc)
4828 uint32_t row0, row1, row2, row3;
4832 DBENTER(BXE_VERBOSE_INTR);
/* --- XSTORM --- */
4836 last_idx = REG_RD8(sc, BAR_XSTORM_INTMEM +
4837 XSTORM_ASSERT_LIST_INDEX_OFFSET);
4840 DBPRINT(sc, BXE_FATAL, "DATA XSTORM_ASSERT_LIST_INDEX 0x%x\n",
4843 /* Print the asserts */
4844 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4846 row0 = REG_RD(sc, BAR_XSTORM_INTMEM +
4847 XSTORM_ASSERT_LIST_OFFSET(i));
4848 row1 = REG_RD(sc, BAR_XSTORM_INTMEM +
4849 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
4850 row2 = REG_RD(sc, BAR_XSTORM_INTMEM +
4851 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
4852 row3 = REG_RD(sc, BAR_XSTORM_INTMEM +
4853 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
4855 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4856 DBPRINT(sc, BXE_FATAL, "DATA XSTORM_ASSERT_INDEX %d = "
4857 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
/* --- TSTORM --- */
4865 last_idx = REG_RD8(sc, BAR_TSTORM_INTMEM +
4866 TSTORM_ASSERT_LIST_INDEX_OFFSET);
4869 DBPRINT(sc, BXE_FATAL, "DATA TSTORM_ASSERT_LIST_INDEX 0x%x\n",
4872 /* Print the asserts */
4873 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4875 row0 = REG_RD(sc, BAR_TSTORM_INTMEM +
4876 TSTORM_ASSERT_LIST_OFFSET(i));
4877 row1 = REG_RD(sc, BAR_TSTORM_INTMEM +
4878 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
4879 row2 = REG_RD(sc, BAR_TSTORM_INTMEM +
4880 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
4881 row3 = REG_RD(sc, BAR_TSTORM_INTMEM +
4882 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
4884 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4885 DBPRINT(sc, BXE_FATAL, "DATA TSTORM_ASSERT_INDEX %d = "
4886 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
/* --- CSTORM --- */
4894 last_idx = REG_RD8(sc, BAR_CSTORM_INTMEM +
4895 CSTORM_ASSERT_LIST_INDEX_OFFSET);
4898 DBPRINT(sc, BXE_FATAL, "DATA CSTORM_ASSERT_LIST_INDEX 0x%x\n",
4901 /* Print the asserts */
4902 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4904 row0 = REG_RD(sc, BAR_CSTORM_INTMEM +
4905 CSTORM_ASSERT_LIST_OFFSET(i));
4906 row1 = REG_RD(sc, BAR_CSTORM_INTMEM +
4907 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
4908 row2 = REG_RD(sc, BAR_CSTORM_INTMEM +
4909 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
4910 row3 = REG_RD(sc, BAR_CSTORM_INTMEM +
4911 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
4913 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4914 DBPRINT(sc, BXE_FATAL, "DATA CSTORM_ASSERT_INDEX %d = "
4915 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
/* --- USTORM --- */
4923 last_idx = REG_RD8(sc, BAR_USTORM_INTMEM +
4924 USTORM_ASSERT_LIST_INDEX_OFFSET);
4927 DBPRINT(sc, BXE_FATAL, "DATA USTORM_ASSERT_LIST_INDEX 0x%x\n",
4930 /* Print the asserts */
4931 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
4933 row0 = REG_RD(sc, BAR_USTORM_INTMEM +
4934 USTORM_ASSERT_LIST_OFFSET(i));
4935 row1 = REG_RD(sc, BAR_USTORM_INTMEM +
4936 USTORM_ASSERT_LIST_OFFSET(i) + 4);
4937 row2 = REG_RD(sc, BAR_USTORM_INTMEM +
4938 USTORM_ASSERT_LIST_OFFSET(i) + 8);
4939 row3 = REG_RD(sc, BAR_USTORM_INTMEM +
4940 USTORM_ASSERT_LIST_OFFSET(i) + 12);
4942 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
4943 DBPRINT(sc, BXE_FATAL, "DATA USTORM_ASSERT_INDEX %d = "
4944 "0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2,
4951 DBEXIT(BXE_VERBOSE_INTR);
4957 * Perform a panic dump.
/*
 * Crash-dump entry point: disable statistics, then dump controller state
 * between the Begin/End banners (idle-check calls elided in this excerpt).
 */
4963 bxe_panic_dump(struct bxe_softc *sc)
4967 sc->stats_state = STATS_STATE_DISABLED;
4969 BXE_PRINTF("---------- Begin crash dump ----------\n");
4971 /* Idle check is run twice to verify the controller has stopped. */
4980 BXE_PRINTF("---------- End crash dump ----------\n");
4987 * Enables interrupt generation.
/*
 * Program the host coalescing (HC) block for the active interrupt mode:
 * MSI-X (single or multi vector), MSI (single or multi), or INTx.
 * Also initializes leading/trailing edge attention generation on E1H.
 */
4993 bxe_int_enable(struct bxe_softc *sc)
4995 uint32_t hc_addr, val;
4998 DBENTER(BXE_VERBOSE_INTR);
/* Each port has its own HC config register. */
5001 hc_addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5002 val = REG_RD(sc, hc_addr);
5003 if (sc->msix_count > 0) {
5004 if (sc->msix_count == 1) {
5006 /* Single interrupt, multiple queues.*/
5007 DBPRINT(sc, BXE_VERBOSE_INTR,
5008 "%s(): Setting host coalescing registers for MSI-X (SIMQ).\n",
5012 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
5014 /* Enable single ISR mode, MSI/MSI-X, and attention messages. */
5015 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5016 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5017 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5020 /* Multiple interrupts, multiple queues.*/
5021 DBPRINT(sc, BXE_VERBOSE_INTR,
5022 "%s(): Setting host coalescing registers for MSI-X (MIMQ).\n",
5025 /* Clear single ISR mode and INTx. */
5026 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5027 HC_CONFIG_0_REG_INT_LINE_EN_0);
5029 /* Enable MSI/MSI-X and attention messages. */
5030 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5031 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5034 } else if (sc->msi_count > 0) {
5036 if (sc->msi_count == 1) {
5038 /* Single interrupt, multiple queues.*/
5039 DBPRINT(sc, BXE_VERBOSE_INTR,
5040 "%s(): Setting host coalescing registers for MSI (SIMQ).\n",
5044 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
5046 /* Enable single ISR mode, MSI/MSI-X, and attention
5049 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5050 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5051 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5053 /* Multiple interrupts, multiple queues.*/
5054 DBPRINT(sc, BXE_VERBOSE_INTR,
5055 "%s(): Setting host coalescing registers for"
5059 /* Clear single ISR mode and INTx. */
5060 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5061 HC_CONFIG_0_REG_INT_LINE_EN_0);
5063 /* Enable MSI/MSI-X and attention messages. */
5064 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5065 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5068 /* Single interrupt, single queue. */
5069 DBPRINT(sc, BXE_VERBOSE_INTR,
5070 "%s(): Setting host coalescing registers for INTA#.\n",
/* INTx path: write once with MSI bit set, then clear it for final write. */
5073 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
5074 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5075 HC_CONFIG_0_REG_INT_LINE_EN_0 |
5076 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5077 REG_WR(sc, hc_addr, val);
5079 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
5082 /* Write the interrupt mode to the host coalescing block. */
5083 REG_WR(sc, hc_addr, val);
5085 if (CHIP_IS_E1H(sc)) {
5087 /* Init leading/trailing edge attention generation. */
/* 0xee0f enables default attentions; bit (E1HVN + 4) adds this VN's bit. */
5089 val = (0xee0f | (1 << (BP_E1HVN(sc) + 4)));
5092 * Check if this driver instance is the port
5096 /* Enable nig & GPIO3 attentions. */
5101 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val);
5102 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val);
5105 DBEXIT(BXE_VERBOSE_INTR);
5110 * Disables interrupt generation.
5116 bxe_int_disable(struct bxe_softc *sc)
5118 	uint32_t hc_addr, val;
5121 	DBENTER(BXE_VERBOSE_INTR | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
	/* Select this port's host coalescing configuration register. */
5124 	hc_addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5125 	val = REG_RD(sc, hc_addr);
	/* Clear the MSI/MSI-X, INTx line, and attention-message enables. */
5127 	val &= ~(HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
5128 	    HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0);
5130 	REG_WR(sc, hc_addr, val);
	/* Read back and warn if the hardware didn't latch the new value. */
5132 	if (REG_RD(sc, hc_addr)!= val) {
5133 		DBPRINT(sc, BXE_WARN, "%s(): BUG! Returned value from IGU "
5134 		    "doesn't match value written (0x%08X).\n",
5138 	DBEXIT(BXE_VERBOSE_INTR | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
5141 #define BXE_CRC32_RESIDUAL 0xdebb20e3
/*
 * Requests the hardware arbitration lock protecting the NVRAM interface
 * for this port, then polls until the arbiter grants it or a timeout
 * (NVRAM_TIMEOUT_COUNT * 10 polls) expires.
 *
 * Returns:
 */
5145 * 0 = Success, !0 = Failure.
5148 bxe_nvram_acquire_lock(struct bxe_softc *sc)
5153 	DBENTER(BXE_VERBOSE_NVRAM);
5159 	/* Acquire the NVRAM lock. */
5160 	REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
5161 	    (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
	/* Poll for the arbiter to report the lock as held by this port. */
5163 	for (i = 0; i < NVRAM_TIMEOUT_COUNT * 10; i++) {
5164 		val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
5165 		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
	/* Timed out without the grant bit ever being set. */
5171 	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
5172 		DBPRINT(sc, BXE_WARN, "%s(): Cannot acquire NVRAM lock!\n",
5177 	DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Releases the per-port NVRAM hardware arbitration lock and polls until
 * the arbiter confirms the release or a timeout expires.
 *
 * Returns:
 */
5183 * 0 = Success, !0 = Failure.
5186 bxe_nvram_release_lock(struct bxe_softc *sc)
5191 	DBENTER(BXE_VERBOSE_NVRAM);
5197 	/* Release the NVRAM lock. */
5198 	REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
5199 	    (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
	/* Poll until the arbiter no longer reports the lock held. */
5201 	for (i = 0; i < NVRAM_TIMEOUT_COUNT * 10; i++) {
5202 		val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
5203 		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
	/* Timed out with the lock bit still set. */
5209 	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
5210 		DBPRINT(sc, BXE_WARN, "%s(): Cannot release NVRAM lock!\n",
5215 	DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Enables read and write access to the NVRAM interface by setting both
 * enable bits in the MCP access-enable register (read-modify-write).
 */
5224 bxe_nvram_enable_access(struct bxe_softc *sc)
5228 	DBENTER(BXE_VERBOSE_NVRAM);
5230 	val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5232 	/* Enable both bits, even on read */
5233 	REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5234 	    (val | MCPR_NVM_ACCESS_ENABLE_EN |
5235 	    MCPR_NVM_ACCESS_ENABLE_WR_EN));
5237 	DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Disables access to the NVRAM interface by clearing both enable bits
 * in the MCP access-enable register (read-modify-write).
 */
5245 bxe_nvram_disable_access(struct bxe_softc *sc)
5249 	DBENTER(BXE_VERBOSE_NVRAM);
5251 	val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5253 	/* Disable both bits, even after read. */
5254 	REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5255 	    (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
5256 	    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
5258 	DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Reads a single 32-bit word from NVRAM at the given byte offset.
 * cmd_flags carries positional flags (e.g. FIRST/LAST) supplied by the
 * caller; the word read is returned through *ret_val.
 *
 * Returns:
 */
5263 * 0 = Success, !0 = Failure.
5266 bxe_nvram_read_dword(struct bxe_softc *sc, uint32_t offset, uint32_t *ret_val,
5272 	DBENTER(BXE_INSANE_NVRAM);
5274 	/* Build the command word. */
5275 	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
5277 	/* Need to clear DONE bit separately. */
5278 	REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5280 	/* Address within the NVRAM to read. */
5281 	REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
5282 	    (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5284 	/* Issue a read command. */
5285 	REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5287 	/* Wait for completion. */
5290 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
5292 		val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
		/* DONE is set: fetch the data word from the read register. */
5294 		if (val & MCPR_NVM_COMMAND_DONE) {
5295 			val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
5303 	DBPRINT(sc, BXE_INSANE_NVRAM, "%s(): Read 0x%08X from offset 0x%08X.\n",
5304 	    __FUNCTION__, *ret_val, offset);
5305 	DBEXIT(BXE_INSANE_NVRAM);
/*
 * Reads buf_size bytes from NVRAM starting at byte offset into ret_buf.
 * Both offset and buf_size must be DWORD aligned and the range must fit
 * inside the flash; acquires the NVRAM lock and enables interface access
 * for the duration of the transfer.
 *
 * Returns:
 */
5311 * 0 = Success, !0 = Failure.
5314 bxe_nvram_read(struct bxe_softc *sc, uint32_t offset, uint8_t *ret_buf,
5317 	uint32_t cmd_flags, val;
5320 	DBENTER(BXE_EXTREME_NVRAM);
	/* Reject unaligned or zero-length requests. */
5322 	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
5323 		DBPRINT(sc, BXE_WARN, "%s(): Unaligned address or invalid "
5324 		    "buffer for NVRAM read (offset = 0x%08X, buf_size = %d)!\n",
5325 		    __FUNCTION__, offset, buf_size);
5327 		goto bxe_nvram_read_exit;
	/* Reject reads that would run past the end of the flash. */
5330 	if (offset + buf_size > sc->common.flash_size) {
5331 		DBPRINT(sc, BXE_WARN, "%s(): Read extends beyond the end of "
5332 		    "the NVRAM (offset (0x%08X) + buf_size (%d) > flash_size "
5333 		    "(0x%08X))!\n", __FUNCTION__, offset, buf_size,
5334 		    sc->common.flash_size);
5336 		goto bxe_nvram_read_exit;
5339 	rc = bxe_nvram_acquire_lock(sc);
5341 		goto bxe_nvram_read_exit;
5343 	bxe_nvram_enable_access(sc);
5345 	/* Read the first word(s). */
5346 	cmd_flags = MCPR_NVM_COMMAND_FIRST;
5347 	while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
5348 		rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
5349 		memcpy(ret_buf, &val, 4);
5351 		/* Advance to the next DWORD. */
5352 		offset += sizeof(uint32_t);
5353 		ret_buf += sizeof(uint32_t);
5354 		buf_size -= sizeof(uint32_t);
5358 	/* Read the final word. */
5360 		cmd_flags |= MCPR_NVM_COMMAND_LAST;
5361 		rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
5362 		memcpy(ret_buf, &val, 4);
5365 	/* Disable access to NVRAM interface. */
5366 	bxe_nvram_disable_access(sc);
5367 	bxe_nvram_release_lock(sc);
5369 bxe_nvram_read_exit:
5370 	DBEXIT(BXE_EXTREME_NVRAM);
5374 #ifdef BXE_NVRAM_WRITE_SUPPORT
/*
 * Writes a single 32-bit word (val) to NVRAM at the given byte offset.
 * cmd_flags carries positional flags (FIRST/LAST) supplied by the caller.
 * Compiled only under BXE_NVRAM_WRITE_SUPPORT.
 *
 * Returns:
 */
5377 * 0 = Success, !0 = Failure.
5380 bxe_nvram_write_dword(struct bxe_softc *sc, uint32_t offset, uint32_t val,
5385 	DBENTER(BXE_VERBOSE_NVRAM);
5387 	/* Build the command word. */
5388 	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
5390 	/* Need to clear DONE bit separately. */
5391 	REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5393 	/* Write the data. */
5394 	REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
5396 	/* Address to write within the NVRAM. */
5397 	REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
5398 	    (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5400 	/* Issue the write command. */
5401 	REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5403 	/* Wait for completion. */
5405 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
5407 		val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
5408 		if (val & MCPR_NVM_COMMAND_DONE) {
5414 	DBEXIT(BXE_VERBOSE_NVRAM);
5418 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
/*
 * Writes a small (sub-DWORD) buffer to NVRAM using a read-modify-write
 * of the containing aligned DWORD: the aligned word is read, the target
 * byte(s) are patched in via BYTE_OFFSET(), and the word is written back.
 * Takes the NVRAM lock and enables interface access for the duration.
 */
5425 bxe_nvram_write1(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf,
5428 	uint32_t align_offset, cmd_flags, val;
5431 	DBENTER(BXE_VERBOSE_NVRAM);
	/* Reject writes that would run past the end of the flash. */
5433 	if (offset + buf_size > sc->common.flash_size) {
5434 		DBPRINT(sc, BXE_WARN, "%s(): Write extends beyond the end of "
5435 		    "the NVRAM (offset (0x%08X) + buf_size (%d) > flash_size "
5436 		    "(0x%08X))!\n", __FUNCTION__, offset, buf_size,
5437 		    sc->common.flash_size);
5439 		goto bxe_nvram_write1_exit;
5442 	/* request access to nvram interface */
5443 	rc = bxe_nvram_acquire_lock(sc);
5445 		goto bxe_nvram_write1_exit;
5447 	/* Enable access to the NVRAM interface. */
5448 	bxe_nvram_enable_access(sc);
	/* Read the aligned DWORD containing the target byte(s). */
5450 	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
5451 	align_offset = (offset & ~0x03);
5452 	rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
	/* Patch the target byte into the word and write it back. */
5455 		val &= ~(0xff << BYTE_OFFSET(offset));
5456 		val |= (*data_buf << BYTE_OFFSET(offset));
5459 		rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
5462 	/* Disable access to the NVRAM interface. */
5463 	bxe_nvram_disable_access(sc);
5464 	bxe_nvram_release_lock(sc);
5466 bxe_nvram_write1_exit:
5467 	DBEXIT(BXE_VERBOSE_NVRAM);
/*
 * Writes buf_size bytes from data_buf to NVRAM at byte offset, one DWORD
 * at a time, setting FIRST/LAST command flags at NVRAM page boundaries.
 * Sub-DWORD writes are delegated to bxe_nvram_write1(); larger writes
 * must be DWORD aligned and fit inside the flash.
 *
 * Returns:
 */
5473 * 0 = Success, !0 = Failure.
5476 bxe_nvram_write(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf,
5479 	uint32_t cmd_flags, val, written_so_far;
	/* Small (sub-DWORD) writes go through the read-modify-write path. */
5485 		return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
	/* Reject unaligned or zero-length requests. */
5487 	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
5488 		DBPRINT(sc, BXE_WARN, "%s(): Unaligned address or invalid "
5489 		    "buffer for NVRAM write "
5490 		    "(offset = 0x%08X, buf_size = %d)!\n", __FUNCTION__,
5493 		goto bxe_nvram_write_exit;
	/* Reject writes that would run past the end of the flash. */
5496 	if (offset + buf_size > sc->common.flash_size) {
5497 		DBPRINT(sc, BXE_WARN, "%s(): Write extends beyond the end of "
5498 		    "the NVRAM (offset (0x%08X) + buf_size (%d) > flash_size "
5499 		    "(0x%08X))!\n", __FUNCTION__, offset, buf_size,
5500 		    sc->common.flash_size);
5502 		goto bxe_nvram_write_exit;
5505 	/* Request access to NVRAM interface. */
5506 	rc = bxe_nvram_acquire_lock(sc);
5508 		goto bxe_nvram_write_exit;
5510 	/* Enable access to the NVRAM interface. */
5511 	bxe_nvram_enable_access(sc);
	/* Write DWORD-by-DWORD, flagging FIRST/LAST at page boundaries. */
5514 	cmd_flags = MCPR_NVM_COMMAND_FIRST;
5515 	while ((written_so_far < buf_size) && (rc == 0)) {
5516 		if (written_so_far == (buf_size - sizeof(uint32_t)))
5517 			cmd_flags |= MCPR_NVM_COMMAND_LAST;
5518 		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
5519 			cmd_flags |= MCPR_NVM_COMMAND_LAST;
5520 		else if ((offset % NVRAM_PAGE_SIZE) == 0)
5521 			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
5523 		memcpy(&val, data_buf, 4);
5525 		rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
5527 		/* Advance to the next DWORD. */
5528 		offset += sizeof(uint32_t);
5529 		data_buf += sizeof(uint32_t);
5530 		written_so_far += sizeof(uint32_t);
5534 	/* Disable access to the NVRAM interface. */
5535 	bxe_nvram_disable_access(sc);
5536 	bxe_nvram_release_lock(sc);
5538 bxe_nvram_write_exit:
5539 	DBEXIT(BXE_VERBOSE_NVRAM);
5545 * This function validates NVRAM content by reading spcific
5546 * regions and validating that the NVRAM checksum matches the
/*
 * Each region in nvram_tbl below is read and its little-endian CRC32
 * is compared against the expected residual (BXE_CRC32_RESIDUAL).
 *
 * Returns:
 */
5550 * 0 = Success, !0 = Failure.
5553 bxe_nvram_test(struct bxe_softc *sc)
5555 	static const struct {
	/* Offset/size table of the NVRAM regions to checksum. */
5559 		{ 0, 0x14 }, /* bootstrap area*/
5560 		{ 0x14, 0xec }, /* directory area */
5561 		{ 0x100, 0x350 }, /* manuf_info */
5562 		{ 0x450, 0xf0 }, /* feature_info */
5563 		{ 0x640, 0x64 }, /* upgrade_key_info */
5564 		{ 0x708, 0x70 }, /* manuf_key_info */
	/* buf is sized for the largest region (0x350 bytes). */
5567 	uint32_t magic, csum, buf[0x350 / 4];
5571 	DBENTER(BXE_VERBOSE_NVRAM);
5573 	data = (uint8_t *) buf;
5575 	/* Read the DWORD at offset 0 in NVRAM. */
5576 	rc = bxe_nvram_read(sc, 0, data, 4);
5578 		BXE_PRINTF("%s(%d): Error (%d) returned reading NVRAM!\n",
5579 		    __FILE__, __LINE__, rc);
5580 		goto bxe_nvram_test_exit;
5583 	/* Make sure we found our magic value. */
5584 	magic = be32toh(buf[0]);
5585 	if (magic != 0x669955aa) {
5586 		BXE_PRINTF("%s(%d): Invalid magic value (0x%08x) found!\n",
5587 		    __FILE__, __LINE__, magic);
5589 		goto bxe_nvram_test_exit;
5592 	/* Read through each region in NVRAM and validate the checksum. */
5593 	for (i = 0; nvram_tbl[i].size; i++) {
5594 		DBPRINT(sc, BXE_VERBOSE_NVRAM, "%s(): Testing NVRAM region %d, "
5595 		    "starting offset = %d, length = %d\n", __FUNCTION__, i,
5596 		    nvram_tbl[i].offset, nvram_tbl[i].size);
5598 		rc = bxe_nvram_read(sc, nvram_tbl[i].offset, data,
5601 			BXE_PRINTF("%s(%d): Error (%d) returned reading NVRAM "
5602 			    "region %d!\n", __FILE__, __LINE__, rc, i);
5603 			goto bxe_nvram_test_exit;
		/* A valid region's CRC32 over data+checksum yields the residual. */
5606 		csum = ether_crc32_le(data, nvram_tbl[i].size);
5607 		if (csum != BXE_CRC32_RESIDUAL) {
5608 			BXE_PRINTF("%s(%d): Checksum error (0x%08X) for NVRAM "
5609 			    "region %d!\n", __FILE__, __LINE__, csum, i);
5611 			goto bxe_nvram_test_exit;
5615 bxe_nvram_test_exit:
5616 	DBEXIT(BXE_VERBOSE_NVRAM);
5621 * Acknowledge status block and modify interrupt mode.
/*
 * Builds an IGU acknowledge register image from the status block id,
 * storm id, index, update flag and interrupt mode, then writes it to
 * this port's host coalescing command register in one 32-bit store.
 */
5626 static __inline void
5627 bxe_ack_sb(struct bxe_softc *sc, uint8_t sb_id, uint8_t storm, uint16_t index,
5628     uint8_t int_mode, uint8_t update)
5630 	struct igu_ack_register igu_ack;
	/* Per-port INT_ACK command register in the HC block. */
5633 	hc_addr = (HC_REG_COMMAND_REG + BP_PORT(sc) * 32 + COMMAND_REG_INT_ACK);
5634 	igu_ack.status_block_index = index;
5635 	igu_ack.sb_id_and_flags =
5636 	    ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
5637 	    (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
5638 	    (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
5639 	    (int_mode << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
	/* Write the whole ack register image as a single DWORD. */
5642 	REG_WR(sc, hc_addr, (*(uint32_t *) &igu_ack));
5647 * Update fastpath status block index.
/*
 * Compares the cached CSTORM/USTORM indices against the hardware status
 * block and refreshes them when they differ.
 *
 * Returns (bitmask):
 */
5650 * 0 = Nu completes, 1 = TX completes, 2 = RX completes,
5651 * 3 = RX & TX completes
5653 static __inline uint16_t
5654 bxe_update_fpsb_idx(struct bxe_fastpath *fp)
5656 	struct host_status_block *fpsb;
5659 	fpsb = fp->status_block;
5664 	/* Check for any CSTORM transmit completions. */
5665 	if (fp->fp_c_idx != le16toh(fpsb->c_status_block.status_block_index)) {
5666 		fp->fp_c_idx = le16toh(fpsb->c_status_block.status_block_index);
5670 	/* Check for any USTORM receive completions. */
5671 	if (fp->fp_u_idx != le16toh(fpsb->u_status_block.status_block_index)) {
5672 		fp->fp_u_idx = le16toh(fpsb->u_status_block.status_block_index);
5680 * Acknowledge interrupt.
/*
 * Reads this port's SIMD mask register from the host coalescing block,
 * which acknowledges the interrupt and reports its source.
 *
 * Returns:
 */
5683 * Interrupt value read from IGU.
5686 bxe_ack_int(struct bxe_softc *sc)
5688 	uint32_t hc_addr, result;
	/* Per-port SIMD_MASK command register in the HC block. */
5690 	hc_addr = HC_REG_COMMAND_REG + BP_PORT(sc) * 32 + COMMAND_REG_SIMD_MASK;
5691 	result = REG_RD(sc, hc_addr);
5692 	DBPRINT(sc, BXE_INSANE_INTR, "%s(): Read 0x%08X from HC addr 0x%08X\n",
5693 	    __FUNCTION__, result, hc_addr);
5699 * Slowpath event handler.
5701 * Checks that a ramrod completion occurs while the
5702 * controller is in the proper state.
/*
 * Decodes the CID and command from the ramrod CQE, then advances the
 * fastpath (fp->state) or device (sc->state) state machine according to
 * which ramrod completed. Unexpected completions are logged.
 */
5708 bxe_sp_event(struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe)
5710 	struct bxe_softc *sc;
5714 	DBENTER(BXE_VERBOSE_RAMROD);
	/* Extract the connection id and ramrod command from the CQE. */
5716 	cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
5717 	command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
5718 	DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): CID = %d, ramrod command = %d, "
5719 	    "device state = 0x%08X, fp[%02d].state = 0x%08X, type = %d\n",
5720 	    __FUNCTION__, cid, command, sc->state, fp->index, fp->state,
5721 	    rr_cqe->ramrod_cqe.ramrod_type);
5723 	/* Free up an entry on the slowpath queue. */
5726 	/* Handle ramrod commands that completed on a client connection. */
5728 		/* Check for a completion for the current state. */
5729 		switch (command | fp->state) {
5730 		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BXE_FP_STATE_OPENING):
5731 			DBPRINT(sc, BXE_VERBOSE_RAMROD,
5732 			    "%s(): Completed fp[%02d] CLIENT_SETUP Ramrod.\n",
5734 			fp->state = BXE_FP_STATE_OPEN;
5736 		case (RAMROD_CMD_ID_ETH_HALT | BXE_FP_STATE_HALTING):
5737 			DBPRINT(sc, BXE_VERBOSE_RAMROD,
5738 			    "%s(): Completed fp[%02d] ETH_HALT ramrod\n",
5740 			fp->state = BXE_FP_STATE_HALTED;
5743 			DBPRINT(sc, BXE_VERBOSE_RAMROD,
5744 			    "%s(): Unexpected microcode reply (%d) while "
5745 			    "in state 0x%04X!\n", __FUNCTION__, command,
5749 		goto bxe_sp_event_exit;
5752 	/* Handle ramrod commands that completed on the leading connection. */
5753 	switch (command | sc->state) {
5754 	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BXE_STATE_OPENING_WAIT4_PORT):
5755 		DBPRINT(sc, BXE_VERBOSE_RAMROD,
5756 		    "%s(): Completed PORT_SETUP ramrod.\n", __FUNCTION__);
5757 		sc->state = BXE_STATE_OPEN;
5759 	case (RAMROD_CMD_ID_ETH_HALT | BXE_STATE_CLOSING_WAIT4_HALT):
5760 		DBPRINT(sc, BXE_VERBOSE_RAMROD,
5761 		    "%s(): Completed ETH_HALT ramrod.\n", __FUNCTION__);
5762 		sc->state = BXE_STATE_CLOSING_WAIT4_DELETE;
5763 		fp->state = BXE_FP_STATE_HALTED;
5765 	case (RAMROD_CMD_ID_ETH_CFC_DEL | BXE_STATE_CLOSING_WAIT4_HALT):
5766 		DBPRINT(sc, BXE_VERBOSE_RAMROD,
5767 		    "%s(): Completed fp[%02d] ETH_CFC_DEL ramrod.\n",
5769 		sc->fp[cid].state = BXE_FP_STATE_CLOSED;
5771 	case (RAMROD_CMD_ID_ETH_SET_MAC | BXE_STATE_OPEN):
5772 		DBPRINT(sc, BXE_VERBOSE_RAMROD,
5773 		    "%s(): Completed ETH_SET_MAC ramrod in STATE_OPEN state.\n",
5776 	case (RAMROD_CMD_ID_ETH_SET_MAC | BXE_STATE_CLOSING_WAIT4_HALT):
5777 		DBPRINT(sc, BXE_VERBOSE_RAMROD,
5778 		    "%s(): Completed ETH_SET_MAC ramrod in "
5779 		    "CLOSING_WAIT4_HALT state.\n", __FUNCTION__);
5782 		DBPRINT(sc, BXE_FATAL, "%s(): Unexpected microcode reply (%d)! "
5783 		    "State is 0x%08X\n", __FUNCTION__, command, sc->state);
5787 	/* Force bxe_wait_ramrod() to see the change. */
5789 	DBEXIT(BXE_VERBOSE_RAMROD);
5793 * Lock access to a hardware resource using controller arbitration
/*
 * Validates the resource id, checks it is not already held, then retries
 * the lock-set register write every 5ms for up to 5 seconds.
 *
 * Returns:
 */
5797 * 0 = Success, !0 = Failure.
5800 bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource)
5802 	uint32_t hw_lock_control_reg, lock_status, resource_bit;
5806 	DBENTER(BXE_VERBOSE_MISC);
5807 	DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Locking resource 0x%08X\n",
5808 	    __FUNCTION__, resource);
5811 	resource_bit = 1 << resource;
	/* Pick the per-function driver-control register (funcs 0-5 vs 6-7). */
5814 	hw_lock_control_reg = ((func <= 5) ?
5815 	    (MISC_REG_DRIVER_CONTROL_1 + func * 8) :
5816 	    (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8));
5818 	/* Validating that the resource is within range. */
5819 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
5820 		DBPRINT(sc, BXE_WARN, "%s(): Resource is out of range! "
5821 		    "resource(0x%08X) > HW_LOCK_MAX_RESOURCE_VALUE(0x%08X)\n",
5822 		    __FUNCTION__, resource, HW_LOCK_MAX_RESOURCE_VALUE);
5824 		goto bxe_acquire_hw_lock_exit;
5827 	/* Validating that the resource is not already taken. */
5828 	lock_status = REG_RD(sc, hw_lock_control_reg);
5829 	if (lock_status & resource_bit) {
5830 		DBPRINT(sc, BXE_WARN, "%s(): Failed to acquire lock! "
5831 		    "lock_status = 0x%08X, resource_bit = 0x%08X\n",
5832 		    __FUNCTION__, lock_status, resource_bit);
5834 		goto bxe_acquire_hw_lock_exit;
5837 	/* Try for 5 seconds every 5ms. */
5838 	for (cnt = 0; cnt < 1000; cnt++) {
5839 		/* Try to acquire the lock. */
5840 		REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
5841 		lock_status = REG_RD(sc, hw_lock_control_reg);
5843 		if (lock_status & resource_bit)
5844 			goto bxe_acquire_hw_lock_exit;
5848 	DBPRINT(sc, BXE_WARN, "%s(): Timeout!\n", __FUNCTION__);
5851 bxe_acquire_hw_lock_exit:
5852 	DBEXIT(BXE_VERBOSE_MISC);
5857 * Unlock access to a hardware resource using controller arbitration
/*
 * Validates the resource id, confirms this function actually holds the
 * lock, then writes the resource bit to release it.
 *
 * Returns:
 */
5861 * 0 = Success, !0 = Failure.
5864 bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource)
5866 	uint32_t hw_lock_control_reg, lock_status, resource_bit;
5870 	DBENTER(BXE_VERBOSE_MISC);
5871 	DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Unlocking resource 0x%08X\n",
5872 	    __FUNCTION__, resource);
5874 	resource_bit = 1 << resource;
5877 	/* Validating that the resource is within range */
5878 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
5879 		DBPRINT(sc, BXE_WARN, "%s(): Resource is out of range! "
5880 		    "resource(0x%08X) > HW_LOCK_MAX_RESOURCE_VALUE(0x%08X)\n",
5881 		    __FUNCTION__, resource, HW_LOCK_MAX_RESOURCE_VALUE);
5883 		goto bxe_release_hw_lock_exit;
5886 	/* Find the register for the resource lock. */
5887 	hw_lock_control_reg = ((func <= 5) ?
5888 	    (MISC_REG_DRIVER_CONTROL_1 + func * 8) :
5889 	    (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8));
5891 	/* Validating that the resource is currently taken */
5892 	lock_status = REG_RD(sc, hw_lock_control_reg);
5893 	if (!(lock_status & resource_bit)) {
5894 		DBPRINT(sc, BXE_WARN, "%s(): The resource is not currently "
5895 		    "locked! lock_status = 0x%08X, resource_bit = 0x%08X\n",
5896 		    __FUNCTION__, lock_status, resource_bit);
5898 		goto bxe_release_hw_lock_exit;
5901 	/* Free the hardware lock. */
5902 	REG_WR(sc, hw_lock_control_reg, resource_bit);
5904 bxe_release_hw_lock_exit:
5905 	DBEXIT(BXE_VERBOSE_MISC);
/*
 * Reads the current value of a GPIO pin, accounting for port swap.
 * Validates gpio_num against MISC_REGISTERS_GPIO_3 before reading.
 */
5910 bxe_get_gpio(struct bxe_softc *sc, int gpio_num, uint8_t port)
5912 	uint32_t gpio_mask, gpio_reg;
5913 	int gpio_port, gpio_shift, value;
5915 	/* The GPIO should be swapped if swap register is set and active */
5916 	gpio_port = (REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc,
5917 	    NIG_REG_STRAP_OVERRIDE)) ^ port;
5918 	gpio_shift = gpio_num +
5919 	    (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
5920 	gpio_mask = 1 << gpio_shift;
	/* Reject out-of-range GPIO numbers. */
5922 	if (gpio_num > MISC_REGISTERS_GPIO_3) {
5923 		DBPRINT(sc, BXE_WARN, "%s(): Invalid GPIO %d\n",
5924 		    __FUNCTION__, gpio_num);
5928 	/* read GPIO value */
5929 	gpio_reg = REG_RD(sc, MISC_REG_GPIO);
5931 	/* get the requested pin value */
5932 	if ((gpio_reg & gpio_mask) == gpio_mask)
5937 	DBPRINT(sc, BXE_VERBOSE_PHY, "pin %d value 0x%x\n", gpio_num, value);
5943 * Sets the state of a General Purpose I/O (GPIO).
/*
 * Drives the pin low/high or floats it (input) under the GPIO hardware
 * lock; the swap register is honored when computing the pin position.
 */
5949 bxe_set_gpio(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port)
5951 	uint32_t gpio_reg, gpio_mask;
5952 	int gpio_port, gpio_shift, rc;
5954 	DBENTER(BXE_VERBOSE_MISC);
5956 	/* The GPIO should be swapped if swap register is set and active. */
5957 	gpio_port = (REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc,
5958 	    NIG_REG_STRAP_OVERRIDE)) ^ port;
5959 	gpio_shift = gpio_num +
5960 	    (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
5961 	gpio_mask = (1 << gpio_shift);
	/* Reject out-of-range GPIO numbers. */
5964 	if (gpio_num > MISC_REGISTERS_GPIO_3) {
5965 		DBPRINT(sc, BXE_FATAL, "%s(): Invalid GPIO (%d)!\n",
5966 		    __FUNCTION__, gpio_num);
5968 		goto bxe_set_gpio_exit;
5971 	/* Make sure no one else is trying to use the GPIO. */
5972 	rc = bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
5974 		DBPRINT(sc, BXE_WARN, "%s(): Can't acquire GPIO lock!\n",
5976 		goto bxe_set_gpio_exit;
5979 	/* Read GPIO and mask all but the float bits. */
5980 	gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
5983 	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
5984 		DBPRINT(sc, BXE_VERBOSE, "%s(): Set GPIO %d (shift %d) -> "
5985 		    "output low\n", __FUNCTION__, gpio_num, gpio_shift);
		/* Un-float the pin and set the CLR (drive low) bit. */
5986 		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
5987 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
5989 	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
5990 		DBPRINT(sc, BXE_VERBOSE, "%s(): Set GPIO %d (shift %d) -> "
5991 		    "output high\n", __FUNCTION__, gpio_num, gpio_shift);
		/* Un-float the pin and set the SET (drive high) bit. */
5992 		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
5993 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
5995 	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
5996 		DBPRINT(sc, BXE_VERBOSE, "%s(): Set GPIO %d (shift %d) -> "
5997 		    "input\n", __FUNCTION__, gpio_num, gpio_shift);
		/* Float the pin (high impedance input). */
5998 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
6001 		DBPRINT(sc, BXE_FATAL, "%s(): Unknown GPIO mode (0x%08X)!\n",
6002 		    __FUNCTION__, mode);
6006 	REG_WR(sc, MISC_REG_GPIO, gpio_reg);
6007 	rc = bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
6009 		DBPRINT(sc, BXE_WARN, "%s(): Can't release GPIO lock!\n",
6014 	DBEXIT(BXE_VERBOSE_MISC);
/*
 * Sets or clears a GPIO interrupt output, accounting for port swap.
 * The GPIO interrupt register is updated under the GPIO hardware lock.
 */
6019 bxe_set_gpio_int(struct bxe_softc *sc, int gpio_num, uint32_t mode,
6022 	uint32_t gpio_mask, gpio_reg;
6023 	int gpio_port, gpio_shift;
6025 	/* The GPIO should be swapped if swap register is set and active */
6026 	gpio_port = (REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc,
6027 	    NIG_REG_STRAP_OVERRIDE)) ^ port;
6028 	gpio_shift = gpio_num +
6029 	    (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
6030 	gpio_mask = (1 << gpio_shift);
	/* Reject out-of-range GPIO numbers. */
6031 	if (gpio_num > MISC_REGISTERS_GPIO_3) {
6032 		DBPRINT(sc, BXE_WARN, "%s(): Invalid GPIO %d\n",
6033 		    __FUNCTION__, gpio_num);
6037 	bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
6039 	gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
6042 	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
6043 		DBPRINT(sc, BXE_VERBOSE_PHY, "Clear GPIO INT %d (shift %d) -> "
6044 		    "output low\n", gpio_num, gpio_shift);
6045 		/* clear SET and set CLR */
6046 		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
6047 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
6049 	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
6050 		DBPRINT(sc, BXE_VERBOSE_PHY, "Set GPIO INT %d (shift %d) -> "
6051 		    "output high\n", gpio_num, gpio_shift);
6052 		/* clear CLR and set SET */
6053 		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
6054 		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
6060 	REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
6061 	bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
6067 * Sets the state of a Shared Purpose I/O (SPIO).
/*
 * Drives an SPIO pin low/high or floats it (input) under the SPIO
 * hardware lock. Only SPIO_4 through SPIO_7 are accepted.
 *
 * Returns:
 */
6070 * 0 = Success, !0 = Failure.
6073 bxe_set_spio(struct bxe_softc *sc, int spio_num, uint32_t mode)
6075 	uint32_t spio_reg, spio_mask;
6079 	spio_mask = 1 << spio_num;
6081 	/* Validate the SPIO. */
6082 	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
6083 	    (spio_num > MISC_REGISTERS_SPIO_7)) {
6084 		DBPRINT(sc, BXE_WARN, "%s(): Invalid SPIO (%d)!\n",
6085 		    __FUNCTION__, spio_num);
6087 		goto bxe_set_spio_exit;
6090 	rc = bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
6092 		DBPRINT(sc, BXE_WARN, "%s(): Can't acquire SPIO lock!\n",
6094 		goto bxe_set_spio_exit;
6097 	/* Read SPIO and mask all but the float bits. */
6098 	spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
6101 	case MISC_REGISTERS_SPIO_OUTPUT_LOW :
6102 		DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
6103 		    "output low\n", __FUNCTION__, spio_num);
		/* Un-float the pin and set the CLR (drive low) bit. */
6104 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
6105 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
6107 	case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
6108 		DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
6109 		    "output high\n", __FUNCTION__, spio_num);
		/* Un-float the pin and set the SET (drive high) bit. */
6110 		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
6111 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
6113 	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
6114 		DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
6115 		    "input\n", __FUNCTION__, spio_num);
		/* Float the pin (high impedance input). */
6116 		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
6119 		DBPRINT(sc, BXE_WARN, "%s(): Unknown SPIO mode (0x%08X)!\n",
6120 		    __FUNCTION__, mode);
6124 	REG_WR(sc, MISC_REG_SPIO, spio_reg);
6125 	rc = bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
6127 		DBPRINT(sc, BXE_WARN, "%s(): Can't release SPIO lock!\n",
6136 * When the 57711E is operating in multi-function mode, the controller
6137 * must be configured to arbitrate TX between multiple VNICs.
/*
 * Initializes the per-port rate-shaping and fairness variables in
 * sc->cmng from the current link speed (timer periods are expressed in
 * 4us SDM ticks).
 */
6143 bxe_init_port_minmax(struct bxe_softc *sc)
6145 	uint32_t fair_periodic_timeout_usec, r_param, t_fair;
6147 	DBENTER(BXE_VERBOSE_MISC);
	/* Line rate in bytes per microsecond (Mbps / 8). */
6149 	r_param = sc->link_vars.line_speed / 8;
6151 	memset(&(sc->cmng.rs_vars), 0,
6152 	    sizeof(struct rate_shaping_vars_per_port));
6153 	memset(&(sc->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
6155 	/* 100 usec in SDM ticks = 25 since each tick is 4 usec. */
6156 	sc->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
6158 	 * This is the threshold below which no timer arming will occur.
6159 	 * We use a coefficient of 1, 25 so that the threshold is a
6160 	 * little bigger that real time to compensate for timer
6163 	sc->cmng.rs_vars.rs_threshold = (RS_PERIODIC_TIMEOUT_USEC *
6165 	/* Resolution of fairness timer. */
6166 	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
6168 	/* For 10G it is 1000us, for 1G it is 10000us. */
6169 	t_fair = T_FAIR_COEF / sc->link_vars.line_speed;
6170 	/* This is the threshold where we won't arm the timer
6172 	sc->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
6174 	 * Multiply by 1e3/8 to get bytes/msec. We don't want the
6175 	 * credits to pass a credit of the T_FAIR*FAIR_MEM (algorithm
6178 	sc->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
6179 	/* Since each tick is 4 us. */
6180 	sc->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
6182 	DBEXIT(BXE_VERBOSE_MISC);
6187 * This function is called when a link interrupt is generated
6188 * and configures the controller for the new link state.
/*
 * Syncs statistics, refreshes link state via bxe_link_update(), programs
 * the USTORM pause-enable flag, resets BMAC stats on link-up, and (on
 * E1H multi-function parts) updates congestion management and notifies
 * sibling driver instances on the same port via attention bits.
 */
6194 bxe_link_attn(struct bxe_softc *sc)
6196 	struct host_port_stats *pstats;
6197 	uint32_t pause_enabled;
6198 	int func, i, port, vn;
6200 	DBENTER(BXE_VERBOSE_PHY);
6202 	/* Make sure that we are synced with the current statistics. */
6203 	bxe_stats_handle(sc, STATS_EVENT_STOP);
6205 	bxe_link_update(&sc->link_params, &sc->link_vars);
6207 	if (sc->link_vars.link_up) {
6208 		if (CHIP_IS_E1H(sc)) {
			/* Tell the USTORM whether TX flow control is active. */
6212 			if (sc->link_vars.flow_ctrl & FLOW_CTRL_TX)
6215 			REG_WR(sc, BAR_USTORM_INTMEM +
6216 			    USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
6220 		if (sc->link_vars.mac_type == MAC_TYPE_BMAC) {
6221 			pstats = BXE_SP(sc, port_stats);
6222 			/* Reset old BMAC statistics. */
6223 			memset(&(pstats->mac_stx[0]), 0,
6224 			    sizeof(struct mac_stx));
6227 		if ((sc->state == BXE_STATE_OPEN) ||
6228 		    (sc->state == BXE_STATE_DISABLED))
6229 			bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
6232 	/* Need additional handling for multi-function devices. */
6235 		if (sc->link_vars.link_up) {
6236 			if (sc->dcc_enable == TRUE) {
6237 				bxe_congestionmgmt(sc, TRUE);
6238 				/* Store in internal memory. */
6240 				    sizeof(struct cmng_struct_per_port) / 4;
				/* Copy the cmng struct DWORD-by-DWORD into XSTORM memory. */
6242 					REG_WR(sc, BAR_XSTORM_INTMEM +
6243 					    XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + (i*4),
6244 					    ((uint32_t *)(&sc->cmng))[i]);
6248 		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
6249 			/* Don't send an attention to ourselves. */
6250 			if (vn == BP_E1HVN(sc))
6252 			func = ((vn << 1) | port);
6254 			 * Send an attention to other drivers on the same port.
6256 			REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_0 +
6257 			    (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func) * 4, 1);
6261 	DBEXIT(BXE_VERBOSE_PHY);
6265 * Sets the driver instance as the port management function (PMF).
6267 * This is only used on "multi-function" capable devices such as the
6268 * 57711E and initializes the controller so that the PMF driver instance
6269 * can interact with other driver instances that may be operating on
6270 * the same Ethernet port.
6276 bxe_pmf_update(struct bxe_softc *sc)
6281 	/* Record that this driver instance is managing the port. */
6283 	DBPRINT(sc, BXE_INFO, "%s(): Enabling this port as PMF.\n",
6286 	/* Enable NIG attention. */
	/* Leading/trailing edge mask: base 0xff0f plus this VN's bit. */
6288 	val = (0xff0f | (1 << (BP_E1HVN(sc) + 4)));
6289 	REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val);
6290 	REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val);
	/* Kick the statistics state machine for the PMF transition. */
6292 	bxe_stats_handle(sc, STATS_EVENT_PMF);
6295 /* 8073 Download definitions */
6296 /* spi Parameters.*/
6297 #define SPI_CTRL_1_L 0xC000
6298 #define SPI_CTRL_1_H 0xC002
6299 #define SPI_CTRL_2_L 0xC400
6300 #define SPI_CTRL_2_H 0xC402
6301 #define SPI_TXFIFO 0xD000
6302 #define SPI_RXFIFO 0xD400
6304 /* Input Command Messages.*/
6306 * Write CPU/SPI Control Regs, followed by Count And CPU/SPI Controller
6307 * Reg add/data pairs.
6309 #define WR_CPU_CTRL_REGS 0x11
6311 * Read CPU/SPI Control Regs, followed by Count and CPU/SPI Controller
6314 #define RD_CPU_CTRL_REGS 0xEE
6316 * Write CPU/SPI Control Regs Continously, followed by Count and
6317 * CPU/SPI Controller Reg addr and data's.
6319 #define WR_CPU_CTRL_FIFO 0x66
6320 /* Output Command Messages.*/
6323 /* SPI Controller Commands (known As messages).*/
6324 #define MSGTYPE_HWR 0x40
6325 #define MSGTYPE_HRD 0x80
6326 #define WRSR_OPCODE 0x01
6327 #define WR_OPCODE 0x02
6328 #define RD_OPCODE 0x03
6329 #define WRDI_OPCODE 0x04
6330 #define RDSR_OPCODE 0x05
6331 #define WREN_OPCODE 0x06
6332 #define WR_BLOCK_SIZE 0x40 /* Maximum 64 Bytes Writes.*/
6335 * Post a slowpath command.
6337 * A slowpath command is used to propogate a configuration change through
6338 * the controller in a controlled manner, allowing each STORM processor and
6339 * other H/W blocks to phase in the change. The commands sent on the
6340 * slowpath are referred to as ramrods. Depending on the ramrod used the
6341 * completion of the ramrod will occur in different ways. Here's a
6342 * breakdown of ramrods and how they complete:
6344 * RAMROD_CMD_ID_ETH_PORT_SETUP
6345 * Used to setup the leading connection on a port. Completes on the
6346 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
6348 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
6349 * Used to setup an additional connection on a port. Completes on the
6350 * RCQ of the multi-queue/RSS connection being initialized.
6352 * RAMROD_CMD_ID_ETH_STAT_QUERY
6353 * Used to force the storm processors to update the statistics database
6354 * in host memory. This ramrod is send on the leading connection CID and
6355 * completes as an index increment of the CSTORM on the default status
6358 * RAMROD_CMD_ID_ETH_UPDATE
6359 * Used to update the state of the leading connection, usually to udpate
6360 * the RSS indirection table. Completes on the RCQ of the leading
6361 * connection. (Not currently used under FreeBSD until OS support becomes
6364 * RAMROD_CMD_ID_ETH_HALT
6365 * Used when tearing down a connection prior to driver unload. Completes
6366 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
6367 * use this on the leading connection.
6369 * RAMROD_CMD_ID_ETH_SET_MAC
6370 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
6371 * the RCQ of the leading connection.
6373 * RAMROD_CMD_ID_ETH_CFC_DEL
6374 * Used when tearing down a conneciton prior to driver unload. Completes
6375 * on the RCQ of the leading connection (since the current connection
6376 * has been completely removed from controller memory).
6378 * RAMROD_CMD_ID_ETH_PORT_DEL
6379 * Used to tear down the leading connection prior to driver unload,
6380 * typically fp[0]. Completes as an index increment of the CSTORM on the
6381 * default status block.
6383 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
6384 * Used for connection offload. Completes on the RCQ of the multi-queue
6385 * RSS connection that is being offloaded. (Not currently used under
6388 * There can only be one command pending per function.
6391 * 0 = Success, !0 = Failure.
6394 bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi,
6395     uint32_t data_lo, int common)
6399 	DBRUNMSG((BXE_EXTREME_LOAD | BXE_EXTREME_RESET |
6400 	    BXE_EXTREME_UNLOAD | BXE_EXTREME_RAMROD),
6401 	    bxe_decode_ramrod_cmd(sc, command));
6403 	DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): cid = %d, data_hi = 0x%08X, "
6404 	    "data_low = 0x%08X, remaining spq entries = %d\n", __FUNCTION__,
6405 	    cid, data_hi, data_lo, sc->spq_left);
6408 	/* Skip all slowpath commands if the driver has panic'd. */
6411 		goto bxe_sp_post_exit;
6416 	/* We are limited to 8 slowpath commands. */
6417 	if (!sc->spq_left) {
6418 		BXE_PRINTF("%s(%d): Slowpath queue is full!\n",
6419 		    __FILE__, __LINE__);
6422 		goto bxe_sp_post_exit;
6425 	/* Encode the CID with the command. */
6426 	sc->spq_prod_bd->hdr.conn_and_cmd_data =
6427 	    htole32(((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)));
6428 	sc->spq_prod_bd->hdr.type = htole16(ETH_CONNECTION_TYPE);
	/* Mark the entry as a common (port-wide) ramrod when requested. */
6431 		sc->spq_prod_bd->hdr.type |=
6432 		    htole16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
6434 	/* Point the hardware at the new configuration data. */
6435 	sc->spq_prod_bd->data.mac_config_addr.hi = htole32(data_hi);
6436 	sc->spq_prod_bd->data.mac_config_addr.lo = htole32(data_lo);
6438 	/* Reduce the number of available slots for slowpath commands. */
6441 	/* Manage the end of the ring. */
6442 	if (sc->spq_prod_bd == sc->spq_last_bd) {
		/* Wrap the producer back to the start of the SPQ ring. */
6443 		sc->spq_prod_bd = sc->spq;
6444 		sc->spq_prod_idx = 0;
6445 		DBPRINT(sc, BXE_VERBOSE, "%s(): End of slowpath queue.\n",
6453 	/* Kick off the slowpath command. */
6454 	REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
6464 * Acquire the MCP access lock.
6467 * 0 = Success, !0 = Failure.
6470 bxe_acquire_alr(struct bxe_softc *sc)
6475 DBENTER(BXE_VERBOSE_MISC);
6479 /* Acquire lock using mcpr_access_lock SPLIT register. */
6480 for (i = 0; i < retries * 10; i++) {
6482 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6483 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6485 if (val & (1L << 31))
6491 if (!(val & (1L << 31))) {
6492 DBPRINT(sc, BXE_WARN,
6493 "%s(): Cannot acquire MCP split access lock.\n",
6498 DBEXIT(BXE_VERBOSE_MISC);
6504 * Release the MCP access lock.
6510 bxe_release_alr(struct bxe_softc* sc)
6513 DBENTER(BXE_VERBOSE_MISC);
6515 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6517 DBEXIT(BXE_VERBOSE_MISC);
6521 * Update driver's copies of the values in the host default status block.
6524 * Bitmap indicating changes to the block.
6526 static __inline uint16_t
6527 bxe_update_dsb_idx(struct bxe_softc *sc)
6529 struct host_def_status_block *dsb;
6534 /* Read memory barrier since block is written by hardware. */
6537 if (sc->def_att_idx !=
6538 le16toh(dsb->atten_status_block.attn_bits_index)) {
6540 le16toh(dsb->atten_status_block.attn_bits_index);
6544 if (sc->def_c_idx !=
6545 le16toh(dsb->c_def_status_block.status_block_index)) {
6547 le16toh(dsb->c_def_status_block.status_block_index);
6551 if (sc->def_u_idx !=
6552 le16toh(dsb->u_def_status_block.status_block_index)) {
6554 le16toh(dsb->u_def_status_block.status_block_index);
6558 if (sc->def_x_idx !=
6559 le16toh(dsb->x_def_status_block.status_block_index)) {
6561 le16toh(dsb->x_def_status_block.status_block_index);
6565 if (sc->def_t_idx !=
6566 le16toh(dsb->t_def_status_block.status_block_index)) {
6568 le16toh(dsb->t_def_status_block.status_block_index);
6576 * Handle any attentions that have been newly asserted.
6582 bxe_attn_int_asserted(struct bxe_softc *sc, uint32_t asserted)
6584 uint32_t aeu_addr, hc_addr, nig_int_mask_addr;
6585 uint32_t aeu_mask, nig_mask;
6588 DBENTER(BXE_VERBOSE_INTR);
6591 hc_addr = (HC_REG_COMMAND_REG + port * 32 + COMMAND_REG_ATTN_BITS_SET);
6592 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6593 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6594 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
6595 NIG_REG_MASK_INTERRUPT_PORT0;
6598 if (sc->attn_state & asserted)
6599 BXE_PRINTF("%s(%d): IGU attention ERROR!\n",
6600 __FILE__, __LINE__);
6602 rc = bxe_acquire_hw_lock(sc,
6603 HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
6605 DBPRINT(sc, BXE_WARN,
6606 "%s(): Failed to acquire attention lock for port %d!\n",
6607 __FUNCTION__, port);
6608 goto bxe_attn_int_asserted_exit;
6611 aeu_mask = REG_RD(sc, aeu_addr);
6612 DBPRINT(sc, BXE_VERBOSE_INTR,
6613 "%s(): aeu_mask = 0x%08X, newly asserted = 0x%08X\n", __FUNCTION__,
6614 aeu_mask, asserted);
6616 aeu_mask &= ~(asserted & 0xff);
6617 DBPRINT(sc, BXE_VERBOSE_INTR, "%s(): new mask = 0x%08X\n", __FUNCTION__,
6619 REG_WR(sc, aeu_addr, aeu_mask);
6621 rc = bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
6623 DBPRINT(sc, BXE_WARN,
6624 "%s(): Failed to release attention lock!\n", __FUNCTION__);
6625 goto bxe_attn_int_asserted_exit;
6628 DBPRINT(sc, BXE_VERBOSE_INTR, "%s(): attn_state = 0x%08X\n",
6629 __FUNCTION__, sc->attn_state);
6631 sc->attn_state |= asserted;
6632 DBPRINT(sc, BXE_VERBOSE_INTR, "%s(): new attn_state = 0x%08X\n",
6633 __FUNCTION__, sc->attn_state);
6635 if (asserted & ATTN_HARD_WIRED_MASK) {
6636 if (asserted & ATTN_NIG_FOR_FUNC) {
6637 bxe_acquire_phy_lock(sc);
6639 /* Save NIG interrupt mask. */
6640 nig_mask = REG_RD(sc, nig_int_mask_addr);
6641 REG_WR(sc, nig_int_mask_addr, 0);
6646 if (asserted & ATTN_SW_TIMER_4_FUNC)
6647 DBPRINT(sc, BXE_WARN, "%s(): ATTN_SW_TIMER_4_FUNC!\n",
6650 if (asserted & GPIO_2_FUNC)
6651 DBPRINT(sc, BXE_WARN, "%s(): GPIO_2_FUNC!\n",
6654 if (asserted & GPIO_3_FUNC)
6655 DBPRINT(sc, BXE_WARN, "%s(): GPIO_3_FUNC!\n",
6658 if (asserted & GPIO_4_FUNC)
6659 DBPRINT(sc, BXE_WARN, "%s(): GPIO_4_FUNC!\n",
6663 if (asserted & ATTN_GENERAL_ATTN_1) {
6664 DBPRINT(sc, BXE_WARN,
6665 "%s(): ATTN_GENERAL_ATTN_1!\n",
6667 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
6670 if (asserted & ATTN_GENERAL_ATTN_2) {
6671 DBPRINT(sc, BXE_WARN,
6672 "%s(): ATTN_GENERAL_ATTN_2!\n",
6674 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
6677 if (asserted & ATTN_GENERAL_ATTN_3) {
6678 DBPRINT(sc, BXE_WARN,
6679 "%s(): ATTN_GENERAL_ATTN_3!\n",
6681 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
6684 if (asserted & ATTN_GENERAL_ATTN_4) {
6685 DBPRINT(sc, BXE_WARN,
6686 "%s(): ATTN_GENERAL_ATTN_4!\n",
6688 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
6691 if (asserted & ATTN_GENERAL_ATTN_5) {
6692 DBPRINT(sc, BXE_WARN,
6693 "%s(): ATTN_GENERAL_ATTN_5!\n",
6695 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
6697 if (asserted & ATTN_GENERAL_ATTN_6) {
6698 DBPRINT(sc, BXE_WARN,
6699 "%s(): ATTN_GENERAL_ATTN_6!\n",
6701 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
6706 DBPRINT(sc, BXE_VERBOSE_INTR,
6707 "%s(): Writing 0x%08X to HC addr 0x%08X\n", __FUNCTION__,
6709 REG_WR(sc, hc_addr, asserted);
6711 /* Now set back the NIG mask. */
6712 if (asserted & ATTN_NIG_FOR_FUNC) {
6713 REG_WR(sc, nig_int_mask_addr, nig_mask);
6714 bxe_release_phy_lock(sc);
6717 bxe_attn_int_asserted_exit:
6718 DBEXIT(BXE_VERBOSE_INTR);
6722 * Handle any attentions that have been newly deasserted.
6727 static __inline void
6728 bxe_attn_int_deasserted0(struct bxe_softc *sc, uint32_t attn)
6730 uint32_t val, swap_val, swap_override;
6731 int port, reg_offset;
6733 DBENTER(BXE_VERBOSE_INTR);
6736 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6737 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6739 /* Handle SPIO5 attention. */
6740 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
6741 val = REG_RD(sc, reg_offset);
6742 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
6743 REG_WR(sc, reg_offset, val);
6745 DBPRINT(sc, BXE_FATAL, "%s(): SPIO5 H/W attention!\n",
6747 /* Fan failure attention */
6748 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
6749 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6751 * SPIO5 is used on A1022G boards to indicate
6752 * fan failure. Shutdown the controller and
6753 * associated PHY to avoid damage.
6756 /* Low power mode is controled by GPIO 2. */
6757 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_2,
6758 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6759 /* PHY reset is controled by GPIO 1. */
6760 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_1,
6761 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6763 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6764 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6766 * The PHY reset is controlled by GPIO 1.
6767 * Fake the port number to cancel the swap done in
6770 swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
6771 swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
6772 port = (swap_val && swap_override) ^ 1;
6773 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_1,
6774 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6780 /* Mark the failure. */
6781 sc->link_params.ext_phy_config &=
6782 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6783 sc->link_params.ext_phy_config |=
6784 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6785 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6786 sc->link_params.ext_phy_config);
6787 /* Log the failure */
6788 BXE_PRINTF("A fan failure has caused the driver to "
6789 "shutdown the device to prevent permanent damage.\n");
6792 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
6793 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
6794 bxe_acquire_phy_lock(sc);
6795 bxe_handle_module_detect_int(&sc->link_params);
6796 bxe_release_phy_lock(sc);
6799 /* Checking for an assert on the zero block */
6800 if (attn & HW_INTERRUT_ASSERT_SET_0) {
6801 val = REG_RD(sc, reg_offset);
6802 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
6803 REG_WR(sc, reg_offset, val);
6805 BXE_PRINTF("%s(%d): FATAL hardware block attention "
6806 "(set0 = 0x%08X)!\n", __FILE__, __LINE__,
6807 (attn & (uint32_t)HW_INTERRUT_ASSERT_SET_0));
6812 DBEXIT(BXE_VERBOSE_INTR);
6816 * Handle any attentions that have been newly deasserted.
6821 static __inline void
6822 bxe_attn_int_deasserted1(struct bxe_softc *sc, uint32_t attn)
6825 int port, reg_offset;
6827 DBENTER(BXE_VERBOSE_INTR);
6829 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
6830 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
6832 DBPRINT(sc, BXE_FATAL,
6833 "%s(): Doorbell hardware attention (0x%08X).\n",
6836 /* DORQ discard attention */
6838 DBPRINT(sc, BXE_FATAL,
6839 "%s(): FATAL doorbell queue error!\n",
6843 if (attn & HW_INTERRUT_ASSERT_SET_1) {
6845 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
6846 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1;
6848 val = REG_RD(sc, reg_offset);
6849 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
6850 REG_WR(sc, reg_offset, val);
6852 BXE_PRINTF("%s(%d): FATAL hardware block attention "
6853 "(set1 = 0x%08X)!\n", __FILE__, __LINE__,
6854 (attn & (uint32_t)HW_INTERRUT_ASSERT_SET_1));
6859 DBEXIT(BXE_VERBOSE_INTR);
6863 * Handle any attentions that have been newly deasserted.
6868 static __inline void
6869 bxe_attn_int_deasserted2(struct bxe_softc *sc, uint32_t attn)
6872 int port, reg_offset;
6874 DBENTER(BXE_VERBOSE_INTR);
6876 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
6877 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
6879 DBPRINT(sc, BXE_FATAL,
6880 "%s(): CFC hardware attention (0x%08X).\n", __FUNCTION__,
6883 /* CFC error attention. */
6885 DBPRINT(sc, BXE_FATAL, "%s(): FATAL CFC error!\n",
6889 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
6890 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
6892 DBPRINT(sc, BXE_FATAL,
6893 "%s(): PXP hardware attention (0x%08X).\n", __FUNCTION__,
6896 /* RQ_USDMDP_FIFO_OVERFLOW */
6898 DBPRINT(sc, BXE_FATAL, "%s(): FATAL PXP error!\n",
6902 if (attn & HW_INTERRUT_ASSERT_SET_2) {
6904 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
6905 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2;
6907 val = REG_RD(sc, reg_offset);
6908 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
6909 REG_WR(sc, reg_offset, val);
6911 BXE_PRINTF("%s(%d): FATAL hardware block attention (set2 = "
6912 "0x%08X)! port=%d, val written=0x%x attn=0x%x\n", __FILE__,
6913 __LINE__, (attn & (uint32_t)HW_INTERRUT_ASSERT_SET_2),
6919 DBEXIT(BXE_VERBOSE_INTR);
6923 * Handle any attentions that have been newly deasserted.
6928 static __inline void
6929 bxe_attn_int_deasserted3(struct bxe_softc *sc, uint32_t attn)
6934 DBENTER(BXE_VERBOSE_INTR);
6936 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
6937 /* Look for any port assertions. */
6938 if (attn & BXE_PMF_LINK_ASSERT) {
6940 * We received a message from the driver instance
6941 * that is managing the Ethernet port (link up/down).
6942 * Go ahead and handle it.
6946 DBPRINT(sc, BXE_INFO,
6947 "%s(): Received link attention from PMF.\n",
6950 /* Clear the attention. */
6951 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
6952 sc->mf_config[BP_E1HVN(sc)] =
6954 mf_cfg.func_mf_config[(sc->bxe_func & 1)].config);
6955 val = SHMEM_RD(sc, func_mb[func].drv_status);
6956 if (sc->dcc_enable == TRUE) {
6957 if (val & DRV_STATUS_DCC_EVENT_MASK)
6959 val & DRV_STATUS_DCC_EVENT_MASK);
6961 bxe__link_status_update(sc);
6963 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
6965 /* Look for any microcode assertions. */
6966 } else if (attn & BXE_MC_ASSERT_BITS) {
6967 DBPRINT(sc, BXE_FATAL, "%s(): Microcode assert!\n",
6970 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
6971 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
6972 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
6973 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
6977 /* Look for any bootcode assertions. */
6978 } else if (attn & BXE_MCP_ASSERT) {
6979 DBPRINT(sc, BXE_FATAL, "%s(): Bootcode assert!\n",
6982 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
6984 DBRUN(bxe_dump_fw(sc));
6986 DBPRINT(sc, BXE_FATAL,
6987 "%s(): Unknown hardware assertion "
6988 "(attn = 0x%08X)!\n", __FUNCTION__, attn);
6991 /* Look for any hardware latched attentions. */
6992 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
6993 DBPRINT(sc, BXE_FATAL,
6994 "%s(): Latched attention 0x%08X (masked)!\n", __FUNCTION__,
6997 /* Check if a GRC register access timeout occurred. */
6998 if (attn & BXE_GRC_TIMEOUT) {
6999 val = CHIP_IS_E1H(sc) ? REG_RD(sc,
7000 MISC_REG_GRC_TIMEOUT_ATTN) : 0;
7002 DBPRINT(sc, BXE_WARN,
7003 "%s(): GRC timeout for register 0x%08X!\n",
7007 /* Check if a GRC reserved register was accessed. */
7008 if (attn & BXE_GRC_RSV) {
7009 val = CHIP_IS_E1H(sc) ? REG_RD(sc,
7010 MISC_REG_GRC_RSV_ATTN) : 0;
7012 DBPRINT(sc, BXE_WARN,
7013 "%s(): GRC register 0x%08X is reserved!\n",
7017 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
7020 DBEXIT(BXE_VERBOSE_INTR);
7024 * Handle any attentions that have been newly deasserted.
7030 bxe_attn_int_deasserted(struct bxe_softc *sc, uint32_t deasserted)
7032 struct attn_route attn;
7033 struct attn_route group_mask;
7034 uint32_t val, reg_addr, aeu_mask;
7037 DBENTER(BXE_VERBOSE_INTR);
7040 * Need to take HW lock because MCP or other port might also try
7041 * to handle this event.
7043 bxe_acquire_alr(sc);
7046 /* Get the current attention signal bits. */
7047 attn.sig[0] = REG_RD(sc,
7048 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4);
7049 attn.sig[1] = REG_RD(sc,
7050 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4);
7051 attn.sig[2] = REG_RD(sc,
7052 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4);
7053 attn.sig[3] = REG_RD(sc,
7054 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4);
7056 DBPRINT(sc, BXE_EXTREME_INTR,
7057 "%s(): attention = 0x%08X 0x%08X 0x%08X 0x%08X\n", __FUNCTION__,
7058 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
7061 * Compare the current attention bits to each attention group
7062 * to see if anyone has registered this attention.
7064 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
7065 if (deasserted & (1 << index)) {
7066 group_mask = sc->attn_group[index];
7068 DBPRINT(sc, BXE_EXTREME_INTR,
7069 "%s(): group[%02d] = 0x%08X 0x%08X 0x%08x 0X%08x\n",
7070 __FUNCTION__, index, group_mask.sig[0],
7071 group_mask.sig[1], group_mask.sig[2],
7074 /* Handle any registered attentions. */
7075 bxe_attn_int_deasserted3(sc,
7076 attn.sig[3] & group_mask.sig[3]);
7077 bxe_attn_int_deasserted1(sc,
7078 attn.sig[1] & group_mask.sig[1]);
7079 bxe_attn_int_deasserted2(sc,
7080 attn.sig[2] & group_mask.sig[2]);
7081 bxe_attn_int_deasserted0(sc,
7082 attn.sig[0] & group_mask.sig[0]);
7084 if ((attn.sig[0] & group_mask.sig[0] &
7085 HW_PRTY_ASSERT_SET_0) ||
7086 (attn.sig[1] & group_mask.sig[1] &
7087 HW_PRTY_ASSERT_SET_1) ||
7088 (attn.sig[2] & group_mask.sig[2] &
7089 HW_PRTY_ASSERT_SET_2))
7090 BXE_PRINTF("%s(%d): FATAL hardware block "
7091 "parity attention!\n", __FILE__, __LINE__);
7095 bxe_release_alr(sc);
7097 reg_addr = (HC_REG_COMMAND_REG +
7098 port * 32 + COMMAND_REG_ATTN_BITS_CLR);
7101 DBPRINT(sc, BXE_EXTREME_INTR,
7102 "%s(): About to mask 0x%08X at HC addr 0x%08X\n", __FUNCTION__,
7103 deasserted, reg_addr);
7104 REG_WR(sc, reg_addr, val);
7106 if (~sc->attn_state & deasserted)
7107 DBPRINT(sc, BXE_FATAL, "%s(): IGU Bug!\n", __FUNCTION__);
7109 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7110 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7112 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7113 aeu_mask = REG_RD(sc, reg_addr);
7115 DBPRINT(sc, BXE_EXTREME_INTR,
7116 "%s(): Current aeu_mask = 0x%08X, newly deasserted = 0x%08X\n",
7117 __FUNCTION__, aeu_mask, deasserted);
7118 aeu_mask |= (deasserted & 0xff);
7120 DBPRINT(sc, BXE_EXTREME_INTR, "%s(): New aeu_mask = 0x%08X\n",
7121 __FUNCTION__, aeu_mask);
7123 REG_WR(sc, reg_addr, aeu_mask);
7124 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7126 DBPRINT(sc, BXE_EXTREME_INTR, "%s(): Current attn_state = 0x%08X\n",
7127 __FUNCTION__, sc->attn_state);
7129 sc->attn_state &= ~deasserted;
7130 DBPRINT(sc, BXE_EXTREME_INTR, "%s(): New attn_state = 0x%08X\n",
7131 __FUNCTION__, sc->attn_state);
7133 DBEXIT(BXE_VERBOSE_INTR);
7137 * Handle interrupts caused by internal attentions (everything else other
7138 * than RX, TX, and link state changes).
7144 bxe_attn_int(struct bxe_softc* sc)
7146 uint32_t attn_ack, attn_bits, attn_state;
7147 uint32_t asserted, deasserted;
7149 DBENTER(BXE_VERBOSE_INTR);
7151 attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
7153 le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
7154 attn_state = sc->attn_state;
7155 asserted = attn_bits & ~attn_ack & ~attn_state;
7156 deasserted = ~attn_bits & attn_ack & attn_state;
7158 /* Make sure we're in a sane state. */
7159 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
7160 BXE_PRINTF("%s(%d): Bad attention state!\n",
7161 __FILE__, __LINE__);
7163 /* Handle any attentions that are newly asserted. */
7165 DBPRINT(sc, BXE_VERBOSE_INTR,
7166 "%s(): attn_state = 0x%08X, attn_bits = 0x%08X, "
7167 "attn_ack = 0x%08X, asserted = 0x%08X\n", __FUNCTION__,
7168 attn_state, attn_bits, attn_ack, asserted);
7169 bxe_attn_int_asserted(sc, asserted);
7172 /* Handle any attentions that are newly deasserted. */
7174 DBPRINT(sc, BXE_VERBOSE_INTR,
7175 "%s(): attn_state = 0x%08X, attn_bits = 0x%08X, "
7176 "attn_ack = 0x%08X, deasserted = 0x%08X\n", __FUNCTION__,
7177 attn_state, attn_bits, attn_ack, deasserted);
7178 bxe_attn_int_deasserted(sc, deasserted);
7181 DBEXIT(BXE_VERBOSE_INTR);
7184 /* sum[hi:lo] += add[hi:lo] */
7185 #define ADD_64(s_hi, a_hi, s_lo, a_lo) do { \
7187 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
7190 /* Subtraction = minuend -= subtrahend */
7191 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
7193 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
7197 /* difference = minuend - subtrahend */
7198 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) do { \
7199 if (m_lo < s_lo) { \
7201 d_hi = m_hi - s_hi; \
7203 /* we can 'loan' 1 */ \
7205 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
7207 /* m_hi <= s_hi */ \
7212 /* m_lo >= s_lo */ \
7213 if (m_hi < s_hi) { \
7217 /* m_hi >= s_hi */ \
7218 d_hi = m_hi - s_hi; \
7219 d_lo = m_lo - s_lo; \
7224 #define UPDATE_STAT64(s, t) do { \
7225 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi,\
7226 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
7227 pstats->mac_stx[0].t##_hi = new->s##_hi; \
7228 pstats->mac_stx[0].t##_lo = new->s##_lo; \
7229 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
7230 pstats->mac_stx[1].t##_lo, diff.lo); \
7233 #define UPDATE_STAT64_NIG(s, t) do { \
7234 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
7235 diff.lo, new->s##_lo, old->s##_lo); \
7236 ADD_64(estats->t##_hi, diff.hi, \
7237 estats->t##_lo, diff.lo); \
7240 /* sum[hi:lo] += add */
7241 #define ADD_EXTEND_64(s_hi, s_lo, a) do { \
7243 s_hi += (s_lo < a) ? 1 : 0; \
7246 #define UPDATE_EXTEND_STAT(s) do { \
7247 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
7248 pstats->mac_stx[1].s##_lo, new->s); \
7251 #define UPDATE_EXTEND_TSTAT(s, t) do { \
7252 diff = (tclient->s) - (old_tclient->s); \
7253 old_tclient->s = (tclient->s); \
7254 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
7257 #define UPDATE_EXTEND_XSTAT(s, t) do { \
7258 diff = xclient->s - old_xclient->s; \
7259 old_xclient->s = xclient->s; \
7260 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
7263 #define UPDATE_EXTEND_USTAT(s, t) do { \
7264 diff = uclient->s - old_uclient->s; \
7265 old_uclient->s = uclient->s; \
7266 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
7269 #define SUB_EXTEND_64(m_hi, m_lo, s)do { \
7270 SUB_64(m_hi, 0, m_lo, s); \
7273 #define SUB_EXTEND_USTAT(s, t)do { \
7274 diff = (uclient->s) - (old_uclient->s); \
7275 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
7282 #define BITS_PER_LONG 32
7284 #define BITS_PER_LONG 64
7287 static __inline long
7288 bxe_hilo(uint32_t *hiref)
7293 #if (BITS_PER_LONG == 64)
7294 uint32_t hi = *hiref;
7295 return (HILO_U64(hi, lo));
7302 * Request the STORM statistics by posting a slowpath ramrod.
7308 bxe_stats_storm_post(struct bxe_softc *sc)
7310 struct eth_query_ramrod_data ramrod_data = {0};
7313 DBENTER(BXE_INSANE_STATS);
7315 if (!sc->stats_pending) {
7316 ramrod_data.drv_counter = sc->stats_counter++;
7317 ramrod_data.collect_port = sc->port.pmf ? 1 : 0;
7318 for (i = 0; i < sc->num_queues; i++)
7319 ramrod_data.ctr_id_vector |= (1 << sc->fp[i].cl_id);
7321 rc = bxe_sp_post(sc, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
7322 ((uint32_t *)&ramrod_data)[1],
7323 ((uint32_t *)&ramrod_data)[0], 0);
7325 /* Stats ramrod has it's own slot on the SPQ. */
7327 sc->stats_pending = 1;
7331 DBEXIT(BXE_INSANE_STATS);
7335 * Set up the address used by the driver to report port-based statistics
7336 * back to the controller.
7342 bxe_stats_port_base_init(struct bxe_softc *sc)
7344 uint32_t *stats_comp;
7345 struct dmae_command *dmae;
7347 DBENTER(BXE_VERBOSE_STATS);
7349 /* Only the port management function (PMF) does this work. */
7350 if ((sc->port.pmf == 0) || !sc->port.port_stx) {
7351 BXE_PRINTF("%s(%d): Invalid statistcs port setup!\n",
7352 __FILE__, __LINE__);
7353 goto bxe_stats_port_base_init_exit;
7356 stats_comp = BXE_SP(sc, stats_comp);
7357 sc->executer_idx = 0;
7359 /* DMA the address of the drivers port statistics block. */
7360 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7361 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7362 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7363 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7365 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7367 DMAE_CMD_ENDIANITY_DW_SWAP |
7369 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7370 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7371 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
7372 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
7373 dmae->dst_addr_lo = sc->port.port_stx >> 2;
7374 dmae->dst_addr_hi = 0;
7375 dmae->len = sizeof(struct host_port_stats) >> 2;
7376 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7377 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7378 dmae->comp_val = DMAE_COMP_VAL;
7381 bxe_stats_hw_post(sc);
7384 bxe_stats_port_base_init_exit:
7385 DBEXIT(BXE_VERBOSE_STATS);
7389 * Set up the address used by the driver to report function-based statistics
7390 * back to the controller.
7396 bxe_stats_func_base_init(struct bxe_softc *sc)
7402 DBENTER(BXE_VERBOSE_STATS);
7404 /* Only the port management function (PMF) does this work. */
7405 if ((sc->port.pmf == 0) || !sc->func_stx) {
7406 BXE_PRINTF("%s(%d): Invalid statistcs function setup!\n",
7407 __FILE__, __LINE__);
7408 goto bxe_stats_func_base_init_exit;
7412 func_stx = sc->func_stx;
7413 vn_max = IS_E1HMF(sc) ? E1HVN_MAX : E1VN_MAX;
7415 /* Initialize each function individually. */
7416 for (vn = VN_0; vn < vn_max; vn++) {
7417 func = 2 * vn + port;
7418 sc->func_stx = SHMEM_RD(sc, func_mb[func].fw_mb_param);
7419 bxe_stats_func_init(sc);
7420 bxe_stats_hw_post(sc);
7424 sc->func_stx = func_stx;
7426 bxe_stats_func_base_init_exit:
7427 DBEXIT(BXE_VERBOSE_STATS);
7431 * DMA the function-based statistics to the controller.
7437 bxe_stats_func_base_update(struct bxe_softc *sc)
7439 uint32_t *stats_comp;
7440 struct dmae_command *dmae;
7442 DBENTER(BXE_VERBOSE_STATS);
7444 /* Only the port management function (PMF) does this work. */
7445 if ((sc->port.pmf == 0) || !sc->func_stx) {
7446 BXE_PRINTF("%s(%d): Invalid statistcs function update!\n",
7447 __FILE__, __LINE__);
7448 goto bxe_stats_func_base_update_exit;
7451 dmae = &sc->stats_dmae;
7452 stats_comp = BXE_SP(sc, stats_comp);
7453 sc->executer_idx = 0;
7454 memset(dmae, 0, sizeof(struct dmae_command));
7456 /* DMA the function statistics from the driver to the H/W. */
7457 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7458 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7459 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7461 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7463 DMAE_CMD_ENDIANITY_DW_SWAP |
7465 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7466 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7467 dmae->src_addr_lo = sc->func_stx >> 2;
7468 dmae->src_addr_hi = 0;
7469 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats_base));
7470 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats_base));
7471 dmae->len = sizeof(struct host_func_stats) >> 2;
7472 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7473 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7474 dmae->comp_val = DMAE_COMP_VAL;
7477 bxe_stats_hw_post(sc);
7480 bxe_stats_func_base_update_exit:
7481 DBEXIT(BXE_VERBOSE_STATS);
7486 * Initialize statistics.
7492 bxe_stats_init(struct bxe_softc *sc)
7494 struct bxe_fastpath *fp;
7497 DBENTER(BXE_VERBOSE_STATS);
7499 if (sc->stats_enable == FALSE)
7500 goto bxe_stats_init_exit;
7504 sc->executer_idx = 0;
7505 sc->stats_counter = 0;
7506 sc->stats_pending = 0;
7508 /* Fetch the offset of port & function statistics in shared memory. */
7510 sc->port.port_stx = 0;
7513 sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
7514 sc->func_stx = SHMEM_RD(sc, func_mb[func].fw_mb_param);
7517 DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): sc->port.port_stx = 0x%08X\n",
7518 __FUNCTION__, sc->port.port_stx);
7519 DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): sc->func_stx = 0x%08X\n",
7520 __FUNCTION__, sc->func_stx);
7522 /* Port statistics. */
7523 memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
7524 sc->port.old_nig_stats.brb_discard = REG_RD(sc,
7525 NIG_REG_STAT0_BRB_DISCARD + port * 0x38);
7526 sc->port.old_nig_stats.brb_truncate = REG_RD(sc,
7527 NIG_REG_STAT0_BRB_TRUNCATE + port * 0x38);
7528 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port * 0x50,
7529 &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
7530 REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port * 0x50,
7531 &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
7533 /* Function statistics. */
7534 for (i = 0; i < sc->num_queues; i++) {
7537 /* Clear all per-queue statistics. */
7538 memset(&fp->old_tclient, 0,
7539 sizeof(struct tstorm_per_client_stats));
7540 memset(&fp->old_uclient, 0,
7541 sizeof(struct ustorm_per_client_stats));
7542 memset(&fp->old_xclient, 0,
7543 sizeof(struct xstorm_per_client_stats));
7544 memset(&fp->eth_q_stats, 0,
7545 sizeof(struct bxe_q_stats));
7548 /* ToDo: Clear any driver specific statistics? */
7550 sc->stats_state = STATS_STATE_DISABLED;
7552 if (sc->port.pmf == 1) {
7553 /* Init port & function stats if we're PMF. */
7554 if (sc->port.port_stx)
7555 bxe_stats_port_base_init(sc);
7557 bxe_stats_func_base_init(sc);
7558 } else if (sc->func_stx)
7559 /* Update function stats if we're not PMF. */
7560 bxe_stats_func_base_update(sc);
7562 bxe_stats_init_exit:
7563 DBEXIT(BXE_VERBOSE_STATS);
7572 bxe_stats_hw_post(struct bxe_softc *sc)
7574 struct dmae_command *dmae;
7575 uint32_t *stats_comp;
7578 DBENTER(BXE_INSANE_STATS);
7580 dmae = &sc->stats_dmae;
7581 stats_comp = BXE_SP(sc, stats_comp);
7582 *stats_comp = DMAE_COMP_VAL;
7584 if (sc->executer_idx) {
7585 loader_idx = PMF_DMAE_C(sc);
7587 memset(dmae, 0, sizeof(struct dmae_command));
7589 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7590 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
7591 DMAE_CMD_DST_RESET |
7593 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7595 DMAE_CMD_ENDIANITY_DW_SWAP |
7597 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7598 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7600 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
7601 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
7602 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
7603 sizeof(struct dmae_command) * (loader_idx + 1)) >> 2;
7604 dmae->dst_addr_hi = 0;
7605 dmae->len = sizeof(struct dmae_command) >> 2;
7610 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
7611 dmae->comp_addr_hi = 0;
7615 bxe_post_dmae(sc, dmae, loader_idx);
7617 } else if (sc->func_stx) {
7619 bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
7622 DBEXIT(BXE_INSANE_STATS);
7626 * Delay routine which polls for the DMA engine to complete.
7629 * 0 = Failure, !0 = Success
7632 bxe_stats_comp(struct bxe_softc *sc)
7634 uint32_t *stats_comp;
7637 DBENTER(BXE_VERBOSE_STATS);
7639 stats_comp = BXE_SP(sc, stats_comp);
7642 while (*stats_comp != DMAE_COMP_VAL) {
7644 BXE_PRINTF("%s(%d): Timeout waiting for statistics "
7645 "completions.\n", __FILE__, __LINE__);
7652 DBEXIT(BXE_VERBOSE_STATS);
7653 /* ToDo: Shouldn't this return the value of cnt? */
7658 * DMA port statistics from controller to driver.
7664 bxe_stats_pmf_update(struct bxe_softc *sc)
7666 struct dmae_command *dmae;
7667 uint32_t opcode, *stats_comp;
7670 DBENTER(BXE_VERBOSE_STATS);
7672 stats_comp = BXE_SP(sc, stats_comp);
7673 loader_idx = PMF_DMAE_C(sc);
7675 /* We shouldn't be here if any of the following are false. */
7676 if (!IS_E1HMF(sc) || (sc->port.pmf == 0) || !sc->port.port_stx) {
7677 BXE_PRINTF("%s(%d): Statistics bug!\n", __FILE__, __LINE__);
7678 goto bxe_stats_pmf_update_exit;
7681 sc->executer_idx = 0;
7683 /* Instruct DMA engine to copy port statistics from H/W to driver. */
7684 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7685 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7686 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7688 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7690 DMAE_CMD_ENDIANITY_DW_SWAP |
7692 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7693 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7695 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7696 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
7697 dmae->src_addr_lo = sc->port.port_stx >> 2;
7698 dmae->src_addr_hi = 0;
7699 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
7700 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
7701 dmae->len = DMAE_LEN32_RD_MAX;
7702 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7703 dmae->comp_addr_hi = 0;
7706 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7707 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
7708 dmae->src_addr_lo = (sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
7709 dmae->src_addr_hi = 0;
7710 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
7711 DMAE_LEN32_RD_MAX * 4);
7712 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
7713 DMAE_LEN32_RD_MAX * 4);
7714 dmae->len = (sizeof(struct host_port_stats) >> 2) -
7716 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7717 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7718 dmae->comp_val = DMAE_COMP_VAL;
7720 /* Start the DMA and wait for the result. */
7722 bxe_stats_hw_post(sc);
7725 bxe_stats_pmf_update_exit:
7726 DBEXIT(BXE_VERBOSE_STATS);
7730 * Prepare the DMAE parameters required for all statistics.
7732 * This function should only be called by the driver instance
7733 * that is designated as the port management function (PMF).
/*
 * Build the DMAE command chain that services all port-level statistics:
 * push host port/function stats out to shared memory, then pull MAC
 * (BMAC or EMAC, whichever link_vars says is active) and NIG counters
 * from hardware into the driver's buffers.  The final DMAE entry
 * completes to stats_comp with DMAE_COMP_VAL.
 *
 * Must only be run by the port management function (PMF) with link up;
 * exits early (no return value) on an invalid setup.
 */
7739 bxe_stats_port_init(struct bxe_softc *sc)
7741 struct dmae_command *dmae;
7742 uint32_t mac_addr, opcode, *stats_comp;
7743 int loader_idx, port, vn;
7745 DBENTER(BXE_VERBOSE_STATS);
7749 loader_idx = PMF_DMAE_C(sc);
7750 stats_comp = BXE_SP(sc, stats_comp);
7752 /* Only the port management function (PMF) does this work. */
7753 if (!sc->link_vars.link_up || (sc->port.pmf == 0)) {
7754 BXE_PRINTF("%s(%d): Invalid statistics port setup!\n",
7755 __FILE__, __LINE__);
7756 goto bxe_stats_port_init_exit;
7759 sc->executer_idx = 0;
7761 /* The same opcode is used for multiple DMA operations. */
7762 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7763 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
7764 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7766 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7768 DMAE_CMD_ENDIANITY_DW_SWAP |
7770 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7771 (vn << DMAE_CMD_E1HVN_SHIFT));
7773 /* Setup the DMA for port statistics. */
7774 if (sc->port.port_stx) {
7775 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7776 dmae->opcode = opcode;
7777 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
7778 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
7779 dmae->dst_addr_lo = sc->port.port_stx >> 2;
7780 dmae->dst_addr_hi = 0;
7781 dmae->len = sizeof(struct host_port_stats) >> 2;
/* Intermediate entries complete via the DMAE loader register, not PCI. */
7782 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7783 dmae->comp_addr_hi = 0;
7787 /* Setup the DMA for function statistics. */
7789 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7790 dmae->opcode = opcode;
7791 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
7792 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
7793 dmae->dst_addr_lo = sc->func_stx >> 2;
7794 dmae->dst_addr_hi = 0;
7795 dmae->len = sizeof(struct host_func_stats) >> 2;
7796 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7797 dmae->comp_addr_hi = 0;
7801 /* Setup statistics reporting for the MAC. */
/* Direction flips here: subsequent entries read GRC (hardware) into PCI. */
7802 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7803 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
7804 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7806 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7808 DMAE_CMD_ENDIANITY_DW_SWAP |
7810 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7811 (vn << DMAE_CMD_E1HVN_SHIFT));
7813 if (sc->link_vars.mac_type == MAC_TYPE_BMAC) {
7814 /* Enable statistics for the 10Gb BMAC. */
7816 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
7817 NIG_REG_INGRESS_BMAC0_MEM);
7819 /* Setup BMAC TX statistics (TX_STAT_GTPKT .. TX_STAT_GTBYT). */
7820 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7821 dmae->opcode = opcode;
7822 dmae->src_addr_lo = (mac_addr +
7823 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
7824 dmae->src_addr_hi = 0;
7825 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
7826 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
/* Length in dwords; "+ 8" covers the full last 64-bit counter. */
7827 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
7828 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
7829 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7830 dmae->comp_addr_hi = 0;
7833 /* Setup BMAC RX statistics (RX_STAT_GR64 .. RX_STAT_GRIPJ). */
7834 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7835 dmae->opcode = opcode;
7836 dmae->src_addr_lo = (mac_addr +
7837 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
7838 dmae->src_addr_hi = 0;
7839 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
7840 offsetof(struct bmac_stats, rx_stat_gr64_lo));
7841 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
7842 offsetof(struct bmac_stats, rx_stat_gr64_lo));
7843 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
7844 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
7845 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7846 dmae->comp_addr_hi = 0;
7849 } else if (sc->link_vars.mac_type == MAC_TYPE_EMAC) {
7850 /* Enable statistics for the 1Gb EMAC. */
7852 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
7854 /* Setup EMAC RX statistics. */
7855 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7856 dmae->opcode = opcode;
7857 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
7858 dmae->src_addr_hi = 0;
7859 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
7860 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
7861 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
7862 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7863 dmae->comp_addr_hi = 0;
7866 /* Setup additional EMAC RX statistics. */
7867 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7868 dmae->opcode = opcode;
7869 dmae->src_addr_lo = (mac_addr +
7870 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
7871 dmae->src_addr_hi = 0;
7872 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
7873 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
7874 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
7875 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
7877 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7878 dmae->comp_addr_hi = 0;
7881 /* Setup EMAC TX statistics. */
7882 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7883 dmae->opcode = opcode;
7884 dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2;
7885 dmae->src_addr_hi = 0;
7886 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
7887 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
7888 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
7889 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
7890 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
7891 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7892 dmae->comp_addr_hi = 0;
7895 DBPRINT(sc, BXE_WARN, "%s(): Undefined MAC type.\n",
7899 /* Enable NIG statistics. */
7900 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7901 dmae->opcode = opcode;
7902 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
7903 NIG_REG_STAT0_BRB_DISCARD) >> 2;
7904 dmae->src_addr_hi = 0;
7905 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
7906 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
/* Skip the trailing egress_mac_pkt0/1 pairs; fetched separately below. */
7907 dmae->len = (sizeof(struct nig_stats) - 4 * sizeof(uint32_t)) >> 2;
7908 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7909 dmae->comp_addr_hi = 0;
7912 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7913 dmae->opcode = opcode;
7914 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
7915 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
7916 dmae->src_addr_hi = 0;
7917 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
7918 offsetof(struct nig_stats, egress_mac_pkt0_lo));
7919 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
7920 offsetof(struct nig_stats, egress_mac_pkt0_lo));
7921 dmae->len = (2 * sizeof(uint32_t)) >> 2;
7922 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
7923 dmae->comp_addr_hi = 0;
/*
 * Final entry in the chain: completes to stats_comp in host memory so
 * the driver can detect that the whole statistics DMA has finished.
 */
7926 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
7927 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
7928 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7929 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7931 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7933 DMAE_CMD_ENDIANITY_DW_SWAP |
7935 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7936 (vn << DMAE_CMD_E1HVN_SHIFT));
7937 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
7938 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
7939 dmae->src_addr_hi = 0;
7940 dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
7941 offsetof(struct nig_stats, egress_mac_pkt1_lo));
7942 dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
7943 offsetof(struct nig_stats, egress_mac_pkt1_lo));
7944 dmae->len = (2 * sizeof(uint32_t)) >> 2;
7945 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
7946 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
7947 dmae->comp_val = DMAE_COMP_VAL;
7949 /* Clear the statistics completion value. */
7952 bxe_stats_port_init_exit:
7953 DBEXIT(BXE_VERBOSE_STATS);
7957 * Prepare the DMAE parameters required for function statistics.
7959 * This function is called by all driver instances.
/*
 * Prepare the single DMAE command (sc->stats_dmae) that copies this
 * function's statistics (host_func_stats) from host memory to the
 * shared-memory location in func_stx.  Unlike the port chain, the
 * command completes directly to stats_comp.  Called by every driver
 * instance, not just the PMF; exits early if func_stx is unset.
 */
7965 bxe_stats_func_init(struct bxe_softc *sc)
7967 struct dmae_command *dmae;
7968 uint32_t *stats_comp;
7970 DBENTER(BXE_VERBOSE_STATS);
7972 if (!sc->func_stx) {
7973 BXE_PRINTF("%s(%d): Invalid statistics function setup!\n",
7974 __FILE__, __LINE__);
7975 goto bxe_stats_func_init_exit;
7978 dmae = &sc->stats_dmae;
7979 stats_comp = BXE_SP(sc, stats_comp);
7980 sc->executer_idx = 0;
/* Start from a clean command; every field is set explicitly below. */
7981 memset(dmae, 0, sizeof(struct dmae_command));
7983 /* Setup the DMA for function statistics. */
7984 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
7985 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
7986 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
7988 DMAE_CMD_ENDIANITY_B_DW_SWAP |
7990 DMAE_CMD_ENDIANITY_DW_SWAP |
7992 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
7993 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
7995 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
7996 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
7997 dmae->dst_addr_lo = sc->func_stx >> 2;
7998 dmae->dst_addr_hi = 0;
7999 dmae->len = sizeof(struct host_func_stats) >> 2;
8000 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
8001 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
8002 dmae->comp_val = DMAE_COMP_VAL;
8006 bxe_stats_func_init_exit:
8007 DBEXIT(BXE_VERBOSE_STATS);
8011 * Starts a statistics update DMA and waits for completion.
/*
 * Initialize the appropriate statistics DMAE setup (port-level when
 * this instance is the PMF, function-level otherwise) and then post
 * the first hardware and STORM statistics requests.
 */
8017 bxe_stats_start(struct bxe_softc *sc)
8020 DBENTER(BXE_VERBOSE_STATS);
8022 if (sc->port.pmf == 1)
8023 bxe_stats_port_init(sc);
8024 else if (sc->func_stx)
8025 bxe_stats_func_init(sc);
/* Kick off the initial update so counters begin flowing immediately. */
8027 bxe_stats_hw_post(sc);
8028 bxe_stats_storm_post(sc);
8030 DBEXIT(BXE_VERBOSE_STATS);
/*
 * Statistics entry point used when this instance becomes the PMF:
 * refresh the PMF-maintained counters, then (re)start collection.
 */
8038 bxe_stats_pmf_start(struct bxe_softc *sc)
8040 DBENTER(BXE_VERBOSE_STATS);
8043 bxe_stats_pmf_update(sc);
8044 bxe_stats_start(sc);
8046 DBEXIT(BXE_VERBOSE_STATS);
/*
 * Restart statistics collection (e.g. on a link-up event while
 * already enabled) by re-running the start path.
 */
8054 bxe_stats_restart(struct bxe_softc *sc)
8057 DBENTER(BXE_VERBOSE_STATS);
8060 bxe_stats_start(sc);
8062 DBEXIT(BXE_VERBOSE_STATS);
8066 * Update the Big MAC (10Gb BMAC) statistics.
/*
 * Fold freshly DMA'd 10Gb BMAC counters into the accumulated port
 * statistics.  The UPDATE_STAT64 macros compute the delta between the
 * new hardware snapshot and the previous one and add it to the named
 * mac_stx field; they rely on the local names "new", "pstats",
 * "estats" and "diff" declared here.
 */
8072 bxe_stats_bmac_update(struct bxe_softc *sc)
8074 struct bmac_stats *new;
8075 struct host_port_stats *pstats;
8076 struct bxe_port_stats *estats;
8077 struct regpair diff;
8079 DBENTER(BXE_INSANE_STATS);
8081 new = BXE_SP(sc, mac_stats.bmac_stats);
8082 pstats = BXE_SP(sc, port_stats);
8083 estats = &sc->eth_stats;
8085 UPDATE_STAT64(rx_stat_grerb,
8086 rx_stat_ifhcinbadoctets);
8087 UPDATE_STAT64(rx_stat_grfcs,
8088 rx_stat_dot3statsfcserrors);
8089 UPDATE_STAT64(rx_stat_grund,
8090 rx_stat_etherstatsundersizepkts);
8091 UPDATE_STAT64(rx_stat_grovr,
8092 rx_stat_dot3statsframestoolong);
8093 UPDATE_STAT64(rx_stat_grfrg,
8094 rx_stat_etherstatsfragments);
8095 UPDATE_STAT64(rx_stat_grjbr,
8096 rx_stat_etherstatsjabbers);
8097 UPDATE_STAT64(rx_stat_grxcf,
8098 rx_stat_maccontrolframesreceived);
8099 UPDATE_STAT64(rx_stat_grxpf,
8100 rx_stat_xoffstateentered);
8101 UPDATE_STAT64(rx_stat_grxpf,
8103 UPDATE_STAT64(tx_stat_gtxpf,
8104 tx_stat_outxoffsent);
8105 UPDATE_STAT64(tx_stat_gtxpf,
8106 tx_stat_flowcontroldone);
8107 UPDATE_STAT64(tx_stat_gt64,
8108 tx_stat_etherstatspkts64octets);
8109 UPDATE_STAT64(tx_stat_gt127,
8110 tx_stat_etherstatspkts65octetsto127octets);
8111 UPDATE_STAT64(tx_stat_gt255,
8112 tx_stat_etherstatspkts128octetsto255octets);
8113 UPDATE_STAT64(tx_stat_gt511,
8114 tx_stat_etherstatspkts256octetsto511octets);
8115 UPDATE_STAT64(tx_stat_gt1023,
8116 tx_stat_etherstatspkts512octetsto1023octets);
8117 UPDATE_STAT64(tx_stat_gt1518,
8118 tx_stat_etherstatspkts1024octetsto1522octets);
8119 UPDATE_STAT64(tx_stat_gt2047,
8121 UPDATE_STAT64(tx_stat_gt4095,
8123 UPDATE_STAT64(tx_stat_gt9216,
8125 UPDATE_STAT64(tx_stat_gt16383,
8126 tx_stat_bmac_16383);
8127 UPDATE_STAT64(tx_stat_gterr,
8128 tx_stat_dot3statsinternalmactransmiterrors);
8129 UPDATE_STAT64(tx_stat_gtufl,
/* Mirror the accumulated pause counters into the ethernet stats. */
8132 estats->pause_frames_received_hi =
8133 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
8134 estats->pause_frames_received_lo =
8135 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
8136 estats->pause_frames_sent_hi =
8137 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
8138 estats->pause_frames_sent_lo =
8139 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
8141 DBEXIT(BXE_INSANE_STATS);
8145 * Update the Ethernet MAC (1Gb EMAC) statistics.
/*
 * Fold freshly DMA'd 1Gb EMAC counters into the accumulated port
 * statistics.  EMAC hardware counters are 32 bits wide, so the
 * UPDATE_EXTEND_STAT macros extend each delta into the 64-bit
 * (hi/lo) mac_stx accumulators; they rely on the local names "new",
 * "pstats" and "estats" declared here.
 */
8151 bxe_stats_emac_update(struct bxe_softc *sc)
8153 struct emac_stats *new;
8154 struct host_port_stats *pstats;
8155 struct bxe_port_stats *estats;
8157 DBENTER(BXE_INSANE_STATS);
8159 new = BXE_SP(sc, mac_stats.emac_stats);
8160 pstats = BXE_SP(sc, port_stats);
8161 estats = &sc->eth_stats;
8163 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
8164 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
8165 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
8166 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
8167 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
8168 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
8169 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
8170 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
8171 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
8172 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
8173 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
8174 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
8175 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
8176 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
8177 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
8178 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
8179 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
8180 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
8181 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
8182 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
8183 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
8184 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
8185 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
8186 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
8187 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
8188 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
8189 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
8190 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
8191 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
8192 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
8193 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* Pause totals are XON + XOFF; ADD_64 does 64-bit hi/lo addition. */
8195 estats->pause_frames_received_hi =
8196 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
8197 estats->pause_frames_received_lo =
8198 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
8199 ADD_64(estats->pause_frames_received_hi,
8200 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
8201 estats->pause_frames_received_lo,
8202 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
8204 estats->pause_frames_sent_hi =
8205 pstats->mac_stx[1].tx_stat_outxonsent_hi;
8206 estats->pause_frames_sent_lo =
8207 pstats->mac_stx[1].tx_stat_outxonsent_lo;
8208 ADD_64(estats->pause_frames_sent_hi,
8209 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
8210 estats->pause_frames_sent_lo,
8211 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
8213 DBEXIT(BXE_INSANE_STATS);
8218 * 0 = Success, !0 = Failure.
/*
 * Update hardware-maintained statistics: dispatch to the BMAC or EMAC
 * update routine for the active MAC, then accumulate NIG counters
 * (BRB discards/truncates, egress packet counters) and publish the
 * result into the driver's ethernet stats.
 *
 * Returns 0 on success, non-zero if no MAC is active (per the
 * "0 = Success, !0 = Failure" convention noted above).
 */
8221 bxe_stats_hw_update(struct bxe_softc *sc)
8223 struct nig_stats *new, *old;
8224 struct host_port_stats *pstats;
8225 struct bxe_port_stats *estats;
8226 struct regpair diff;
8227 uint32_t nig_timer_max;
8230 DBENTER(BXE_INSANE_STATS);
8233 new = BXE_SP(sc, nig_stats);
8234 old = &(sc->port.old_nig_stats);
8235 pstats = BXE_SP(sc, port_stats);
8236 estats = &sc->eth_stats;
8238 /* Update statistics for the active MAC. */
8239 if (sc->link_vars.mac_type == MAC_TYPE_BMAC)
8240 bxe_stats_bmac_update(sc);
8241 else if (sc->link_vars.mac_type == MAC_TYPE_EMAC)
8242 bxe_stats_emac_update(sc);
8244 DBPRINT(sc, BXE_WARN,
8245 "%s(): Statistics updated by DMAE but no MAC is active!\n",
8248 goto bxe_stats_hw_update_exit;
8251 /* Now update the hardware (NIG) statistics. */
8252 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
8253 new->brb_discard - old->brb_discard);
8254 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
8255 new->brb_truncate - old->brb_truncate);
8257 UPDATE_STAT64_NIG(egress_mac_pkt0,
8258 etherstatspkts1024octetsto1522octets);
8259 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
/* Save the new snapshot so the next pass can compute deltas. */
8261 memcpy(old, new, sizeof(struct nig_stats));
8263 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
8264 sizeof(struct mac_stx));
8265 estats->brb_drop_hi = pstats->brb_drop_hi;
8266 estats->brb_drop_lo = pstats->brb_drop_lo;
/* Advance the start/end sequence pair marking this snapshot complete. */
8268 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
8272 SHMEM_RD(sc, port_mb[BP_PORT(sc)].stat_nig_timer);
8273 if (nig_timer_max != estats->nig_timer_max) {
8274 estats->nig_timer_max = nig_timer_max;
8275 DBPRINT(sc, BXE_WARN,
8276 "%s(): NIG timer reached max value (%u)!\n",
8277 __FUNCTION__, estats->nig_timer_max);
8281 bxe_stats_hw_update_exit:
8282 DBEXIT(BXE_INSANE_STATS);
8288 * 0 = Success, !0 = Failure.
/*
 * Update statistics maintained by the STORM firmware processors
 * (TSTORM/USTORM/XSTORM).  For each RX/TX queue, validate that every
 * STORM's per-client stats counter matches the driver's expected
 * sequence (otherwise bail out), then accumulate the per-client byte
 * and packet counters into the per-queue (qstats), per-function
 * (fstats) and port-wide (estats) totals.
 *
 * Returns 0 on success, non-zero if any STORM's counters were stale
 * (per the "0 = Success, !0 = Failure" convention noted above).
 */
8292 bxe_stats_storm_update(struct bxe_softc *sc)
8295 struct eth_stats_query *stats;
8296 struct bxe_port_stats *estats;
8297 struct host_func_stats *fstats;
8298 struct bxe_q_stats *qstats;
8299 struct tstorm_per_port_stats *tport;
8300 struct tstorm_per_client_stats *tclient;
8301 struct ustorm_per_client_stats *uclient;
8302 struct xstorm_per_client_stats *xclient;
8303 struct tstorm_per_client_stats *old_tclient;
8304 struct ustorm_per_client_stats *old_uclient;
8305 struct xstorm_per_client_stats *old_xclient;
8306 struct bxe_fastpath * fp;
8309 DBENTER(BXE_INSANE_STATS);
8313 stats = BXE_SP(sc, fw_stats);
8314 tport = &stats->tstorm_common.port_statistics;
8315 fstats = BXE_SP(sc, func_stats);
/* Reset the function totals to the saved baseline before re-accumulating. */
8317 memcpy(&(fstats->total_bytes_received_hi),
8318 &(BXE_SP(sc, func_stats_base)->total_bytes_received_hi),
8319 sizeof(struct host_func_stats) - 2 * sizeof(uint32_t));
8321 estats = &sc->eth_stats;
8322 estats->no_buff_discard_hi = 0;
8323 estats->no_buff_discard_lo = 0;
8324 estats->error_bytes_received_hi = 0;
8325 estats->error_bytes_received_lo = 0;
8326 estats->etherstatsoverrsizepkts_hi = 0;
8327 estats->etherstatsoverrsizepkts_lo = 0;
/* Walk every queue, validating and folding in its STORM counters. */
8329 for (i = 0; i < sc->num_queues; i++) {
8332 tclient = &stats->tstorm_common.client_statistics[cl_id];
8333 old_tclient = &fp->old_tclient;
8334 uclient = &stats->ustorm_common.client_statistics[cl_id];
8335 old_uclient = &fp->old_uclient;
8336 xclient = &stats->xstorm_common.client_statistics[cl_id];
8337 old_xclient = &fp->old_xclient;
8338 qstats = &fp->eth_q_stats;
8340 /* Are TSTORM statistics valid? */
8341 if ((uint16_t)(le16toh(tclient->stats_counter) + 1) !=
8342 sc->stats_counter) {
8343 DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by TSTORM "
8344 "(tstorm counter (%d) != stats_counter (%d))!\n",
8345 __FUNCTION__, tclient->stats_counter, sc->stats_counter);
8347 goto bxe_stats_storm_update_exit;
8350 /* Are USTORM statistics valid? */
8351 if ((uint16_t)(le16toh(uclient->stats_counter) + 1) !=
8352 sc->stats_counter) {
8353 DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by USTORM "
8354 "(ustorm counter (%d) != stats_counter (%d))!\n",
8355 __FUNCTION__, uclient->stats_counter, sc->stats_counter);
8357 goto bxe_stats_storm_update_exit;
8360 /* Are XSTORM statistics valid? */
8361 if ((uint16_t)(le16toh(xclient->stats_counter) + 1) !=
8362 sc->stats_counter) {
8363 DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by XSTORM "
8364 "(xstorm counter (%d) != stats_counter (%d))!\n",
8365 __FUNCTION__, xclient->stats_counter, sc->stats_counter);
8367 goto bxe_stats_storm_update_exit;
/* RX bytes = bcast + mcast + ucast, less the no-buffer drops. */
8370 qstats->total_bytes_received_hi =
8371 (tclient->rcv_broadcast_bytes.hi);
8372 qstats->total_bytes_received_lo =
8373 le32toh(tclient->rcv_broadcast_bytes.lo);
8375 ADD_64(qstats->total_bytes_received_hi,
8376 le32toh(tclient->rcv_multicast_bytes.hi),
8377 qstats->total_bytes_received_lo,
8378 le32toh(tclient->rcv_multicast_bytes.lo));
8380 ADD_64(qstats->total_bytes_received_hi,
8381 le32toh(tclient->rcv_unicast_bytes.hi),
8382 qstats->total_bytes_received_lo,
8383 le32toh(tclient->rcv_unicast_bytes.lo));
8385 SUB_64(qstats->total_bytes_received_hi,
8386 le32toh(uclient->bcast_no_buff_bytes.hi),
8387 qstats->total_bytes_received_lo,
8388 le32toh(uclient->bcast_no_buff_bytes.lo));
8390 SUB_64(qstats->total_bytes_received_hi,
8391 le32toh(uclient->mcast_no_buff_bytes.hi),
8392 qstats->total_bytes_received_lo,
8393 le32toh(uclient->mcast_no_buff_bytes.lo));
8395 SUB_64(qstats->total_bytes_received_hi,
8396 le32toh(uclient->ucast_no_buff_bytes.hi),
8397 qstats->total_bytes_received_lo,
8398 le32toh(uclient->ucast_no_buff_bytes.lo));
8400 qstats->valid_bytes_received_hi =
8401 qstats->total_bytes_received_hi;
8402 qstats->valid_bytes_received_lo =
8403 qstats->total_bytes_received_lo;
8405 qstats->error_bytes_received_hi =
8406 le32toh(tclient->rcv_error_bytes.hi);
8407 qstats->error_bytes_received_lo =
8408 le32toh(tclient->rcv_error_bytes.lo);
8410 ADD_64(qstats->total_bytes_received_hi,
8411 qstats->error_bytes_received_hi,
8412 qstats->total_bytes_received_lo,
8413 qstats->error_bytes_received_lo);
8415 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
8416 total_unicast_packets_received);
8417 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
8418 total_multicast_packets_received);
8419 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
8420 total_broadcast_packets_received);
8421 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
8422 etherstatsoverrsizepkts);
8423 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
8425 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
8426 total_unicast_packets_received);
8427 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
8428 total_multicast_packets_received);
8429 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
8430 total_broadcast_packets_received);
8431 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
8432 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
8433 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
/* TX bytes = ucast + mcast + bcast from XSTORM. */
8435 qstats->total_bytes_transmitted_hi =
8436 le32toh(xclient->unicast_bytes_sent.hi);
8437 qstats->total_bytes_transmitted_lo =
8438 le32toh(xclient->unicast_bytes_sent.lo);
8440 ADD_64(qstats->total_bytes_transmitted_hi,
8441 le32toh(xclient->multicast_bytes_sent.hi),
8442 qstats->total_bytes_transmitted_lo,
8443 le32toh(xclient->multicast_bytes_sent.lo));
8445 ADD_64(qstats->total_bytes_transmitted_hi,
8446 le32toh(xclient->broadcast_bytes_sent.hi),
8447 qstats->total_bytes_transmitted_lo,
8448 le32toh(xclient->broadcast_bytes_sent.lo));
8450 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
8451 total_unicast_packets_transmitted);
8453 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
8454 total_multicast_packets_transmitted);
8456 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
8457 total_broadcast_packets_transmitted);
8459 old_tclient->checksum_discard = tclient->checksum_discard;
8460 old_tclient->ttl0_discard = tclient->ttl0_discard;
/* Accumulate this queue's totals into the function-level totals. */
8462 ADD_64(fstats->total_bytes_received_hi,
8463 qstats->total_bytes_received_hi,
8464 fstats->total_bytes_received_lo,
8465 qstats->total_bytes_received_lo);
8466 ADD_64(fstats->total_bytes_transmitted_hi,
8467 qstats->total_bytes_transmitted_hi,
8468 fstats->total_bytes_transmitted_lo,
8469 qstats->total_bytes_transmitted_lo);
8470 ADD_64(fstats->total_unicast_packets_received_hi,
8471 qstats->total_unicast_packets_received_hi,
8472 fstats->total_unicast_packets_received_lo,
8473 qstats->total_unicast_packets_received_lo);
8474 ADD_64(fstats->total_multicast_packets_received_hi,
8475 qstats->total_multicast_packets_received_hi,
8476 fstats->total_multicast_packets_received_lo,
8477 qstats->total_multicast_packets_received_lo);
8478 ADD_64(fstats->total_broadcast_packets_received_hi,
8479 qstats->total_broadcast_packets_received_hi,
8480 fstats->total_broadcast_packets_received_lo,
8481 qstats->total_broadcast_packets_received_lo);
8482 ADD_64(fstats->total_unicast_packets_transmitted_hi,
8483 qstats->total_unicast_packets_transmitted_hi,
8484 fstats->total_unicast_packets_transmitted_lo,
8485 qstats->total_unicast_packets_transmitted_lo);
8486 ADD_64(fstats->total_multicast_packets_transmitted_hi,
8487 qstats->total_multicast_packets_transmitted_hi,
8488 fstats->total_multicast_packets_transmitted_lo,
8489 qstats->total_multicast_packets_transmitted_lo);
8490 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
8491 qstats->total_broadcast_packets_transmitted_hi,
8492 fstats->total_broadcast_packets_transmitted_lo,
8493 qstats->total_broadcast_packets_transmitted_lo);
8494 ADD_64(fstats->valid_bytes_received_hi,
8495 qstats->valid_bytes_received_hi,
8496 fstats->valid_bytes_received_lo,
8497 qstats->valid_bytes_received_lo);
8499 ADD_64(estats->error_bytes_received_hi,
8500 qstats->error_bytes_received_hi,
8501 estats->error_bytes_received_lo,
8502 qstats->error_bytes_received_lo);
8503 ADD_64(estats->etherstatsoverrsizepkts_hi,
8504 qstats->etherstatsoverrsizepkts_hi,
8505 estats->etherstatsoverrsizepkts_lo,
8506 qstats->etherstatsoverrsizepkts_lo);
8507 ADD_64(estats->no_buff_discard_hi,
8508 qstats->no_buff_discard_hi,
8509 estats->no_buff_discard_lo,
8510 qstats->no_buff_discard_lo);
8513 ADD_64(fstats->total_bytes_received_hi,
8514 estats->rx_stat_ifhcinbadoctets_hi,
8515 fstats->total_bytes_received_lo,
8516 estats->rx_stat_ifhcinbadoctets_lo);
/* Publish the accumulated function stats into the ethernet stats. */
8518 memcpy(estats, &(fstats->total_bytes_received_hi),
8519 sizeof(struct host_func_stats) - 2 * sizeof(uint32_t));
8521 ADD_64(estats->etherstatsoverrsizepkts_hi,
8522 estats->rx_stat_dot3statsframestoolong_hi,
8523 estats->etherstatsoverrsizepkts_lo,
8524 estats->rx_stat_dot3statsframestoolong_lo);
8525 ADD_64(estats->error_bytes_received_hi,
8526 estats->rx_stat_ifhcinbadoctets_hi,
8527 estats->error_bytes_received_lo,
8528 estats->rx_stat_ifhcinbadoctets_lo);
8531 estats->mac_filter_discard =
8532 le32toh(tport->mac_filter_discard);
8533 estats->xxoverflow_discard =
8534 le32toh(tport->xxoverflow_discard);
8535 estats->brb_truncate_discard =
8536 le32toh(tport->brb_truncate_discard);
8537 estats->mac_discard = le32toh(tport->mac_discard);
/* Mark the snapshot consistent (start == end) and clear the pending flag. */
8540 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
8542 sc->stats_pending = 0;
8544 bxe_stats_storm_update_exit:
8546 DBEXIT(BXE_INSANE_STATS);
8551 * Copy the controller maintained statistics over to the OS.
/*
 * Copy the controller-maintained statistics into the OS network
 * interface counters (collisions, input/output errors, input/output
 * packets).  bxe_hilo() folds a hi/lo 64-bit counter pair into a
 * single value; only the low words of most counters are used for
 * the u_long OS fields.
 */
8557 bxe_stats_net_update(struct bxe_softc *sc)
8559 struct tstorm_per_client_stats *old_tclient;
8560 struct bxe_port_stats *estats;
8563 DBENTER(BXE_INSANE_STATS);
/* Queue 0's saved TSTORM stats supply the checksum-discard count. */
8565 old_tclient = &sc->fp[0].old_tclient;
8566 estats = &sc->eth_stats;
8570 * Update the OS interface statistics from
8571 * the hardware statistics.
8574 ifp->if_collisions =
8575 (u_long) estats->tx_stat_dot3statssinglecollisionframes_lo +
8576 (u_long) estats->tx_stat_dot3statsmultiplecollisionframes_lo +
8577 (u_long) estats->tx_stat_dot3statslatecollisions_lo +
8578 (u_long) estats->tx_stat_dot3statsexcessivecollisions_lo;
8581 (u_long) old_tclient->checksum_discard +
8582 (u_long) estats->no_buff_discard_lo +
8583 (u_long) estats->mac_discard +
8584 (u_long) estats->rx_stat_etherstatsundersizepkts_lo +
8585 (u_long) estats->brb_drop_lo +
8586 (u_long) estats->brb_truncate_discard +
8587 (u_long) estats->rx_stat_dot3statsfcserrors_lo +
8588 (u_long) estats->rx_stat_dot3statsalignmenterrors_lo +
8589 (u_long) estats->xxoverflow_discard;
8592 (u_long) estats->tx_stat_dot3statslatecollisions_lo +
8593 (u_long) estats->tx_stat_dot3statsexcessivecollisions_lo +
8594 (u_long) estats->tx_stat_dot3statsinternalmactransmiterrors_lo;
8597 bxe_hilo(&estats->total_unicast_packets_received_hi) +
8598 bxe_hilo(&estats->total_multicast_packets_received_hi) +
8599 bxe_hilo(&estats->total_broadcast_packets_received_hi);
8602 bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
8603 bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
8604 bxe_hilo(&estats->total_broadcast_packets_transmitted_hi);
8606 DBEXIT(BXE_INSANE_STATS);
/*
 * Periodic statistics update handler: once the previous statistics
 * DMAE has completed (stats_comp == DMAE_COMP_VAL), refresh hardware
 * stats (PMF only) and STORM stats, propagate any updates to the OS,
 * track how long a STORM update has been pending (giving up after 3
 * tries), and finally post the next statistics request.
 */
8615 bxe_stats_update(struct bxe_softc *sc)
8617 uint32_t *stats_comp;
8620 DBENTER(BXE_INSANE_STATS);
8622 stats_comp = BXE_SP(sc, stats_comp);
8625 /* Make sure the statistics DMAE update has completed. */
8626 if (*stats_comp != DMAE_COMP_VAL)
8627 goto bxe_stats_update_exit;
8629 /* Check for any hardware statistics updates. */
8630 if (sc->port.pmf == 1)
8631 update = (bxe_stats_hw_update(sc) == 0);
8633 /* Check for any STORM statistics updates. */
8634 update |= (bxe_stats_storm_update(sc) == 0);
8636 /* If we got updated hardware statistics then update the OS. */
8638 bxe_stats_net_update(sc);
8640 /* Check if any statistics updates are pending. */
8641 if (sc->stats_pending) {
8642 /* The update hasn't completed, keep waiting. */
8643 sc->stats_pending++;
8645 /* Have we been waiting for too long? */
8646 if (sc->stats_pending >= 3) {
8648 "%s(%d): Failed to get statistics after "
8649 "3 tries!\n", __FILE__, __LINE__);
8651 goto bxe_stats_update_exit;
8656 /* Kickoff the next statistics request. */
8657 bxe_stats_hw_post(sc);
8658 bxe_stats_storm_post(sc);
8660 bxe_stats_update_exit:
8661 DBEXIT(BXE_INSANE_STATS);
/*
 * Build the final DMAE command(s) that flush the port statistics
 * (PMF only) and the function statistics out to shared memory before
 * statistics collection is stopped.  When both port and function
 * commands are queued, the port entry completes via the DMAE loader
 * and the function entry carries the stats_comp completion; when only
 * the port entry exists it completes to stats_comp itself.
 */
8672 bxe_stats_port_stop(struct bxe_softc *sc)
8673 struct dmae_command *dmae;
8674 uint32_t opcode, *stats_comp;
8676 DBENTER(BXE_VERBOSE_STATS);
8678 stats_comp = BXE_SP(sc, stats_comp);
8679 loader_idx = PMF_DMAE_C(sc);
8680 sc->executer_idx = 0;
8682 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
8684 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
8686 DMAE_CMD_ENDIANITY_B_DW_SWAP |
8688 DMAE_CMD_ENDIANITY_DW_SWAP |
8690 (BP_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
8691 (BP_E1HVN(sc) << DMAE_CMD_E1HVN_SHIFT));
8693 if (sc->port.port_stx) {
8694 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
/* Completion target depends on whether a function entry follows. */
8697 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
8699 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
8701 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
8702 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
8703 dmae->dst_addr_lo = sc->port.port_stx >> 2;
8704 dmae->dst_addr_hi = 0;
8705 dmae->len = sizeof(struct host_port_stats) >> 2;
8708 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
8709 dmae->comp_addr_hi = 0;
8712 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc,
8714 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc,
8716 dmae->comp_val = DMAE_COMP_VAL;
8723 dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
8724 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
8725 dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
8726 dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
8727 dmae->dst_addr_lo = sc->func_stx >> 2;
8728 dmae->dst_addr_hi = 0;
8729 dmae->len = sizeof(struct host_func_stats) >> 2;
8730 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
8731 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
8732 dmae->comp_val = DMAE_COMP_VAL;
8737 DBEXIT(BXE_VERBOSE_STATS);
/*
 * Stop statistics collection: perform one final hardware (PMF only)
 * and STORM statistics update, push the results to the OS, flush the
 * port statistics to shared memory (PMF only), and post the last
 * hardware statistics DMA.
 */
8745 bxe_stats_stop(struct bxe_softc *sc)
8749 DBENTER(BXE_VERBOSE_STATS);
8753 /* Wait for any pending completions. */
8756 if (sc->port.pmf == 1)
8757 update = (bxe_stats_hw_update(sc) == 0);
8759 update |= (bxe_stats_storm_update(sc) == 0);
8762 bxe_stats_net_update(sc);
8764 if (sc->port.pmf == 1)
8765 bxe_stats_port_stop(sc);
8767 bxe_stats_hw_post(sc);
8771 DBEXIT(BXE_VERBOSE_STATS);
8775 * A dummy function to fill in the statistics state transition table.
/* No-op handler used to fill unused entries in the state table below. */
8781 bxe_stats_do_nothing(struct bxe_softc *sc)
8783 DBENTER(BXE_VERBOSE_STATS);
8784 DBEXIT(BXE_VERBOSE_STATS);
/*
 * Statistics state machine transition table, indexed by
 * [current state][event].  Each entry names the handler to run for
 * that (state, event) pair and the state to move to afterwards.
 */
8787 static const struct {
8788 void (*action)(struct bxe_softc *sc);
8789 enum bxe_stats_state next_state;
8790 } bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
8793 /* DISABLED PMF */ {bxe_stats_pmf_update, STATS_STATE_DISABLED},
8794 /* LINK_UP */ {bxe_stats_start, STATS_STATE_ENABLED},
8795 /* UPDATE */ {bxe_stats_do_nothing, STATS_STATE_DISABLED},
8796 /* STOP */ {bxe_stats_do_nothing, STATS_STATE_DISABLED}
8800 /* ENABLED PMF */ {bxe_stats_pmf_start, STATS_STATE_ENABLED},
8801 /* LINK_UP */ {bxe_stats_restart, STATS_STATE_ENABLED},
8802 /* UPDATE */ {bxe_stats_update, STATS_STATE_ENABLED},
8803 /* STOP */ {bxe_stats_stop, STATS_STATE_DISABLED}
8808 * Move to the next state of the statistics state machine.
/*
 * Drive the statistics state machine: run the action registered for the
 * (current state, event) pair, then move to the table's next state.
 * UPDATE events are too frequent to log, so debug prints skip them.
 */
8814 bxe_stats_handle(struct bxe_softc *sc, enum bxe_stats_event event)
8816 	enum bxe_stats_state state;
8818 	DBENTER(BXE_EXTREME_STATS);
8820 	state = sc->stats_state;
8823 	if (event != STATS_EVENT_UPDATE)
8824 		DBPRINT(sc, BXE_VERBOSE_STATS,
8825 		    "%s(): Current state = %d, event = %d.\n", __FUNCTION__,
8829 	bxe_stats_stm[state][event].action(sc);
8830 	sc->stats_state = bxe_stats_stm[state][event].next_state;
8833 	if (event != STATS_EVENT_UPDATE)
8834 		DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): New state = %d.\n",
8835 		    __FUNCTION__, sc->stats_state);
8838 	DBEXIT(BXE_EXTREME_STATS);
8842 * bxe_chktso_window()
8843 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
8844 * Check that (13 total bds - 3bds) = 10 bd window >= MSS.
8845 * The window: 3 bds are = 1 (for headers BD) + 2 (for PBD and last BD)
8846  * The headers come in a separate bd in FreeBSD. So 13-3=10.
8849 * 0 if OK to send, 1 if packet needs further defragmentation.
/*
 * Slide a 10-BD window across the DMA segment list and verify each
 * window covers at least one MSS worth of payload; otherwise the frame
 * must be defragmented before the controller can TSO it.
 * Returns 0 if OK to send, 1 if further defragmentation is needed.
 * NOTE(review): wnd_sum's zero-initialization is on a line missing from
 * this excerpt — confirm in the full source.
 */
8852 bxe_chktso_window(struct bxe_softc* sc, int nsegs, bus_dma_segment_t *segs,
8855 	uint32_t num_wnds, wnd_size, wnd_sum;
8856 	int32_t frag_idx, wnd_idx;
8857 	unsigned short lso_mss;
8863 	num_wnds = nsegs - wnd_size;
8864 	lso_mss = htole16(m0->m_pkthdr.tso_segsz);
8867 	 * Total Header lengths Eth+IP+TCP in 1st FreeBSD mbuf so
8868 	 * calculate the first window sum of data, skipping the first
8869 	 * segment, assuming it is the header in FreeBSD.
8871 	for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++)
8872 		wnd_sum += htole16(segs[frag_idx].ds_len);
8874 	/* Chk the first 10 bd window size */
8875 	if (wnd_sum < lso_mss)
8876 		return (defrag = 1);
8878 	/* Run through the windows */
8879 	for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
8880 		/* Subtract the 1st mbuf->m_len of the last wndw(-header). */
8881 		wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
8882 		/* Add the next mbuf len to the len of our new window. */
8883 		wnd_sum += htole16(segs[frag_idx].ds_len);
8884 		if (wnd_sum < lso_mss) {
8895  * Encapsulates an mbuf cluster into the tx_bd chain structure and
8896 * makes the memory visible to the controller.
8898 * If an mbuf is submitted to this routine and cannot be given to the
8899 * controller (e.g. it has too many fragments) then the function may free
8900 * the mbuf and return to the caller.
8903 * 0 = Success, !0 = Failure
8904 * Note the side effect that an mbuf may be freed if it causes a problem.
/*
 * Transmit encapsulation: DMA-map the mbuf, build the Start BD, the
 * Parsing BD (checksum/TSO metadata for the firmware) and the data BDs
 * in the TX chain, then ring the doorbell.  May free the mbuf on an
 * unrecoverable error (side effect documented in the header comment).
 * Returns 0 on success, non-zero on failure.
 * NOTE(review): this excerpt is missing interior lines (error labels,
 * switch head for 'etype', some else-arms); code left byte-identical.
 */
8907 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
8909 	bus_dma_segment_t segs[32];
8912 	struct eth_tx_parse_bd *tx_parse_bd;
8913 	struct eth_tx_bd *tx_data_bd;
8914 	struct eth_tx_bd *tx_total_pkt_size_bd;
8915 	struct eth_tx_start_bd *tx_start_bd;
8916 	uint16_t etype, sw_tx_bd_prod, sw_pkt_prod, total_pkt_size;
8917 //	uint16_t bd_index, pkt_index;
8919 	int i, defragged, e_hlen, error, nsegs, rc, nbds, vlan_off, ovlan;
8920 	struct bxe_softc *sc;
8923 	DBENTER(BXE_VERBOSE_SEND);
8925 	DBRUN(M_ASSERTPKTHDR(*m_head));
8928 	rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
8932 	tx_total_pkt_size_bd = NULL;
8934 	/* Get the H/W pointer (0 to 65535) for packets and BD's. */
8935 	sw_pkt_prod = fp->tx_pkt_prod;
8936 	sw_tx_bd_prod = fp->tx_bd_prod;
8938 	/* Create the S/W index (0 to MAX_TX_BD) for packets and BD's. */
8939 //	pkt_index = TX_BD(sw_pkt_prod);
8940 //	bd_index = TX_BD(sw_tx_bd_prod);
8942 	mac_type = UNICAST_ADDRESS;
8944 	/* Map the mbuf into the next open DMAable memory. */
8945 	map = fp->tx_mbuf_map[TX_BD(sw_pkt_prod)];
8946 	error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, map, m0,
8947 	    segs, &nsegs, BUS_DMA_NOWAIT);
8949 	/* Handle any mapping errors. */
8950 	if(__predict_false(error != 0)){
8951 		fp->tx_dma_mapping_failure++;
8952 		if (error == ENOMEM) {
8953 			/* Resource issue, try again later. */
8955 		} else if (error == EFBIG) {
8956 			/* Possibly recoverable with defragmentation. */
8957 			fp->mbuf_defrag_attempts++;
8958 			m0 = m_defrag(*m_head, M_NOWAIT);
8960 				fp->mbuf_defrag_failures++;
8963 				/* Defrag successful, try mapping again.*/
8965 				error = bus_dmamap_load_mbuf_sg(
8966 				    fp->tx_mbuf_tag, map, m0,
8967 				    segs, &nsegs, BUS_DMA_NOWAIT);
8969 					fp->tx_dma_mapping_failure++;
8974 			/* Unknown, unrecoverable mapping error. */
8975 			DBPRINT(sc, BXE_WARN_SEND,
8976 			    "%s(): Unknown TX mapping error! "
8977 			    "rc = %d.\n", __FUNCTION__, error);
8978 			DBRUN(bxe_dump_mbuf(sc, m0));
8982 		goto bxe_tx_encap_continue;
8985 	/* Make sure there's enough room in the send queue. */
8986 	if (__predict_false((nsegs + 2) >
8987 	    (USABLE_TX_BD - fp->tx_bd_used))) {
8988 		/* Recoverable, try again later. */
8989 		fp->tx_hw_queue_full++;
8990 		bus_dmamap_unload(fp->tx_mbuf_tag, map);
8992 		goto bxe_tx_encap_continue;
8995 	/* Capture the current H/W TX chain high watermark. */
8996 	if (__predict_false(fp->tx_hw_max_queue_depth <
8998 		fp->tx_hw_max_queue_depth = fp->tx_bd_used;
9000 	/* Now make sure it fits in the packet window. */
9001 	if (__predict_false(nsegs > 12)) {
9003 		 * The mbuf may be too big for the controller
9004 		 * to handle.  If the frame is a TSO frame
9005 		 * we'll need to do an additional check.
9007 		if(m0->m_pkthdr.csum_flags & CSUM_TSO){
9008 			if (bxe_chktso_window(sc,nsegs,segs,m0) == 0)
9010 				goto bxe_tx_encap_continue;
9012 				fp->tx_window_violation_tso++;
9014 			fp->tx_window_violation_std++;
9016 		/* No sense trying to defrag again, we'll drop the frame. */
9021 bxe_tx_encap_continue:
9022 	/* Check for errors */
9025 		/* Recoverable try again later  */
9027 		fp->tx_soft_errors++;
9028 		fp->tx_mbuf_alloc--;
9032 		goto bxe_tx_encap_exit;
9035 	/* Save the mbuf and mapping. */
9036 	fp->tx_mbuf_ptr[TX_BD(sw_pkt_prod)] = m0;
9037 	fp->tx_mbuf_map[TX_BD(sw_pkt_prod)] = map;
9039 	/* Set flag according to packet type (UNICAST_ADDRESS is default). */
9040 	if (m0->m_flags & M_BCAST)
9041 		mac_type = BROADCAST_ADDRESS;
9042 	else if (m0->m_flags & M_MCAST)
9043 		mac_type = MULTICAST_ADDRESS;
9045 	/* Prepare the first transmit (Start) BD for the mbuf. */
9046 	tx_start_bd = &fp->tx_chain[TX_BD(sw_tx_bd_prod)].start_bd;
9048 	tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
9049 	tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
9050 	tx_start_bd->nbytes = htole16(segs[0].ds_len);
9051 	total_pkt_size += tx_start_bd->nbytes;
9052 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9053 	tx_start_bd->general_data =
9054 	    (mac_type << ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
9056 	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
9058 	/* All frames have at least Start BD + Parsing BD. */
9060 	tx_start_bd->nbd = htole16(nbds);
9062 	if (m0->m_flags & M_VLANTAG) {
9063 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9064 		tx_start_bd->vlan = htole16(m0->m_pkthdr.ether_vtag);
9067 		 * In cases where the VLAN tag is not used the firmware
9068 		 * expects to see a packet counter in the VLAN tag field
9069 		 * Failure to do so will cause an assertion which will
9070 		 * stop the controller.
9072 		tx_start_bd->vlan = htole16(fp->tx_pkt_prod);
9075 	 * Add a parsing BD from the chain. The parsing BD is always added,
9076 	 * however, it is only used for TSO & chksum.
9078 	sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9079 	tx_parse_bd = (struct eth_tx_parse_bd *)
9080 	    &fp->tx_chain[TX_BD(sw_tx_bd_prod)].parse_bd;
9081 	memset(tx_parse_bd, 0, sizeof(struct eth_tx_parse_bd));
9083 	/* Gather all info about the packet and add to tx_parse_bd */
9084 	if (m0->m_pkthdr.csum_flags) {
9085 		struct ether_vlan_header *eh;
9086 		struct ip *ip = NULL;
9087 		struct tcphdr *th = NULL;
9089 		struct udphdr *uh = NULL;
9091 		/* Map Ethernet header to find type & header length. */
9092 		eh = mtod(m0, struct ether_vlan_header *);
9094 		/* Handle VLAN encapsulation if present. */
9095 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
9096 			etype = ntohs(eh->evl_proto);
9097 			e_hlen = ETHER_HDR_LEN + vlan_off;
9099 			etype = ntohs(eh->evl_encap_proto);
9100 			e_hlen = ETHER_HDR_LEN;
9103 		/* Set the Ethernet header length in 16 bit words. */
9104 		tx_parse_bd->global_data = (e_hlen + ovlan) >> 1;
9105 		tx_parse_bd->global_data |= ((m0->m_flags & M_VLANTAG) <<
9106 		    ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT);
9110 			/* If mbuf len < 20bytes, IP header is in next mbuf. */
9111 			if (m0->m_len < sizeof(struct ip))
9112 				ip = (struct ip *) m0->m_next->m_data;
9114 				ip = (struct ip *) (m0->m_data + e_hlen);
9116 			/* Calculate IP header length (16 bit words). */
9117 			tx_parse_bd->ip_hlen = (ip->ip_hl << 1);
9119 			/* Calculate enet + IP header length (16 bit words). */
9120 			tx_parse_bd->total_hlen = tx_parse_bd->ip_hlen +
9123 			if (m0->m_pkthdr.csum_flags & CSUM_IP) {
9124 				fp->tx_offload_frames_csum_ip++;
9125 				flags |= ETH_TX_BD_FLAGS_IP_CSUM;
9128 			/* Handle any checksums requested by the stack. */
9129 			if ((m0->m_pkthdr.csum_flags & CSUM_TCP)||
9130 			    (m0->m_pkthdr.csum_flags & CSUM_TSO)){
9132 				/* Get the TCP header. */
9133 				th = (struct tcphdr *)((caddr_t)ip +
9136 				/* Add the TCP checksum offload flag. */
9137 				flags |= ETH_TX_BD_FLAGS_L4_CSUM;
9138 				fp->tx_offload_frames_csum_tcp++;
9140 				/* Update the enet + IP + TCP header length. */
9141 				tx_parse_bd->total_hlen +=
9142 				    (uint16_t)(th->th_off << 1);
9144 				/* Get the pseudo header checksum. */
9145 				tx_parse_bd->tcp_pseudo_csum =
9148 			} else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
9150 				 * The hardware doesn't actually support UDP
9151 				 * checksum offload but we can fake it by
9152 				 * doing TCP checksum offload and factoring
9153 				 * out the extra bytes that are different
9154 				 * between the TCP header and the UDP header.
9156 				 * Calculation will begin 10 bytes before the
9157 				 * actual start of the UDP header.  To work
9158 				 * around this we need to calculate the
9159 				 * checksum of the 10 bytes before the UDP
9160 				 * header and factor that out of the UDP
9161 				 * pseudo header checksum before asking the
9162 				 * H/W to calculate the full UDP checksum.
9167 				/* This value is 10. */
9168 	uint8_t fix = (uint8_t) (offsetof(struct tcphdr, th_sum) -
9169 				    (int) offsetof(struct udphdr, uh_sum));
9172 				 * Add the TCP checksum offload flag for
9175 				flags |= ETH_TX_BD_FLAGS_L4_CSUM;
9176 				fp->tx_offload_frames_csum_udp++;
9177 				tx_parse_bd->global_data |=
9178 				    ETH_TX_PARSE_BD_UDP_CS_FLG;
9180 				/* Get a pointer to the UDP header. */
9181 				uh = (struct udphdr *)((caddr_t)ip +
9184 				/* Set pointer 10 bytes before UDP header. */
9185 					tmp_uh = (uint32_t *)((uint8_t *)uh -
9189 				 * Calculate a pseudo header checksum over
9190 				 * the 10 bytes before the UDP header.
9192 				tmp_csum = in_pseudo(ntohl(*tmp_uh),
9193 				    ntohl(*(tmp_uh + 1)),
9194 				    ntohl((*(tmp_uh + 2)) & 0x0000FFFF));
9196 				/* Update the enet + IP + UDP header length. */
9197 				tx_parse_bd->total_hlen +=
9198 				    (sizeof(struct udphdr) >> 1);
9199 				tx_parse_bd->tcp_pseudo_csum =
9200 				    ~in_addword(uh->uh_sum, ~tmp_csum);
9203 			/* Update the offload flags. */
9204 			tx_start_bd->bd_flags.as_bitfield |= flags;
9207 		case ETHERTYPE_IPV6:
9208 			fp->tx_unsupported_tso_request_ipv6++;
9209 			/* ToDo: Add IPv6 support. */
9213 			fp->tx_unsupported_tso_request_not_tcp++;
9214 			/* ToDo - How to handle this error? */
9217 		/* Setup the Parsing BD with TSO specific info */
9218 		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
9219 			uint16_t hdr_len = tx_parse_bd->total_hlen << 1;
9221 			tx_start_bd->bd_flags.as_bitfield |=
9222 			    ETH_TX_BD_FLAGS_SW_LSO;
9223 			fp->tx_offload_frames_tso++;
9225 			/* ToDo: Does this really help? */
9226 			if (__predict_false(tx_start_bd->nbytes > hdr_len)) {
9227 				fp->tx_header_splits++;
9229 				 * Split the first BD into 2 BDs to make the
9230 				 * firmwares job easy...
9233 				DBPRINT(sc, BXE_EXTREME_SEND,
9234 			"%s(): TSO split headr size is %d (%x:%x) nbds %d\n",
9235 				    __FUNCTION__, tx_start_bd->nbytes,
9236 				    tx_start_bd->addr_hi,
9237 				    tx_start_bd->addr_lo, nbds);
9239 				sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9241 				/* New transmit BD (after the tx_parse_bd). */
9243 				    &fp->tx_chain[TX_BD(sw_tx_bd_prod)].reg_bd;
9244 				tx_data_bd->addr_hi =
9245 				    htole32(U64_HI(segs[0].ds_addr + hdr_len));
9246 				tx_data_bd->addr_lo =
9247 				    htole32(U64_LO(segs[0].ds_addr + hdr_len));
9248 				tx_data_bd->nbytes =
9249 				    htole16(segs[0].ds_len) - hdr_len;
9250 				if (tx_total_pkt_size_bd == NULL)
9251 					tx_total_pkt_size_bd = tx_data_bd;
9255 			 * The controller needs the following info for TSO:
9256 			 * MSS, tcp_send_seq, ip_id, and tcp_pseudo_csum.
9258 			tx_parse_bd->lso_mss = htole16(m0->m_pkthdr.tso_segsz);
9259 			tx_parse_bd->tcp_send_seq = ntohl(th->th_seq);
9260 			tx_parse_bd->tcp_flags = th->th_flags;
9261 			tx_parse_bd->ip_id = ntohs(ip->ip_id);
9263 			tx_parse_bd->tcp_pseudo_csum =
9264 			    ntohs(in_pseudo(ip->ip_src.s_addr,
9265 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP)));
9267 			tx_parse_bd->global_data |=
9268 			    ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9272 	/* Prepare remaining BDs. Start_tx_bd contains first seg (frag). */
9273 	for (i = 1; i < nsegs ; i++) {
9274 		sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9275 		tx_data_bd = &fp->tx_chain[TX_BD(sw_tx_bd_prod)].reg_bd;
9276 		tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
9277 		tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
9278 		tx_data_bd->nbytes = htole16(segs[i].ds_len);
9279 		if (tx_total_pkt_size_bd == NULL)
9280 			tx_total_pkt_size_bd = tx_data_bd;
9281 		total_pkt_size += tx_data_bd->nbytes;
9284 	if(tx_total_pkt_size_bd != NULL)
9285 		tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
9287 	/* Update TX BD producer index value for next TX */
9288 	sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
9290 	/* Update the used TX BD counter. */
9291 	fp->tx_bd_used += nbds;
9294 	 * If the chain of tx_bd's describing this frame
9295 	 * is adjacent to or spans an eth_tx_next_bd element
9296 	 * then we need to increment the nbds value.
9298 	if(TX_IDX(sw_tx_bd_prod) < nbds)
9301 	/* Don't allow reordering of writes for nbd and packets. */
9303 	fp->tx_db.data.prod += nbds;
9305 	/* Producer points to the next free tx_bd at this point. */
9307 	fp->tx_bd_prod = sw_tx_bd_prod;
9309 	DOORBELL(sc, fp->index, fp->tx_db.raw);
9313 	/* Prevent speculative reads from getting ahead of the status block. */
9314 	bus_space_barrier(sc->bxe_btag, sc->bxe_bhandle,
9315 	    0, 0, BUS_SPACE_BARRIER_READ);
9317 	/* Prevent speculative reads from getting ahead of the doorbell. */
9318 	bus_space_barrier(sc->bxe_db_btag, sc->bxe_db_bhandle,
9319 	    0, 0, BUS_SPACE_BARRIER_READ);
9322 	DBEXIT(BXE_VERBOSE_SEND);
9328 * Legacy (non-RSS) dispatch routine.
/*
 * Legacy (non-RSS) transmit dispatch: bail out if the interface is not
 * running, is marked active-full, or the link is down; otherwise hand
 * the interface send queue to bxe_tx_start_locked().
 */
9334 bxe_tx_start(struct ifnet *ifp)
9336 	struct bxe_softc *sc;
9337 	struct bxe_fastpath *fp;
9340 	DBENTER(BXE_EXTREME_SEND);
9342 	/* Exit if the transmit queue is full or link down. */
9343 	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
9344 	    IFF_DRV_RUNNING) || !sc->link_vars.link_up) {
9345 		DBPRINT(sc, BXE_WARN,
9346 		    "%s(): No link or TX queue full, ignoring "
9347 		    "transmit request.\n", __FUNCTION__);
9348 		goto bxe_tx_start_exit;
9351 	/* Set the TX queue for the frame. */
9355 	bxe_tx_start_locked(ifp, fp);
9359 	DBEXIT(BXE_EXTREME_SEND);
9364 * Legacy (non-RSS) transmit routine.
/*
 * Legacy (non-RSS) transmit worker: dequeue frames from the interface
 * send queue and encapsulate them into the fastpath TX chain.  On a
 * full ring the frame is prepended back onto the queue, OACTIVE is set,
 * and the loop stops.  Resets the TX watchdog when work was done.
 * Caller must hold the fastpath lock (asserted below).
 */
9370 bxe_tx_start_locked(struct ifnet *ifp, struct bxe_fastpath *fp)
9372 	struct bxe_softc *sc;
9373 	struct mbuf *m = NULL;
9377 	DBENTER(BXE_EXTREME_SEND);
9379 	BXE_FP_LOCK_ASSERT(fp);
9381 	/* Keep adding entries while there are frames to send. */
9382 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
9384 		/* Check for any frames to send. */
9385 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
9386 		if (__predict_false(m == NULL))
9389 		/* The transmit mbuf now belongs to us, keep track of it. */
9390 		fp->tx_mbuf_alloc++;
9393 		 * Pack the data into the transmit ring. If we
9394 		 * don't have room, place the mbuf back at the
9395 		 * head of the TX queue, set the OACTIVE flag,
9396 		 * and wait for the NIC to drain the chain.
9398 		if (__predict_false(bxe_tx_encap(fp, &m))) {
9399 			fp->tx_encap_failures++;
9400 			/* Very Bad Frames(tm) may have been dropped. */
9403 				 * Mark the TX queue as full and return
9406 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
9407 				IFQ_DRV_PREPEND(&ifp->if_snd, m);
9408 				fp->tx_mbuf_alloc--;
9409 				fp->tx_queue_xoff++;
9414 			/* Stop looking for more work. */
9418 		/* The transmit frame was enqueued successfully. */
9421 		/* Send a copy of the frame to any BPF listeners. */
9425 	/* No TX packets were dequeued. */
9427 	/* Reset the TX watchdog timeout timer. */
9428 	fp->watchdog_timer = BXE_TX_TIMEOUT;
9430 	DBEXIT(BXE_EXTREME_SEND);
9433 #if __FreeBSD_version >= 800000
9435 * Multiqueue (RSS) dispatch routine.
9438 * 0 if transmit succeeds, !0 otherwise.
/*
 * Multiqueue (RSS) transmit dispatch: pick the fastpath queue from the
 * mbuf's flow ID when present, stash the frame in the buf_ring if the
 * interface is not ready or the link is down, otherwise hand it to
 * bxe_tx_mq_start_locked().  Returns 0 on success, !0 otherwise.
 */
9441 bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m)
9443 	struct bxe_softc *sc;
9444 	struct bxe_fastpath *fp;
9448 	DBENTER(BXE_EXTREME_SEND);
9452 	/* If using flow ID, assign the TX queue based on the flow ID. */
9453 	if ((m->m_flags & M_FLOWID) != 0)
9454 		fp_index = m->m_pkthdr.flowid % sc->num_queues;
9456 	/* Select the fastpath TX queue for the frame. */
9457 	fp = &sc->fp[fp_index];
9459 	/* Skip H/W enqueue if transmit queue is full or link down. */
9460 	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
9461 	    IFF_DRV_RUNNING) || !sc->link_vars.link_up) {
9462 		/* Stash the mbuf if we can. */
9463 		rc = drbr_enqueue(ifp, fp->br, m);
9464 		goto bxe_tx_mq_start_exit;
9468 	rc = bxe_tx_mq_start_locked(ifp, fp, m);
9471 bxe_tx_mq_start_exit:
9472 	DBEXIT(BXE_EXTREME_SEND);
9478 * Multiqueue (TSS) transmit routine. This routine is responsible
9479 * for adding a frame to the hardware's transmit queue.
9482 * 0 if transmit succeeds, !0 otherwise.
/*
 * Multiqueue (TSS) transmit worker: maintain packet order via the
 * per-queue buf_ring (drbr), encapsulate dequeued frames into the TX
 * chain, account ifnet counters, and service TX completions when the
 * ring runs low.  Returns 0 on success, !0 otherwise.
 * Caller must hold the fastpath lock (asserted below).
 * NOTE(review): the enqueue-on-failure path re-queues 'next', which may
 * reorder frames — acknowledged by the original comment.
 */
9485 bxe_tx_mq_start_locked(struct ifnet *ifp,
9486     struct bxe_fastpath *fp, struct mbuf *m)
9488 	struct bxe_softc *sc;
9490 	int depth, rc, tx_count;
9493 	DBENTER(BXE_EXTREME_SEND);
9497 	/* Fetch the depth of the driver queue. */
9498 	depth = drbr_inuse(ifp, fp->br);
9499 	if (depth > fp->tx_max_drbr_queue_depth)
9500 		fp->tx_max_drbr_queue_depth = depth;
9502 	BXE_FP_LOCK_ASSERT(fp);
9505 		/* No new work, check for pending frames. */
9506 		next = drbr_dequeue(ifp, fp->br);
9507 	} else if (drbr_needs_enqueue(ifp, fp->br)) {
9508 		/* Both new and pending work, maintain packet order. */
9509 		rc = drbr_enqueue(ifp, fp->br, m);
9511 			fp->tx_soft_errors++;
9512 			goto bxe_tx_mq_start_locked_exit;
9514 		next = drbr_dequeue(ifp, fp->br);
9516 		/* New work only, nothing pending. */
9519 	/* Keep adding entries while there are frames to send. */
9520 	while (next != NULL) {
9522 		/* The transmit mbuf now belongs to us, keep track of it. */
9523 		fp->tx_mbuf_alloc++;
9526 		 * Pack the data into the transmit ring. If we
9527 		 * don't have room, place the mbuf back at the
9528 		 * head of the TX queue, set the OACTIVE flag,
9529 		 * and wait for the NIC to drain the chain.
9531 		rc = bxe_tx_encap(fp, &next);
9532 		if (__predict_false(rc != 0)) {
9533 			fp->tx_encap_failures++;
9534 			/* Very Bad Frames(tm) may have been dropped. */
9537 				 * Mark the TX queue as full and save
9540 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
9541 				fp->tx_frame_deferred++;
9543 				/* This may reorder frame. */
9544 				rc = drbr_enqueue(ifp, fp->br, next);
9545 				fp->tx_mbuf_alloc--;
9548 			/* Stop looking for more work. */
9552 		/* The transmit frame was enqueued successfully. */
9556 		ifp->if_obytes += next->m_pkthdr.len;
9557 		if (next->m_flags & M_MCAST)
9560 		/* Send a copy of the frame to any BPF listeners. */
9561 		BPF_MTAP(ifp, next);
9563 		/* Handle any completions if we're running low. */
9564 		if (fp->tx_bd_used >= BXE_TX_CLEANUP_THRESHOLD)
9567 			/* Close TX since there's so little room left. */
9568 			if (fp->tx_bd_used >= BXE_TX_CLEANUP_THRESHOLD) {
9569 				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
9573 		next = drbr_dequeue(ifp, fp->br);
9576 	/* No TX packets were dequeued. */
9578 	/* Reset the TX watchdog timeout timer. */
9579 	fp->watchdog_timer = BXE_TX_TIMEOUT;
9581 bxe_tx_mq_start_locked_exit:
9582 	DBEXIT(BXE_EXTREME_SEND);
/*
 * Flush every fastpath's buf_ring, freeing any mbufs still queued for
 * transmit.  Called on interface teardown (multiqueue builds only).
 */
9588 bxe_mq_flush(struct ifnet *ifp)
9590 	struct bxe_softc *sc;
9591 	struct bxe_fastpath *fp;
9597 	DBENTER(BXE_VERBOSE_UNLOAD);
9599 	for (i = 0; i < sc->num_queues; i++) {
9602 		if (fp->br != NULL) {
9603 			DBPRINT(sc, BXE_VERBOSE_UNLOAD,
9604 			    "%s(): Clearing fp[%02d]...\n",
9605 			    __FUNCTION__, fp->index);
9608 			while ((m = buf_ring_dequeue_sc(fp->br)) != NULL)
9616 	DBEXIT(BXE_VERBOSE_UNLOAD);
9618 #endif /* FreeBSD_version >= 800000 */
9622 * Handles any IOCTL calls from the operating system.
9625 * 0 for success, positive value for failure.
/*
 * Interface ioctl handler: MTU changes, up/down transitions, multicast
 * list updates, media queries, and capability toggles (LRO, TX/RX
 * checksum, TSO4).  Unknown commands fall through to ether_ioctl().
 * Returns 0 for success, positive errno for failure.
 * NOTE(review): 'reinit' is set on lines missing from this excerpt; the
 * restart-at-exit path below depends on it — confirm in the full source.
 */
9628 bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
9630 	struct bxe_softc *sc;
9632 	int error, mask, reinit;
9635 	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_MISC);
9637 	ifr = (struct ifreq *)data;
9644 		DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Received SIOCSIFMTU\n",
9647 		/* Check that the MTU setting is supported. */
9648 		if ((ifr->ifr_mtu < BXE_MIN_MTU) ||
9649 		    (ifr->ifr_mtu > BXE_JUMBO_MTU)) {
9655 		ifp->if_mtu = ifr->ifr_mtu;
9656 		BXE_CORE_UNLOCK(sc);
9661 		/* Toggle the interface state up or down. */
9662 		DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Received SIOCSIFFLAGS\n",
9666 		/* Check if the interface is up. */
9667 		if (ifp->if_flags & IFF_UP) {
9668 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
9669 				/* Set promiscuous/multicast flags. */
9670 				bxe_set_rx_mode(sc);
9673 				bxe_init_locked(sc, LOAD_NORMAL);
9676 			/* Bring down the interface. */
9677 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
9678 				bxe_stop_locked(sc, UNLOAD_NORMAL);
9680 		BXE_CORE_UNLOCK(sc);
9685 		/* Add/Delete multicast addresses. */
9686 		DBPRINT(sc, BXE_VERBOSE_MISC,
9687 		    "%s(): Received SIOCADDMULTI/SIOCDELMULTI\n", __FUNCTION__);
9690 		/* Check if the interface is up. */
9691 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
9692 			/* Set receive mode flags. */
9693 			bxe_set_rx_mode(sc);
9694 		BXE_CORE_UNLOCK(sc);
9699 		/* Set/Get Interface media */
9700 		DBPRINT(sc, BXE_VERBOSE_MISC,
9701 		    "%s(): Received SIOCSIFMEDIA/SIOCGIFMEDIA\n", __FUNCTION__);
9703 		error = ifmedia_ioctl(ifp, ifr, &sc->bxe_ifmedia, command);
9706 		/* Set interface capability */
9708 		/* Find out which capabilities have changed. */
9709 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
9710 		DBPRINT(sc, BXE_VERBOSE_MISC,
9711 		    "%s(): Received SIOCSIFCAP (mask = 0x%08X)\n", __FUNCTION__,
9716 		/* Toggle the LRO capabilities enable flag. */
9717 		if (mask & IFCAP_LRO) {
9718 			ifp->if_capenable ^= IFCAP_LRO;
9719 			sc->bxe_flags ^= BXE_TPA_ENABLE_FLAG;
9720 			DBPRINT(sc, BXE_INFO_MISC,
9721 			    "%s(): Toggling LRO (bxe_flags = "
9722 			    "0x%08X).\n", __FUNCTION__, sc->bxe_flags);
9724 			/* LRO requires different buffer setup. */
9728 		/* Toggle the TX checksum capabilities enable flag. */
9729 		if (mask & IFCAP_TXCSUM) {
9730 			DBPRINT(sc, BXE_VERBOSE_MISC,
9731 			    "%s(): Toggling IFCAP_TXCSUM.\n", __FUNCTION__);
9733 			ifp->if_capenable ^= IFCAP_TXCSUM;
9735 			if (IFCAP_TXCSUM & ifp->if_capenable)
9736 				ifp->if_hwassist = BXE_IF_HWASSIST;
9738 				ifp->if_hwassist = 0;
9741 		/* Toggle the RX checksum capabilities enable flag. */
9742 		if (mask & IFCAP_RXCSUM) {
9743 			DBPRINT(sc, BXE_VERBOSE_MISC,
9744 			    "%s(): Toggling IFCAP_RXCSUM.\n", __FUNCTION__);
9746 			ifp->if_capenable ^= IFCAP_RXCSUM;
9748 			if (IFCAP_RXCSUM & ifp->if_capenable)
9749 				ifp->if_hwassist = BXE_IF_HWASSIST;
9751 				ifp->if_hwassist = 0;
9754 		/* Toggle VLAN_MTU capabilities enable flag. */
9755 		if (mask & IFCAP_VLAN_MTU) {
9756 			/* ToDo: Is this really true? */
9757 			BXE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
9758 			    __FILE__, __LINE__);
9762 		/* Toggle VLANHWTAG capabilities enabled flag. */
9763 		if (mask & IFCAP_VLAN_HWTAGGING) {
9764 			/* ToDo: Is this really true? */
9766 			    "%s(%d): Changing VLAN_HWTAGGING not supported!\n",
9767 			    __FILE__, __LINE__);
9771 		/* Toggle TSO4 capabilities enabled flag. */
9772 		if (mask & IFCAP_TSO4) {
9773 			DBPRINT(sc, BXE_VERBOSE_MISC,
9774 			    "%s(): Toggling IFCAP_TSO4.\n", __FUNCTION__);
9776 			ifp->if_capenable ^= IFCAP_TSO4;
9779 		/* Toggle TSO6 capabilities enabled flag. */
9780 		if (mask & IFCAP_TSO6) {
9781 			/* ToDo: Add TSO6 support. */
9783 			    "%s(%d): Changing TSO6 not supported!\n",
9784 			    __FILE__, __LINE__);
9786 		BXE_CORE_UNLOCK(sc);
9789 		 * ToDo: Look into supporting:
9794 		 *   WOL[_UCAST|_MCAST|_MAGIC]
9799 		/* We don't know how to handle the IOCTL, pass it on. */
9800 		error = ether_ioctl(ifp, command, data);
9804 	/* Restart the controller with the new capabilities. */
9805 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && (reinit != 0)) {
9807 		bxe_stop_locked(sc, UNLOAD_NORMAL);
9808 		bxe_init_locked(sc, LOAD_NORMAL);
9809 		BXE_CORE_UNLOCK(sc);
9812 	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_MISC);
9818 * Gets the current value of the RX Completion Consumer index
9819 * from the fastpath status block, updates it as necessary if
9820 * it is pointing to a "Next Page" entry, and returns it to the
9824 * The adjusted value of *fp->rx_cons_sb.
/*
 * Read the RX completion-queue consumer index from the fastpath status
 * block, skipping past a "Next Page" entry when the hardware index
 * lands on one, and return the adjusted value.
 */
static __inline uint16_t
9827 bxe_rx_cq_cons(struct bxe_fastpath *fp)
9829 	volatile uint16_t rx_cq_cons_sb = 0;
9832 	rx_cq_cons_sb = (volatile uint16_t) le16toh(*fp->rx_cq_cons_sb);
9835 	 * It is valid for the hardware's copy of the completion
9836 	 * consumer index to be pointing at a "Next Page" entry in
9837 	 * the completion chain but the driver prefers to assume
9838 	 * that it is pointing at the next available CQE so we
9839 	 * need to adjust the value accordingly.
9841 	if ((rx_cq_cons_sb & USABLE_RCQ_ENTRIES_PER_PAGE) ==
9842 	    USABLE_RCQ_ENTRIES_PER_PAGE)
9845 	return (rx_cq_cons_sb);
/*
 * Report whether TX completions are pending: true when the producer
 * disagrees with either the status-block or driver consumer index.
 */
9849 bxe_has_tx_work(struct bxe_fastpath *fp)
9853 	return (((fp->tx_pkt_prod != le16toh(*fp->tx_pkt_cons_sb)) || \
9854 	    (fp->tx_pkt_prod != fp->tx_pkt_cons)));
9858 * Checks if there are any received frames to process on the
9862 * 0 = No received frames pending, !0 = Received frames
/* True when the hardware RCQ consumer has advanced past the driver's. */
9866 bxe_has_rx_work(struct bxe_fastpath *fp)
9870 	return (bxe_rx_cq_cons(fp) != fp->rx_cq_cons);
9874 * Slowpath task entry point.
/*
 * Slowpath task: decode the default status block (bit 0 = hardware
 * attention, bit 1 = CSTORM event such as query_stats or a port delete
 * ramrod), handle each cause, then re-ACK all storm indices to
 * re-enable slowpath interrupts.
 */
9880 bxe_task_sp(void *xsc, int pending)
9882 	struct bxe_softc *sc;
9887 	DBPRINT(sc, BXE_EXTREME_INTR, "%s(): pending = %d.\n", __FUNCTION__,
9890 	/* Check for the source of the interrupt. */
9891 	sp_status = bxe_update_dsb_idx(sc);
9893 	/* Handle any hardware attentions. */
9894 	if (sp_status & 0x1) {
9899 	/* CSTORM event asserted (query_stats, port delete ramrod, etc.). */
9900 	if (sp_status & 0x2) {
9901 		sc->stats_pending = 0;
9905 	/* Check for other weirdness. */
9906 	if (sp_status != 0) {
9907 		DBPRINT(sc, BXE_WARN, "%s(): Unexpected slowpath interrupt "
9908 		    "(sp_status = 0x%04X)!\n", __FUNCTION__, sp_status);
9911 	/* Acknowledge the xSTORM tags and enable slowpath interrupts. */
9912 	bxe_ack_sb(sc, DEF_SB_ID, ATTENTION_ID, le16toh(sc->def_att_idx),
9914 	bxe_ack_sb(sc, DEF_SB_ID, USTORM_ID, le16toh(sc->def_u_idx),
9916 	bxe_ack_sb(sc, DEF_SB_ID, CSTORM_ID, le16toh(sc->def_c_idx),
9918 	bxe_ack_sb(sc, DEF_SB_ID, XSTORM_ID, le16toh(sc->def_x_idx),
9920 	bxe_ack_sb(sc, DEF_SB_ID, TSTORM_ID, le16toh(sc->def_t_idx),
9926 * Legacy interrupt entry point.
9928 * Verifies that the controller generated the interrupt and
9929 * then calls a separate routine to handle the various
9930 * interrupt causes: link, RX, and TX.
/*
 * Legacy (INTx) interrupt handler: verify via bxe_ack_int() that the
 * interrupt is ours, then dispatch fastpath (TX/RX) and slowpath
 * (attention) causes to their task handlers, masking further interrupts
 * until the tasks re-enable them.
 */
9936 bxe_intr_legacy(void *xsc)
9938 	struct bxe_softc *sc;
9939 	struct bxe_fastpath *fp;
9940 	uint32_t mask, fp_status;
9945 	/* Don't handle any interrupts if we're not ready. */
9946 	if (__predict_false(sc->intr_sem != 0))
9947 		goto bxe_intr_legacy_exit;
9949 	/* Bail out if the interrupt wasn't generated by our hardware. */
9950 	fp_status = bxe_ack_int(sc);
9952 		goto bxe_intr_legacy_exit;
9954 	/* Handle the fastpath interrupt. */
9956 	 * sb_id = 0 for ustorm, 1 for cstorm.
9957 	 * The bits returned from ack_int() are 0-15,
9958 	 * bit 0=attention status block
9959 	 * bit 1=fast path status block
9960 	 * A mask of 0x2 or more = tx/rx event
9961 	 * A mask of 1 = slow path event
9964 	mask = (0x2 << fp->sb_id);
9965 	DBPRINT(sc, BXE_INSANE_INTR, "%s(): fp_status = 0x%08X, mask = "
9966 	    "0x%08X\n", __FUNCTION__, fp_status, mask);
9968 	/* CSTORM event means fastpath completion. */
9969 	if (fp_status & mask) {
9970 		/* This interrupt must be ours, disable further interrupts. */
9971 		bxe_ack_sb(sc, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9973 		taskqueue_enqueue(fp->tq, &fp->task);
9975 		bxe_task_fp((void *)fp, 0);
9977 		/* Clear this event from the status flags. */
9981 	/* Handle all slow path interrupts and attentions */
9982 	if (fp_status & 0x1) {
9983 		/* Acknowledge and disable further slowpath interrupts. */
9984 		bxe_ack_sb(sc, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
9986 		/* Schedule the slowpath task. */
9987 		taskqueue_enqueue(sc->tq, &sc->task);
9989 		bxe_task_sp(xsc, 0);
9991 		/* Clear this event from the status flags. */
9997 		DBPRINT(sc, BXE_WARN,
9998 		    "%s(): Unexpected fastpath status (fp_status = 0x%08X)!\n",
9999 		    __FUNCTION__, fp_status);
10003 	DBEXIT(BXE_EXTREME_INTR);
10005 bxe_intr_legacy_exit:
10010 * Slowpath interrupt entry point.
10012 * Acknowledge the interrupt and schedule a slowpath task.
/*
 * Slowpath MSI-X interrupt handler: mask further slowpath interrupts
 * via bxe_ack_sb() and schedule (or directly run) the slowpath task.
 */
10018 bxe_intr_sp(void *xsc)
10020 	struct bxe_softc *sc;
10024 	DBPRINT(sc, BXE_INSANE_INTR, "%s(%d): Slowpath interrupt.\n",
10025 	    __FUNCTION__, curcpu);
10027 	/* Don't handle any interrupts if we're not ready. */
10028 	if (__predict_false(sc->intr_sem != 0))
10029 		goto bxe_intr_sp_exit;
10031 	/* Acknowledge and disable further slowpath interrupts. */
10032 	bxe_ack_sb(sc, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
10035 	/* Schedule the slowpath task. */
10036 	taskqueue_enqueue(sc->tq, &sc->task);
10038 	bxe_task_sp(xsc, 0);
10046 * Fastpath interrupt entry point.
10048 * Acknowledge the interrupt and schedule a fastpath task.
/*
 * Fastpath MSI-X interrupt handler: mask further interrupts for this
 * queue's status block and schedule (or directly run) the fastpath task.
 */
10054 bxe_intr_fp (void *xfp)
10056 	struct bxe_fastpath *fp;
10057 	struct bxe_softc *sc;
10062 	DBPRINT(sc, BXE_INSANE_INTR,
10063 	    "%s(%d): fp[%02d].sb_id = %d interrupt.\n",
10064 	    __FUNCTION__, curcpu, fp->index, fp->sb_id);
10066 	/* Don't handle any interrupts if we're not ready. */
10067 	if (__predict_false(sc->intr_sem != 0))
10068 		goto bxe_intr_fp_exit;
10070 	/* Disable further interrupts. */
10071 	bxe_ack_sb(sc, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
10073 	taskqueue_enqueue(fp->tq, &fp->task);
10075 	bxe_task_fp (xfp, 0);
10083 * Fastpath task entry point.
10085 * Handle any pending transmit or receive events.
/*
 * Fastpath task: refresh the status-block indices, service completed TX
 * and RX work for this queue, then ACK the indices — the final CSTORM
 * ACK re-enables interrupts for the queue.
 */
10091 bxe_task_fp (void *xfp, int pending)
10093 	struct bxe_fastpath *fp;
10094 	struct bxe_softc *sc;
10099 	DBPRINT(sc, BXE_EXTREME_INTR, "%s(%d): Fastpath task on fp[%02d]"
10100 	    ".sb_id = %d\n", __FUNCTION__, curcpu, fp->index, fp->sb_id);
10102 	/* Update the fast path indices */
10103 	bxe_update_fpsb_idx(fp);
10105 	/* Service any completed TX frames. */
10106 	if (bxe_has_tx_work(fp)) {
10112 	/* Service any completed RX frames. */
10116 	/* Acknowledge the fastpath status block indices. */
10117 	bxe_ack_sb(sc, fp->sb_id, USTORM_ID, fp->fp_u_idx, IGU_INT_NOP, 1);
10118 	bxe_ack_sb(sc, fp->sb_id, CSTORM_ID, fp->fp_c_idx, IGU_INT_ENABLE, 1);
10122 * Clears the fastpath (per-queue) status block.
/*
 * Zero the CSTORM copies (U and C sections) of the per-queue status
 * block identified by sb_id for this port.
 */
10128 bxe_zero_sb(struct bxe_softc *sc, int sb_id)
10132 	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10133 	port = BP_PORT(sc);
10136 	bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10137 	    CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
10138 	    CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
10139 	bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10140 	    CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
10141 	    CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
10143 	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10147 * Initialize the fastpath (per queue) status block.
/*
 * Initialize a per-queue (fastpath) status block: program the USTORM
 * and CSTORM sections with the DMA address of the host copy, assign the
 * owning function, start with per-index host coalescing disabled, and
 * finally enable interrupts for the block.
 */
10153 bxe_init_sb(struct bxe_softc *sc, struct host_status_block *sb,
10154     bus_addr_t mapping, int sb_id)
10157 	int func, index, port;
10159 	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10161 	port = BP_PORT(sc);
10162 	func = BP_FUNC(sc);
10164 	DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR),
10165 	    "%s(): Initializing sb_id = %d on port %d, function %d.\n",
10166 	    __FUNCTION__, sb_id, port, func);
10168 	/* Setup the USTORM status block. */
10169 	section = ((uint64_t)mapping) + offsetof(struct host_status_block,
10171 	sb->u_status_block.status_block_id = sb_id;
10173 	REG_WR(sc, BAR_CSTORM_INTMEM +
10174 	    CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
10175 	REG_WR(sc, BAR_CSTORM_INTMEM +
10176 	    ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
10178 	REG_WR8(sc, BAR_CSTORM_INTMEM + FP_USB_FUNC_OFF +
10179 	    CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
10181 	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
10182 		REG_WR16(sc, BAR_CSTORM_INTMEM +
10183 		    CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 0x1);
10185 	/* Setup the CSTORM status block. */
10186 	section = ((uint64_t)mapping) + offsetof(struct host_status_block,
10188 	sb->c_status_block.status_block_id = sb_id;
10190 	/* Write the status block address to CSTORM. Order is important! */
10191 	REG_WR(sc, BAR_CSTORM_INTMEM +
10192 	    CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
10193 	REG_WR(sc, BAR_CSTORM_INTMEM +
10194 	    ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
10196 	REG_WR8(sc, BAR_CSTORM_INTMEM + FP_CSB_FUNC_OFF +
10197 	    CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
10199 	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
10200 		REG_WR16(sc, BAR_CSTORM_INTMEM +
10201 		    CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 0x1);
10203 	/* Enable interrupts. */
10204 	bxe_ack_sb(sc, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
10206 	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10210 * Clears the default status block.
/*
 * Zero the default (slowpath) status block for this PCI function.
 *
 * Clears the TSTORM, CSTORM (U and C halves), and XSTORM resident
 * copies of the default status block via bxe_init_fill() (sizes are
 * in 32-bit words).
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, and the 'func'
 * declaration are missing lines) — confirm against the full source.
 */
10216 bxe_zero_def_sb(struct bxe_softc *sc)
10220 func = BP_FUNC(sc);
10222 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10223 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR),
10224 "%s(): Clearing default status block on function %d.\n",
10225 __FUNCTION__, func);
10227 /* Fill each STORM's copy of the default status block with 0. */
10228 bxe_init_fill(sc, TSEM_REG_FAST_MEMORY +
10229 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
10230 sizeof(struct tstorm_def_status_block) / 4);
10231 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10232 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
10233 sizeof(struct cstorm_def_status_block_u) / 4);
10234 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
10235 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
10236 sizeof(struct cstorm_def_status_block_c) / 4);
10237 bxe_init_fill(sc, XSEM_REG_FAST_MEMORY +
10238 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
10239 sizeof(struct xstorm_def_status_block) / 4);
10241 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10245 * Initialize default status block.
/*
 * Initialize the default (slowpath) status block.
 *
 * Sets up the attention status block and its AEU routing, then programs
 * the U/C/T/X STORM default status block sections: for each section the
 * host DMA address is written to the controller, the section is bound to
 * this PCI function, and host coalescing is disabled on every index.
 * Finishes by clearing pending-state flags and enabling IGU interrupts.
 *
 * def_sb  - host virtual address of the default status block
 * mapping - bus/DMA address of the same block
 * sb_id   - status block id written into each section
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, the 'section'
 * declaration, and some statements inside the attention loop are missing
 * lines) — confirm against the full source.
 */
10251 bxe_init_def_sb(struct bxe_softc *sc, struct host_def_status_block *def_sb,
10252 bus_addr_t mapping, int sb_id)
10255 int func, index, port, reg_offset, val;
10257 port = BP_PORT(sc);
10258 func = BP_FUNC(sc);
10260 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10261 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR),
10262 "%s(): Initializing default status block on port %d, function %d.\n",
10263 __FUNCTION__, port, func);
10265 /* Setup the default status block (DSB). */
10266 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10267 atten_status_block);
10268 def_sb->atten_status_block.status_block_id = sb_id;
/* Reset driver-side attention tracking state. */
10269 sc->attn_state = 0;
10270 sc->def_att_idx = 0;
10273 * Read routing configuration for attn signal
10274 * output of groups. Currently, only groups
10275 * 0 through 3 are wired.
10277 reg_offset = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
10278 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
/* Each group occupies 0x10 bytes: four 32-bit AEU signal registers. */
10280 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
10281 sc->attn_group[index].sig[0] = REG_RD(sc, reg_offset +
10283 sc->attn_group[index].sig[1] = REG_RD(sc, reg_offset +
10284 0x10 * index + 0x4);
10285 sc->attn_group[index].sig[2] = REG_RD(sc, reg_offset +
10286 0x10 * index + 0x8);
10287 sc->attn_group[index].sig[3] = REG_RD(sc, reg_offset +
10288 0x10 * index + 0xc);
10290 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
10292 "%s(): attn_group[%d] = 0x%08X 0x%08X 0x%08x 0X%08x\n",
10293 __FUNCTION__, index, sc->attn_group[index].sig[0],
10294 sc->attn_group[index].sig[1], sc->attn_group[index].sig[2],
10295 sc->attn_group[index].sig[3]);
/* Tell the HC where the attention status block lives (lo/hi). */
10298 reg_offset = port ? HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L;
10300 REG_WR(sc, reg_offset, U64_LO(section));
10301 REG_WR(sc, reg_offset + 4, U64_HI(section));
/* NOTE(review): the modification of 'val' between the read and the
 * write-back has been elided from this excerpt — see full source. */
10303 reg_offset = port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0;
10305 val = REG_RD(sc, reg_offset);
10307 REG_WR(sc, reg_offset, val);
/* USTORM default status block section. */
10310 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10311 u_def_status_block);
10312 def_sb->u_def_status_block.status_block_id = sb_id;
10315 REG_WR(sc, BAR_CSTORM_INTMEM +
10316 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
10317 REG_WR(sc, BAR_CSTORM_INTMEM +
10318 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4), U64_HI(section));
10319 REG_WR8(sc, BAR_CSTORM_INTMEM + DEF_USB_FUNC_OFF +
10320 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
10322 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
10323 REG_WR16(sc, BAR_CSTORM_INTMEM +
10324 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
/* CSTORM default status block section. */
10327 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10328 c_def_status_block);
10329 def_sb->c_def_status_block.status_block_id = sb_id;
10332 REG_WR(sc, BAR_CSTORM_INTMEM +
10333 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
10334 REG_WR(sc, BAR_CSTORM_INTMEM +
10335 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4), U64_HI(section));
10336 REG_WR8(sc, BAR_CSTORM_INTMEM + DEF_CSB_FUNC_OFF +
10337 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
10339 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
10340 REG_WR16(sc, BAR_CSTORM_INTMEM +
10341 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
/* TSTORM default status block section. */
10344 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10345 t_def_status_block);
10346 def_sb->t_def_status_block.status_block_id = sb_id;
10349 REG_WR(sc, BAR_TSTORM_INTMEM +
10350 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
10351 REG_WR(sc, BAR_TSTORM_INTMEM +
10352 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section));
10353 REG_WR8(sc, BAR_TSTORM_INTMEM + DEF_TSB_FUNC_OFF +
10354 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
10356 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
10357 REG_WR16(sc, BAR_TSTORM_INTMEM +
10358 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* XSTORM default status block section. */
10361 section = ((uint64_t)mapping) + offsetof(struct host_def_status_block,
10362 x_def_status_block);
10363 def_sb->x_def_status_block.status_block_id = sb_id;
10366 REG_WR(sc, BAR_XSTORM_INTMEM +
10367 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
10368 REG_WR(sc, BAR_XSTORM_INTMEM +
10369 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), U64_HI(section));
10370 REG_WR8(sc, BAR_XSTORM_INTMEM + DEF_XSB_FUNC_OFF +
10371 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
10373 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
10374 REG_WR16(sc, BAR_XSTORM_INTMEM +
10375 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* No statistics or MAC-programming slowpath commands outstanding yet. */
10377 sc->stats_pending = 0;
10378 sc->set_mac_pending = 0;
/* Enable IGU interrupts for the default status block. */
10380 bxe_ack_sb(sc, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
10382 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
10386 * Update interrupt coalescing parameters.
/*
 * Update interrupt coalescing parameters for every fastpath queue.
 *
 * For each queue, programs the CSTORM host-coalescing timeout for the
 * RX CQ index (tracked by USTORM) and the TX CQ index (tracked by
 * CSTORM) from sc->rx_ticks / sc->tx_ticks, scaled by the base tick
 * rate (BXE_BTR * 4). A timeout of zero ticks additionally disables
 * coalescing on that index (the trailing "? 0 : 1" write).
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type and braces are missing
 * lines) — confirm against the full source.
 */
10392 bxe_update_coalesce(struct bxe_softc *sc)
10394 int i, port, sb_id;
10396 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10398 port = BP_PORT(sc);
10399 /* Cycle through each fastpath queue and set the coalescing values. */
10400 for (i = 0; i < sc->num_queues; i++) {
10401 sb_id = sc->fp[i].sb_id;
10403 /* Receive interrupt coalescing is done on USTORM. */
10404 REG_WR8(sc, BAR_CSTORM_INTMEM +
10405 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
10406 U_SB_ETH_RX_CQ_INDEX), sc->rx_ticks / (BXE_BTR * 4));
/* Enable (0) or disable (1) RX coalescing based on the tick count. */
10408 REG_WR16(sc, BAR_CSTORM_INTMEM +
10409 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
10410 U_SB_ETH_RX_CQ_INDEX),
10411 (sc->rx_ticks / (BXE_BTR * 4)) ? 0 : 1);
10413 /* Transmit interrupt coalescing is done on CSTORM. */
10414 REG_WR8(sc, BAR_CSTORM_INTMEM +
10415 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
10416 C_SB_ETH_TX_CQ_INDEX), sc->tx_ticks / (BXE_BTR * 4));
10417 REG_WR16(sc, BAR_CSTORM_INTMEM +
10418 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
10419 C_SB_ETH_TX_CQ_INDEX),
10420 (sc->tx_ticks / (BXE_BTR * 4)) ? 0 : 1);
10423 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10427 * Allocate an mbuf and assign it to the TPA pool.
10430 * 0 = Success, !0 = Failure
10433 * fp->tpa_mbuf_ptr[queue]
10434 * fp->tpa_mbuf_map[queue]
10435 * fp->tpa_mbuf_segs[queue]
/*
 * Allocate an mbuf and assign it to slot 'queue' of the fastpath TPA pool.
 *
 * Allocates a jumbo cluster of sc->mbuf_alloc_size bytes, DMA-maps it
 * through the spare map, and — only after the new mapping succeeds —
 * swaps it with the existing slot so the old mbuf/map are never lost on
 * failure. The spare-map swap trick means a failed load leaves the
 * original slot contents intact.
 *
 * Updates on success: fp->tpa_mbuf_ptr[queue], fp->tpa_mbuf_map[queue],
 * fp->tpa_mbuf_segs[queue].
 *
 * Returns: 0 on success, non-zero on failure (via 'rc').
 *
 * NOTE(review): excerpt is elided (return type, braces, 'rc' init, and
 * the #ifdef BXE_DEBUG guards around the failure-simulation code are
 * missing lines) — confirm against the full source.
 */
10438 bxe_alloc_tpa_mbuf(struct bxe_fastpath *fp, int queue)
10440 struct bxe_softc *sc;
10441 bus_dma_segment_t segs[1];
10447 DBENTER(BXE_INSANE_TPA);
/* Sanity check: this should never be called with TPA disabled. */
10450 DBRUNIF((fp->disable_tpa == TRUE),
10451 BXE_PRINTF("%s(): fp[%02d] TPA disabled!\n",
10452 __FUNCTION__, fp->index));
10455 /* Simulate an mbuf allocation failure. */
10456 if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
10457 sc->debug_sim_mbuf_alloc_failed++;
10458 fp->mbuf_tpa_alloc_failed++;
10460 goto bxe_alloc_tpa_mbuf_exit;
10464 /* Allocate the new TPA mbuf. */
10465 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
10466 if (__predict_false(m == NULL)) {
10467 fp->mbuf_tpa_alloc_failed++;
10469 goto bxe_alloc_tpa_mbuf_exit;
10472 DBRUN(fp->tpa_mbuf_alloc++);
10474 /* Initialize the mbuf buffer length. */
10475 m->m_pkthdr.len = m->m_len = sc->mbuf_alloc_size;
10478 /* Simulate an mbuf mapping failure. */
10479 if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
10480 sc->debug_sim_mbuf_map_failed++;
10481 fp->mbuf_tpa_mapping_failed++;
10483 DBRUN(fp->tpa_mbuf_alloc--);
10485 goto bxe_alloc_tpa_mbuf_exit;
10489 /* Map the TPA mbuf into non-paged pool. */
10490 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
10491 fp->tpa_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
10492 if (__predict_false(rc != 0)) {
10493 fp->mbuf_tpa_mapping_failed++;
10495 DBRUN(fp->tpa_mbuf_alloc--);
10496 goto bxe_alloc_tpa_mbuf_exit;
10499 /* All mbufs must map to a single segment. */
10500 KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
10501 __FUNCTION__, nsegs));
10503 /* Release any existing TPA mbuf mapping. */
10504 if (fp->tpa_mbuf_map[queue] != NULL) {
10505 bus_dmamap_sync(fp->rx_mbuf_tag,
10506 fp->tpa_mbuf_map[queue], BUS_DMASYNC_POSTREAD);
10507 bus_dmamap_unload(fp->rx_mbuf_tag,
10508 fp->tpa_mbuf_map[queue]);
10511 /* Save the mbuf and mapping info for the TPA mbuf. */
/* Swap the (now loaded) spare map into the slot; old map becomes spare. */
10512 map = fp->tpa_mbuf_map[queue];
10513 fp->tpa_mbuf_map[queue] = fp->tpa_mbuf_spare_map;
10514 fp->tpa_mbuf_spare_map = map;
10515 bus_dmamap_sync(fp->rx_mbuf_tag,
10516 fp->tpa_mbuf_map[queue], BUS_DMASYNC_PREREAD);
10517 fp->tpa_mbuf_ptr[queue] = m;
10518 fp->tpa_mbuf_segs[queue] = segs[0];
10520 bxe_alloc_tpa_mbuf_exit:
10521 DBEXIT(BXE_INSANE_TPA);
10526 * Allocate mbufs for a fastpath TPA pool.
10529 * 0 = Success, !0 = Failure.
/*
 * Populate the fastpath TPA pool with mbufs.
 *
 * Skips entirely (and marks fp->disable_tpa) if TPA is not enabled.
 * Otherwise allocates one mbuf per aggregation queue (the count depends
 * on whether the chip is an E1 or E1H) and marks each queue's TPA state
 * as stopped. Any allocation failure disables TPA for this fastpath.
 *
 * Returns: 0 on success, non-zero on failure (via 'rc').
 *
 * NOTE(review): excerpt is elided (return type, braces, 'sc'
 * assignment, and the rc-check inside the loop are missing lines) —
 * confirm against the full source.
 */
10536 bxe_fill_tpa_pool(struct bxe_fastpath *fp)
10538 struct bxe_softc *sc;
10539 int max_agg_queues, queue, rc;
10542 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* Nothing to do if the device isn't using TPA. */
10545 if (!TPA_ENABLED(sc)) {
10546 fp->disable_tpa = TRUE;
10547 goto bxe_fill_tpa_pool_exit;
10550 max_agg_queues = CHIP_IS_E1(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
10551 ETH_MAX_AGGREGATION_QUEUES_E1H;
10553 /* Assume the fill operation worked. */
10554 fp->disable_tpa = FALSE;
10556 /* Fill the TPA pool. */
10557 for (queue = 0; queue < max_agg_queues; queue++) {
10558 rc = bxe_alloc_tpa_mbuf(fp, queue);
10561 "%s(%d): fp[%02d] TPA disabled!\n",
10562 __FILE__, __LINE__, fp->index);
10563 fp->disable_tpa = TRUE;
10566 fp->tpa_state[queue] = BXE_TPA_STATE_STOP;
10569 bxe_fill_tpa_pool_exit:
10570 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10575 * Free all mbufs from a fastpath TPA pool.
10581 * fp->tpa_mbuf_ptr[]
10582 * fp->tpa_mbuf_map[]
10583 * fp->tpa_mbuf_alloc
/*
 * Free all mbufs and unload all DMA maps in a fastpath TPA pool.
 *
 * Safe to call when nothing was allocated: bails out early if the RX
 * mbuf tag was never created, and checks each map/mbuf slot for NULL.
 *
 * Clears: fp->tpa_mbuf_ptr[], fp->tpa_mbuf_map[] contents,
 * fp->tpa_mbuf_alloc (debug counter).
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, and the 'sc'
 * assignment are missing lines) — confirm against the full source.
 */
10586 bxe_free_tpa_pool(struct bxe_fastpath *fp)
10588 struct bxe_softc *sc;
10589 int i, max_agg_queues;
10592 DBENTER(BXE_INSANE_LOAD | BXE_INSANE_UNLOAD | BXE_INSANE_TPA);
10594 if (fp->rx_mbuf_tag == NULL)
10595 goto bxe_free_tpa_pool_exit;
10597 max_agg_queues = CHIP_IS_E1H(sc) ?
10598 ETH_MAX_AGGREGATION_QUEUES_E1H :
10599 ETH_MAX_AGGREGATION_QUEUES_E1;
10601 /* Release all mbufs and all DMA maps in the TPA pool. */
10602 for (i = 0; i < max_agg_queues; i++) {
10603 if (fp->tpa_mbuf_map[i] != NULL) {
10604 bus_dmamap_sync(fp->rx_mbuf_tag, fp->tpa_mbuf_map[i],
10605 BUS_DMASYNC_POSTREAD);
10606 bus_dmamap_unload(fp->rx_mbuf_tag, fp->tpa_mbuf_map[i]);
10609 if (fp->tpa_mbuf_ptr[i] != NULL) {
10610 m_freem(fp->tpa_mbuf_ptr[i]);
10611 DBRUN(fp->tpa_mbuf_alloc--);
10612 fp->tpa_mbuf_ptr[i] = NULL;
10616 bxe_free_tpa_pool_exit:
10617 DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_UNLOAD | BXE_INSANE_TPA);
10621 * Allocate an mbuf and assign it to the receive scatter gather chain.
10622 * The caller must take care to save a copy of the existing mbuf in the
10626 * 0 = Success, !0= Failure.
10629 * fp->sg_chain[index]
10630 * fp->rx_sge_buf_ptr[index]
10631 * fp->rx_sge_buf_map[index]
10632 * fp->rx_sge_spare_map
/*
 * Allocate an mbuf and install it at 'index' in the RX scatter-gather
 * (SGE) chain.
 *
 * Allocates an SGE_PAGE_SIZE cluster, DMA-maps it through the spare
 * map, and only after a successful load swaps the mapping into the
 * slot and writes the bus address into the hardware SGE descriptor
 * (little-endian hi/lo halves). A failed load leaves the original
 * slot contents intact.
 *
 * Updates on success: fp->sg_chain[index], fp->rx_sge_buf_ptr[index],
 * fp->rx_sge_buf_map[index], fp->rx_sge_spare_map.
 *
 * Returns: 0 on success, non-zero on failure (via 'rc').
 *
 * NOTE(review): excerpt is elided (return type, braces, 'rc' init, and
 * the #ifdef BXE_DEBUG guards around the failure-simulation code are
 * missing lines) — confirm against the full source.
 */
10635 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index)
10637 struct bxe_softc *sc;
10638 struct eth_rx_sge *sge;
10639 bus_dma_segment_t segs[1];
10645 DBENTER(BXE_INSANE_TPA);
10649 /* Simulate an mbuf allocation failure. */
10650 if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
10651 sc->debug_sim_mbuf_alloc_failed++;
10652 fp->mbuf_sge_alloc_failed++;
10654 goto bxe_alloc_rx_sge_mbuf_exit;
10658 /* Allocate a new SGE mbuf. */
10659 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
10660 if (__predict_false(m == NULL)) {
10661 fp->mbuf_sge_alloc_failed++;
10663 goto bxe_alloc_rx_sge_mbuf_exit;
10666 DBRUN(fp->sge_mbuf_alloc++);
10668 /* Initialize the mbuf buffer length. */
10669 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
10672 /* Simulate an mbuf mapping failure. */
10673 if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
10674 sc->debug_sim_mbuf_map_failed++;
10675 fp->mbuf_sge_mapping_failed++;
10677 DBRUN(fp->sge_mbuf_alloc--);
10679 goto bxe_alloc_rx_sge_mbuf_exit;
10683 /* Map the SGE mbuf into non-paged pool. */
10684 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_buf_tag,
10685 fp->rx_sge_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
10686 if (__predict_false(rc != 0)) {
10687 fp->mbuf_sge_mapping_failed++;
10689 DBRUN(fp->sge_mbuf_alloc--);
10690 goto bxe_alloc_rx_sge_mbuf_exit;
10693 /* All mbufs must map to a single segment. */
10694 KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
10695 __FUNCTION__, nsegs));
10697 /* Unload any existing SGE mbuf mapping. */
10698 if (fp->rx_sge_buf_map[index] != NULL) {
10699 bus_dmamap_sync(fp->rx_sge_buf_tag,
10700 fp->rx_sge_buf_map[index], BUS_DMASYNC_POSTREAD);
10701 bus_dmamap_unload(fp->rx_sge_buf_tag,
10702 fp->rx_sge_buf_map[index]);
10705 /* Add the new SGE mbuf to the SGE ring. */
/* Swap the (now loaded) spare map into the slot; old map becomes spare. */
10706 map = fp->rx_sge_buf_map[index];
10707 fp->rx_sge_buf_map[index] = fp->rx_sge_spare_map;
10708 fp->rx_sge_spare_map = map;
10709 bus_dmamap_sync(fp->rx_sge_buf_tag,
10710 fp->rx_sge_buf_map[index], BUS_DMASYNC_PREREAD);
10711 fp->rx_sge_buf_ptr[index] = m;
/* Publish the buffer's bus address to the hardware SGE descriptor. */
10712 sge = &fp->sg_chain[index];
10713 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
10714 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
10716 bxe_alloc_rx_sge_mbuf_exit:
10717 DBEXIT(BXE_INSANE_TPA);
10722 * Allocate mbufs for a SGE chain.
10725 * 0 = Success, !0 = Failure.
/*
 * Populate the RX scatter-gather (SGE) chain with mbufs.
 *
 * Skips entirely (and marks fp->disable_tpa) if TPA is not enabled,
 * since the SGE chain is only used for TPA. Otherwise allocates a
 * buffer for every usable SGE slot; any failure disables TPA for this
 * fastpath. On completion fp->rx_sge_prod holds the producer index.
 *
 * Returns: 0 on success, non-zero on failure (via 'rc').
 *
 * NOTE(review): excerpt is elided (return type, braces, declarations of
 * 'i'/'index'/'rc', and the rc-check inside the loop are missing
 * lines) — confirm against the full source.
 */
10732 bxe_fill_sg_chain(struct bxe_fastpath *fp)
10734 struct bxe_softc *sc;
10740 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* The SGE chain is only needed when TPA is in use. */
10743 if (!TPA_ENABLED(sc)) {
10744 fp->disable_tpa = TRUE;
10745 goto bxe_fill_sg_chain_exit;
10748 /* Assume the fill operation works. */
10749 fp->disable_tpa = FALSE;
10751 /* Fill the RX SGE chain. */
10753 for (i = 0; i < USABLE_RX_SGE; i++) {
10754 rc = bxe_alloc_rx_sge_mbuf(fp, index);
10757 "%s(%d): fp[%02d] SGE memory allocation failure!\n",
10758 __FILE__, __LINE__, fp->index);
10760 fp->disable_tpa = TRUE;
10763 index = NEXT_SGE_IDX(index);
10766 /* Update the driver's copy of the RX SGE producer index. */
10767 fp->rx_sge_prod = index;
10769 bxe_fill_sg_chain_exit:
10770 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10775 * Free all elements from the receive scatter gather chain.
10781 * fp->rx_sge_buf_ptr[]
10782 * fp->rx_sge_buf_map[]
10783 * fp->sge_mbuf_alloc
/*
 * Free every mbuf and unload every DMA map in the RX scatter-gather
 * chain.
 *
 * Safe to call when nothing was allocated: bails out early if the SGE
 * buffer tag was never created, and NULL-checks each slot.
 *
 * Clears: fp->rx_sge_buf_ptr[], fp->rx_sge_buf_map[] contents,
 * fp->sge_mbuf_alloc (debug counter).
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, and the 'sc'
 * assignment are missing lines) — confirm against the full source.
 */
10786 bxe_free_sg_chain(struct bxe_fastpath *fp)
10788 struct bxe_softc *sc;
10792 DBENTER(BXE_INSANE_TPA);
10794 if (fp->rx_sge_buf_tag == NULL)
10795 goto bxe_free_sg_chain_exit;
10797 /* Free all mbufs and unload all maps. */
10798 for (i = 0; i < TOTAL_RX_SGE; i++) {
10799 /* Free the map and the mbuf if they're allocated. */
10800 if (fp->rx_sge_buf_map[i] != NULL) {
10801 bus_dmamap_sync(fp->rx_sge_buf_tag,
10802 fp->rx_sge_buf_map[i], BUS_DMASYNC_POSTREAD);
10803 bus_dmamap_unload(fp->rx_sge_buf_tag,
10804 fp->rx_sge_buf_map[i]);
10807 if (fp->rx_sge_buf_ptr[i] != NULL) {
10808 m_freem(fp->rx_sge_buf_ptr[i]);
10809 DBRUN(fp->sge_mbuf_alloc--);
10810 fp->rx_sge_buf_ptr[i] = NULL;
10814 bxe_free_sg_chain_exit:
10815 DBEXIT(BXE_INSANE_TPA);
10819 * Allocate an mbuf, if necessary, and add it to the receive chain.
10822 * 0 = Success, !0 = Failure.
/*
 * Allocate an mbuf and install it at 'index' in the RX BD chain.
 *
 * Allocates a cluster of sc->mbuf_alloc_size bytes, DMA-maps it
 * through the spare map, and only after a successful load swaps the
 * mapping into the slot and writes the bus address into the hardware
 * RX buffer descriptor (little-endian hi/lo halves). A failed load
 * leaves the original slot contents intact.
 *
 * Updates on success: fp->rx_chain[index], fp->rx_mbuf_ptr[index],
 * fp->rx_mbuf_map[index], fp->rx_mbuf_spare_map.
 *
 * Returns: 0 on success, non-zero on failure (via 'rc').
 *
 * NOTE(review): excerpt is elided (return type, braces, 'rc' init, and
 * the #ifdef BXE_DEBUG guards around the failure-simulation code are
 * missing lines) — confirm against the full source.
 */
10825 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t index)
10827 struct bxe_softc *sc;
10828 struct eth_rx_bd *rx_bd;
10829 bus_dma_segment_t segs[1];
10835 DBENTER(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
10839 /* Simulate an mbuf allocation failure. */
10840 if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
10841 sc->debug_sim_mbuf_alloc_failed++;
10842 fp->mbuf_rx_bd_alloc_failed++;
10844 goto bxe_alloc_rx_bd_mbuf_exit;
10848 /* Allocate the new RX BD mbuf. */
10849 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
10850 if (__predict_false(m == NULL)) {
10851 fp->mbuf_rx_bd_alloc_failed++;
10853 goto bxe_alloc_rx_bd_mbuf_exit;
10856 DBRUN(fp->rx_mbuf_alloc++);
10858 /* Initialize the mbuf buffer length. */
10859 m->m_pkthdr.len = m->m_len = sc->mbuf_alloc_size;
10862 /* Simulate an mbuf mapping failure. */
10863 if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
10864 sc->debug_sim_mbuf_map_failed++;
10865 fp->mbuf_rx_bd_mapping_failed++;
10867 DBRUN(fp->rx_mbuf_alloc--);
10869 goto bxe_alloc_rx_bd_mbuf_exit;
10873 /* Map the RX BD mbuf into non-paged pool. */
10874 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
10875 fp->rx_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
10876 if (__predict_false(rc != 0)) {
10877 fp->mbuf_rx_bd_mapping_failed++;
10879 DBRUN(fp->rx_mbuf_alloc--);
10880 goto bxe_alloc_rx_bd_mbuf_exit;
10883 /* All mbufs must map to a single segment. */
10884 KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
10885 __FUNCTION__, nsegs));
10887 /* Release any existing RX BD mbuf mapping. */
10888 if (fp->rx_mbuf_map[index] != NULL) {
10889 bus_dmamap_sync(fp->rx_mbuf_tag,
10890 fp->rx_mbuf_map[index], BUS_DMASYNC_POSTREAD);
10891 bus_dmamap_unload(fp->rx_mbuf_tag,
10892 fp->rx_mbuf_map[index]);
10895 /* Save the mbuf and mapping info. */
/* Swap the (now loaded) spare map into the slot; old map becomes spare. */
10896 map = fp->rx_mbuf_map[index];
10897 fp->rx_mbuf_map[index] = fp->rx_mbuf_spare_map;
10898 fp->rx_mbuf_spare_map = map;
10899 bus_dmamap_sync(fp->rx_mbuf_tag,
10900 fp->rx_mbuf_map[index], BUS_DMASYNC_PREREAD);
10901 fp->rx_mbuf_ptr[index] = m;
/* Publish the buffer's bus address to the hardware RX descriptor. */
10902 rx_bd = &fp->rx_chain[index];
10903 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
10904 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
10906 bxe_alloc_rx_bd_mbuf_exit:
10907 DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
10914 * Allocate mbufs for a receive chain.
10917 * 0 = Success, !0 = Failure.
/*
 * Populate the RX BD chain with mbufs.
 *
 * Allocates a buffer for every usable RX BD slot; on completion
 * fp->rx_bd_prod holds the producer index.
 *
 * Returns: 0 on success, non-zero on failure (via 'rc').
 *
 * NOTE(review): excerpt is elided (return type, braces, declarations
 * of 'i'/'index'/'rc', and the rc-check inside the loop are missing
 * lines) — confirm against the full source.
 */
10923 bxe_fill_rx_bd_chain(struct bxe_fastpath *fp)
10925 struct bxe_softc *sc;
10930 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10933 /* Allocate buffers for all the RX BDs in RX BD Chain. */
10934 for (i = 0; i < USABLE_RX_BD; i++) {
10935 rc = bxe_alloc_rx_bd_mbuf(fp, index);
10938 "%s(%d): Memory allocation failure! Cannot fill fp[%02d] RX chain.\n",
10939 __FILE__, __LINE__, fp->index);
10943 index = NEXT_RX_BD(index);
/* Record the producer position for the hardware update later. */
10946 fp->rx_bd_prod = index;
10947 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10952 * Free all buffers from the receive chain.
10958 * fp->rx_mbuf_ptr[]
10959 * fp->rx_mbuf_map[]
10960 * fp->rx_mbuf_alloc
/*
 * Free every mbuf and unload every DMA map in the RX BD chain.
 *
 * Safe to call when nothing was allocated: bails out early if the RX
 * mbuf tag was never created, and NULL-checks each slot.
 *
 * Clears: fp->rx_mbuf_ptr[], fp->rx_mbuf_map[] contents,
 * fp->rx_mbuf_alloc (debug counter).
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, and the 'sc'
 * assignment are missing lines) — confirm against the full source.
 */
10963 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
10965 struct bxe_softc *sc;
10969 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10971 if (fp->rx_mbuf_tag == NULL)
10972 goto bxe_free_rx_bd_chain_exit;
10974 /* Free all mbufs and unload all maps. */
10975 for (i = 0; i < TOTAL_RX_BD; i++) {
10976 if (fp->rx_mbuf_map[i] != NULL) {
10977 bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_mbuf_map[i],
10978 BUS_DMASYNC_POSTREAD);
10979 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_map[i]);
10982 if (fp->rx_mbuf_ptr[i] != NULL) {
10983 m_freem(fp->rx_mbuf_ptr[i]);
10984 DBRUN(fp->rx_mbuf_alloc--);
10985 fp->rx_mbuf_ptr[i] = NULL;
10989 bxe_free_rx_bd_chain_exit:
10990 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
10994 * Setup mutexes used by the driver.
/*
 * Initialize all mutexes used by the driver.
 *
 * Creates the core, slowpath, DMAE, PHY, firmware-mailbox, and print
 * locks on the softc, then one MTX_DEF mutex per fastpath queue named
 * "<devname>:fp[NN]".
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, and the 'fp'
 * assignment inside the loop are missing lines) — confirm against the
 * full source.
 */
11000 bxe_mutexes_alloc(struct bxe_softc *sc)
11002 struct bxe_fastpath *fp;
11005 DBENTER(BXE_VERBOSE_LOAD);
11007 BXE_CORE_LOCK_INIT(sc, device_get_nameunit(sc->dev));
11008 BXE_SP_LOCK_INIT(sc, "bxe_sp_lock");
11009 BXE_DMAE_LOCK_INIT(sc, "bxe_dmae_lock");
11010 BXE_PHY_LOCK_INIT(sc, "bxe_phy_lock");
11011 BXE_FWMB_LOCK_INIT(sc, "bxe_fwmb_lock");
11012 BXE_PRINT_LOCK_INIT(sc, "bxe_print_lock");
11014 /* Allocate one mutex for each fastpath structure. */
11015 for (i = 0; i < sc->num_queues; i++ ) {
11018 /* Allocate per fastpath mutexes. */
11019 snprintf(fp->mtx_name, sizeof(fp->mtx_name), "%s:fp[%02d]",
11020 device_get_nameunit(sc->dev), fp->index);
11021 mtx_init(&fp->mtx, fp->mtx_name, NULL, MTX_DEF);
11024 DBEXIT(BXE_VERBOSE_LOAD);
11028 * Free mutexes used by the driver.
/*
 * Destroy all mutexes created by bxe_mutexes_alloc().
 *
 * Per-fastpath mutexes are destroyed first (only if initialized), then
 * the softc-level locks in roughly the reverse of creation order.
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, and the 'fp'
 * assignment inside the loop are missing lines) — confirm against the
 * full source.
 */
11034 bxe_mutexes_free(struct bxe_softc *sc)
11036 struct bxe_fastpath *fp;
11039 DBENTER(BXE_VERBOSE_UNLOAD);
11041 for (i = 0; i < sc->num_queues; i++ ) {
11044 /* Release per fastpath mutexes. */
11045 if (mtx_initialized(&fp->mtx))
11046 mtx_destroy(&fp->mtx);
11049 BXE_PRINT_LOCK_DESTROY(sc);
11050 BXE_FWMB_LOCK_DESTROY(sc);
11051 BXE_PHY_LOCK_DESTROY(sc);
11052 BXE_DMAE_LOCK_DESTROY(sc);
11053 BXE_SP_LOCK_DESTROY(sc);
11054 BXE_CORE_LOCK_DESTROY(sc);
11056 DBEXIT(BXE_VERBOSE_UNLOAD);
11061 * Free memory and clear the RX data structures.
/*
 * Free memory and clear the RX data structures for every fastpath queue.
 *
 * For each queue, releases the RX BD chain, TPA pool, and SGE chain,
 * then (debug builds) reports any mbufs whose allocation counters did
 * not return to zero — i.e. leaks.
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, and the 'fp'
 * assignment inside the loop are missing lines) — confirm against the
 * full source.
 */
11067 bxe_clear_rx_chains(struct bxe_softc *sc)
11069 struct bxe_fastpath *fp;
11072 DBENTER(BXE_VERBOSE_RESET);
11074 for (i = 0; i < sc->num_queues; i++) {
11077 /* Free all RX buffers. */
11078 bxe_free_rx_bd_chain(fp);
11079 bxe_free_tpa_pool(fp);
11080 bxe_free_sg_chain(fp);
11082 /* Check if any mbufs lost in the process. */
11083 DBRUNIF((fp->tpa_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11084 "%s(): Memory leak! Lost %d mbufs from fp[%02d] TPA pool!\n",
11085 __FUNCTION__, fp->tpa_mbuf_alloc, fp->index));
11086 DBRUNIF((fp->sge_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11087 "%s(): Memory leak! Lost %d mbufs from fp[%02d] SGE chain!\n",
11088 __FUNCTION__, fp->sge_mbuf_alloc, fp->index));
11089 DBRUNIF((fp->rx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11090 "%s(): Memory leak! Lost %d mbufs from fp[%02d] RX chain!\n",
11091 __FUNCTION__, fp->rx_mbuf_alloc, fp->index));
11094 DBEXIT(BXE_VERBOSE_RESET);
11098 * Initialize the receive rings.
/*
 * Initialize the receive rings for every fastpath queue.
 *
 * For each queue: resets producer/consumer indices, wires the
 * status-block consumer pointers, fills the RX BD chain, TPA pool, and
 * SGE chain with mbufs, syncs the descriptor rings for DMA, and tells
 * the controller the initial producer values and the RCQ base address.
 * On any allocation failure the partially built chains are torn back
 * down via bxe_clear_rx_chains().
 *
 * Returns: 0 on success, non-zero on failure (via 'rc').
 *
 * NOTE(review): excerpt is elided (return type, braces, declarations of
 * 'func'/'i'/'rc', the 'fp' assignment, the rc checks after each fill,
 * and the CHIP_IS_E1 guard implied around the USTORM workaround write
 * are missing lines) — confirm against the full source.
 */
11104 bxe_init_rx_chains(struct bxe_softc *sc)
11106 struct bxe_fastpath *fp;
11109 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11111 func = BP_FUNC(sc);
11113 /* Allocate memory for RX and CQ chains. */
11114 for (i = 0; i < sc->num_queues; i++) {
11116 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
11117 "%s(): Initializing fp[%02d] RX chain.\n", __FUNCTION__, i);
11119 fp->rx_bd_cons = fp->rx_bd_prod = 0;
11120 fp->rx_cq_cons = fp->rx_cq_prod = 0;
11122 /* Pointer to status block's CQ consumer index. */
11123 fp->rx_cq_cons_sb = &fp->status_block->
11124 u_status_block.index_values[HC_INDEX_U_ETH_RX_CQ_CONS];
11126 /* Pointer to status block's receive consumer index. */
11127 fp->rx_bd_cons_sb = &fp->status_block->
11128 u_status_block.index_values[HC_INDEX_U_ETH_RX_BD_CONS];
11130 fp->rx_cq_prod = TOTAL_RCQ_ENTRIES;
/* Reset soft RX statistics. */
11131 fp->rx_pkts = fp->rx_tpa_pkts = fp->rx_soft_errors = 0;
11133 /* Allocate memory for the receive chain. */
11134 rc = bxe_fill_rx_bd_chain(fp);
11136 goto bxe_init_rx_chains_exit;
11138 /* Allocate memory for TPA pool. */
11139 rc = bxe_fill_tpa_pool(fp);
11141 goto bxe_init_rx_chains_exit;
11143 /* Allocate memory for scatter-gather chain. */
11144 rc = bxe_fill_sg_chain(fp);
11146 goto bxe_init_rx_chains_exit;
11148 /* Prepare the receive BD and CQ buffers for DMA access. */
11149 bus_dmamap_sync(fp->rx_dma.tag, fp->rx_dma.map,
11150 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
11152 bus_dmamap_sync(fp->rcq_dma.tag, fp->rcq_dma.map,
11153 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
11156 * Tell the controller that we have rx_bd's and CQE's
11157 * available. Warning! this will generate an interrupt
11158 * (to the TSTORM). This must only be done when the
11159 * controller is initialized.
11161 bxe_update_rx_prod(sc, fp, fp->rx_bd_prod,
11162 fp->rx_cq_prod, fp->rx_sge_prod);
11164 /* ToDo - Move to dma_alloc(). */
11166 * Tell controller where the receive CQ
11167 * chains start in physical memory.
11170 REG_WR(sc, BAR_USTORM_INTMEM +
11171 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
11172 U64_LO(fp->rcq_dma.paddr));
11173 REG_WR(sc, BAR_USTORM_INTMEM +
11174 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
11175 U64_HI(fp->rcq_dma.paddr));
11179 bxe_init_rx_chains_exit:
11180 /* Release memory if an error occurred. */
11182 bxe_clear_rx_chains(sc);
11184 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11189 * Free memory and clear the TX data structures.
/*
 * Free memory and clear the TX data structures for every fastpath queue.
 *
 * For each queue, syncs/unloads the DMA map and frees the mbuf for any
 * occupied TX BD slot, then (debug builds) reports a leak if the TX
 * mbuf allocation counter did not return to zero.
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, declarations
 * of 'i'/'j', and the 'fp' assignment inside the loop are missing
 * lines) — confirm against the full source.
 */
11195 bxe_clear_tx_chains(struct bxe_softc *sc)
11197 struct bxe_fastpath *fp;
11200 DBENTER(BXE_VERBOSE_RESET);
11202 for (i = 0; i < sc->num_queues; i++) {
11205 /* Free all mbufs and unload all maps. */
11206 if (fp->tx_mbuf_tag) {
11207 for (j = 0; j < TOTAL_TX_BD; j++) {
11208 if (fp->tx_mbuf_ptr[j] != NULL) {
11209 bus_dmamap_sync(fp->tx_mbuf_tag,
11210 fp->tx_mbuf_map[j],
11211 BUS_DMASYNC_POSTWRITE);
11212 bus_dmamap_unload(fp->tx_mbuf_tag,
11213 fp->tx_mbuf_map[j]);
11214 m_freem(fp->tx_mbuf_ptr[j]);
11215 fp->tx_mbuf_alloc--;
11216 fp->tx_mbuf_ptr[j] = NULL;
11221 /* Check if we lost any mbufs in the process. */
11222 DBRUNIF((fp->tx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
11223 "%s(): Memory leak! Lost %d mbufs from fp[%02d] TX chain!\n",
11224 __FUNCTION__, fp->tx_mbuf_alloc, fp->index));
11227 DBEXIT(BXE_VERBOSE_RESET);
11231 * Initialize the transmit chain.
/*
 * Initialize the transmit chain for every fastpath queue.
 *
 * For each queue: sets up the doorbell record, zeroes the TX
 * producer/consumer indices, wires the status-block TX consumer
 * pointer, resets every soft TX statistics counter, and clears all TX
 * mbuf pointers.
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, declarations
 * of 'i'/'j', and the 'fp' assignment inside the loop are missing
 * lines) — confirm against the full source.
 */
11237 bxe_init_tx_chains(struct bxe_softc *sc)
11239 struct bxe_fastpath *fp;
11242 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11244 for (i = 0; i < sc->num_queues; i++) {
11247 /* Initialize transmit doorbell. */
11248 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
11249 fp->tx_db.data.zero_fill1 = 0;
11250 fp->tx_db.data.prod = 0;
11252 /* Initialize tranmsit producer/consumer indices. */
11253 fp->tx_pkt_prod = fp->tx_pkt_cons = 0;
11254 fp->tx_bd_prod = fp->tx_bd_cons = 0;
11255 fp->tx_bd_used = 0;
11257 /* Pointer to TX packet consumer in status block. */
11258 fp->tx_pkt_cons_sb =
11259 &fp->status_block->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX];
11261 /* Soft TX counters. */
11263 fp->tx_soft_errors = 0;
11264 fp->tx_offload_frames_csum_ip = 0;
11265 fp->tx_offload_frames_csum_tcp = 0;
11266 fp->tx_offload_frames_csum_udp = 0;
11267 fp->tx_offload_frames_tso = 0;
11268 fp->tx_header_splits = 0;
11269 fp->tx_encap_failures = 0;
11270 fp->tx_hw_queue_full = 0;
11271 fp->tx_hw_max_queue_depth = 0;
11272 fp->tx_dma_mapping_failure = 0;
11273 fp->tx_max_drbr_queue_depth = 0;
11274 fp->tx_window_violation_std = 0;
11275 fp->tx_window_violation_tso = 0;
11276 fp->tx_unsupported_tso_request_ipv6 = 0;
11277 fp->tx_unsupported_tso_request_not_tcp = 0;
11278 fp->tx_chain_lost_mbuf = 0;
11279 fp->tx_frame_deferred = 0;
11280 fp->tx_queue_xoff = 0;
11282 /* Clear all TX mbuf pointers. */
11283 for (j = 0; j < TOTAL_TX_BD; j++) {
11284 fp->tx_mbuf_ptr[j] = NULL;
11288 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11292 * Initialize the slowpath ring.
/*
 * Initialize the slowpath (SPQ) ring.
 *
 * Zeroes the slowpath memory, resets the producer state so the chain
 * reads as empty (producer == consumer), and programs the controller
 * (XSTORM fast memory) with the ring's DMA base address and initial
 * producer value.
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, the 'func'
 * declaration, and the value argument of the final REG_WR are missing
 * lines) — confirm against the full source.
 */
11298 bxe_init_sp_ring(struct bxe_softc *sc)
11302 func = BP_FUNC(sc);
11304 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11306 bzero((char *)sc->slowpath, BXE_SLOWPATH_SZ);
11308 /* When the producer equals the consumer the chain is empty. */
11309 sc->spq_left = MAX_SPQ_PENDING;
11310 sc->spq_prod_idx = 0;
11311 sc->dsb_sp_prod = BXE_SP_DSB_INDEX;
11312 sc->spq_prod_bd = sc->spq;
11313 sc->spq_last_bd = sc->spq_prod_bd + MAX_SP_DESC_CNT;
11315 /* Tell the controller the address of the slowpath ring. */
11316 REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
11317 U64_LO(sc->spq_dma.paddr));
11318 REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
11319 U64_HI(sc->spq_dma.paddr));
11320 REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
11323 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11327 * Initialize STORM processor context.
/*
 * Initialize the per-connection STORM processor (eth) context for every
 * fastpath queue.
 *
 * For each queue, fills in the USTORM receive-side context (status
 * block binding, alignment/statistics flags, RX BD buffer size and
 * base address, and — when TPA is active — SGE buffer size, base
 * address, and MTU-to-SGE geometry), the CDU usage words for the
 * U/X aggregation contexts, the XSTORM transmit-side context (TX BD
 * base address and statistics enable), and the CSTORM status-block
 * binding.
 *
 * Returns: nothing.
 *
 * NOTE(review): excerpt is elided (return type, braces, declarations
 * of 'fp'/'cl_id'/'sb_id'/'i', their per-iteration assignments, and
 * part of the max_sges_for_packet shift expression are missing
 * lines) — confirm against the full source.
 */
11333 bxe_init_context(struct bxe_softc *sc)
11335 struct eth_context *context;
11336 struct bxe_fastpath *fp;
11341 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11343 for (i = 0; i < sc->num_queues; i++) {
11344 context = BXE_SP(sc, context[i].eth);
11349 /* Update the USTORM context. */
11350 context->ustorm_st_context.common.sb_index_numbers =
11351 BXE_RX_SB_INDEX_NUM;
11352 context->ustorm_st_context.common.clientId = cl_id;
11353 context->ustorm_st_context.common.status_block_id = sb_id;
11354 /* Enable packet alignment/pad and statistics. */
11355 context->ustorm_st_context.common.flags =
11356 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
11357 if (sc->stats_enable == TRUE)
11358 context->ustorm_st_context.common.flags |=
11359 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
11360 context->ustorm_st_context.common.statistics_counter_id=cl_id;
11362 * Set packet alignment boundary.
11363 * (Must be >= 4 (i.e. 16 bytes).)
11365 context->ustorm_st_context.common.mc_alignment_log_size = 8;
11366 /* Set the size of the receive buffers. */
11367 context->ustorm_st_context.common.bd_buff_size =
11368 sc->mbuf_alloc_size;
11370 /* Set the address of the receive chain base page. */
11371 context->ustorm_st_context.common.bd_page_base_hi =
11372 U64_HI(fp->rx_dma.paddr);
11373 context->ustorm_st_context.common.bd_page_base_lo =
11374 U64_LO(fp->rx_dma.paddr);
11376 if (TPA_ENABLED(sc) && (fp->disable_tpa == FALSE)) {
11377 /* Enable TPA and SGE chain support. */
11378 context->ustorm_st_context.common.flags |=
11379 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
11381 /* Set the size of the SGE buffer. */
11382 context->ustorm_st_context.common.sge_buff_size =
11383 (uint16_t) (SGE_PAGE_SIZE * PAGES_PER_SGE);
11385 /* Set the address of the SGE chain base page. */
11386 context->ustorm_st_context.common.sge_page_base_hi =
11387 U64_HI(fp->sg_dma.paddr);
11388 context->ustorm_st_context.common.sge_page_base_lo =
11389 U64_LO(fp->sg_dma.paddr);
11391 DBPRINT(sc, BXE_VERBOSE_TPA, "%s(): MTU = %d\n",
11392 __FUNCTION__, (int) sc->bxe_ifp->if_mtu);
11394 /* Describe MTU to SGE alignment. */
11395 context->ustorm_st_context.common.max_sges_for_packet =
11396 SGE_PAGE_ALIGN(sc->bxe_ifp->if_mtu) >>
/* Round the SGE count up to a whole number of pages. */
11398 context->ustorm_st_context.common.max_sges_for_packet =
11399 ((context->ustorm_st_context.common.
11400 max_sges_for_packet + PAGES_PER_SGE - 1) &
11401 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11403 DBPRINT(sc, BXE_VERBOSE_TPA,
11404 "%s(): max_sges_for_packet = %d\n", __FUNCTION__,
11405 context->ustorm_st_context.common.max_sges_for_packet);
11408 /* Update USTORM context. */
11409 context->ustorm_ag_context.cdu_usage =
11410 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, i),
11411 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
11413 /* Update XSTORM context. */
11414 context->xstorm_ag_context.cdu_reserved =
11415 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, i),
11416 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
11418 /* Set the address of the transmit chain base page. */
11419 context->xstorm_st_context.tx_bd_page_base_hi =
11420 U64_HI(fp->tx_dma.paddr);
11421 context->xstorm_st_context.tx_bd_page_base_lo =
11422 U64_LO(fp->tx_dma.paddr);
11424 /* Enable XSTORM statistics. */
11425 context->xstorm_st_context.statistics_data = (cl_id |
11426 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
11428 /* Update CSTORM status block configuration. */
11429 context->cstorm_st_context.sb_index_number =
11430 C_SB_ETH_TX_CQ_INDEX;
11431 context->cstorm_st_context.status_block_id = sb_id;
11434 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_ind_table
 *
 * Programs the TSTORM RSS indirection table for this PCI function.  Each
 * byte-wide slot is written with a client ID, spreading the slots
 * round-robin across the active receive queues.  Does nothing when RSS
 * is disabled.
 *
 * NOTE(review): this listing has gaps (missing interior lines such as the
 * return type, braces, and local declarations); comments describe only the
 * visible statements.
 */
11438  * Initialize indirection table.
11444 bxe_init_ind_table(struct bxe_softc *sc)
11448 func = BP_FUNC(sc);
11450 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* RSS disabled: leave the table untouched (presumably an early return here). */
11452 if (sc->multi_mode == ETH_RSS_MODE_DISABLED)
/* One 8-bit write per table slot; i % num_queues rotates over the queues. */
11455 /* Initialize the indirection table. */
11456 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
11457 REG_WR8(sc, BAR_TSTORM_INTMEM +
11458 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
11459 sc->fp->cl_id + (i % sc->num_queues));
11461 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_set_client_config
 *
 * Builds a tstorm_eth_client_config (MTU, statistics/E1HOV-removal flags,
 * VLAN stripping) and writes it, as two 32-bit words, into TSTORM internal
 * memory for every receive queue's client ID.
 */
11465  * Set client configuration.
11471 bxe_set_client_config(struct bxe_softc *sc)
11473 struct tstorm_eth_client_config tstorm_client = {0};
11476 port = BP_PORT(sc);
11478 DBENTER(BXE_VERBOSE_MISC);
11480 tstorm_client.mtu = sc->bxe_ifp->if_mtu; /* ETHERMTU */
/* "STATSITICS" (sic) is the firmware-header macro spelling — do not "fix". */
11481 tstorm_client.config_flags =
11482 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
11483 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
11485 /* Unconditionally enable VLAN tag stripping. */
11487 tstorm_client.config_flags |=
11488 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
11489 DBPRINT(sc, BXE_VERBOSE, "%s(): VLAN tag stripping enabled.\n",
/*
 * Per-queue: point the statistics counter at the queue's client ID, then
 * store the 8-byte config structure at the client's TSTORM offset.
 */
11493 /* Initialize the receive mode for each receive queue. */
11494 for (i = 0; i < sc->num_queues; i++) {
11495 tstorm_client.statistics_counter_id = sc->fp[i].cl_id;
11497 REG_WR(sc, BAR_TSTORM_INTMEM +
11498 TSTORM_CLIENT_CONFIG_OFFSET(port, sc->fp[i].cl_id),
11499 ((uint32_t *) &tstorm_client)[0]);
11500 REG_WR(sc, BAR_TSTORM_INTMEM +
11501 TSTORM_CLIENT_CONFIG_OFFSET(port, sc->fp[i].cl_id) + 4,
11502 ((uint32_t *) &tstorm_client)[1]);
11505 DBEXIT(BXE_VERBOSE_MISC);
/*
 * bxe_set_storm_rx_mode
 *
 * Translates the driver-level receive mode (sc->rx_mode) into the
 * per-function STORM MAC filter configuration and the NIG LLH
 * "pass-to-driver" mask, then writes both to hardware.  Finally pushes the
 * per-client configuration unless the mode is NONE (drop everything).
 *
 * NOTE(review): the switch(mode) opener and the per-case break statements
 * fall in lines missing from this listing — the cases below are NOT
 * fall-through in the original source.
 */
11509  * Set receive mode.
11511  * Programs the MAC according to the type of unicast/broadcast/multicast
11512  * packets it should receive.
11518 bxe_set_storm_rx_mode(struct bxe_softc *sc)
11520 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
11523 int func, i , port;
11525 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* mask selects this function's bit in the per-function filter fields. */
11527 mode = sc->rx_mode;
11528 mask = 1 << BP_L_ID(sc);
11529 func = BP_FUNC(sc);
11530 port = BP_PORT(sc);
11532 /* All but management unicast packets should pass to the host as well */
11533 llh_mask = NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
11534 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
11535 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
11536 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
11538 /* Set the individual accept/drop flags based on the receive mode. */
11540 case BXE_RX_MODE_NONE:
11541 /* Drop everything. */
11542 DBPRINT(sc, BXE_VERBOSE,
11543 "%s(): Setting RX_MODE_NONE for function %d.\n",
11544 __FUNCTION__, func);
11545 tstorm_mac_filter.ucast_drop_all = mask;
11546 tstorm_mac_filter.mcast_drop_all = mask;
11547 tstorm_mac_filter.bcast_drop_all = mask;
11549 case BXE_RX_MODE_NORMAL:
11550 /* Accept all broadcast frames. */
11551 DBPRINT(sc, BXE_VERBOSE,
11552 "%s(): Setting RX_MODE_NORMAL for function %d.\n",
11553 __FUNCTION__, func);
11554 tstorm_mac_filter.bcast_accept_all = mask;
11556 case BXE_RX_MODE_ALLMULTI:
11557 /* Accept all broadcast and multicast frames. */
11558 DBPRINT(sc, BXE_VERBOSE,
11559 "%s(): Setting RX_MODE_ALLMULTI for function %d.\n",
11560 __FUNCTION__, func);
11561 tstorm_mac_filter.mcast_accept_all = mask;
11562 tstorm_mac_filter.bcast_accept_all = mask;
11564 case BXE_RX_MODE_PROMISC:
11565 /* Accept all frames (promiscuous mode). */
11566 DBPRINT(sc, BXE_VERBOSE,
11567 "%s(): Setting RX_MODE_PROMISC for function %d.\n",
11568 __FUNCTION__, func);
11569 tstorm_mac_filter.ucast_accept_all = mask;
11570 tstorm_mac_filter.mcast_accept_all = mask;
11571 tstorm_mac_filter.bcast_accept_all = mask;
/* Promiscuous mode also lets unknown-unicast through the NIG LLH filter. */
11572 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
/* default case (opener missing from listing): log the unexpected mode. */
11577 "%s(%d): Tried to set unknown receive mode (0x%08X)!\n",
11578 __FILE__, __LINE__, mode);
/* Program the LLH mask for this port (port 1 uses the LLH1 register). */
11581 REG_WR(sc, port ? NIG_REG_LLH1_BRB1_DRV_MASK :
11582 NIG_REG_LLH0_BRB1_DRV_MASK, llh_mask);
11584 /* Write the RX mode filter to the TSTORM. */
11585 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config) / 4; i++)
11586 REG_WR(sc, BAR_TSTORM_INTMEM +
11587 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + (i * 4),
11588 ((uint32_t *) &tstorm_mac_filter)[i]);
/* Per-client config only matters when we are actually receiving. */
11590 if (mode != BXE_RX_MODE_NONE)
11591 bxe_set_client_config(sc);
11593 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_internal_common
 *
 * Chip-wide (not per-port/function) internal-memory initialization: clears
 * the USTORM aggregation data area one 32-bit word at a time, since the
 * block-init tables do not cover it.
 */
11597  * Initialize common internal resources. (Applies to both ports and
11604 bxe_init_internal_common(struct bxe_softc *sc)
11608 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11611  * Zero this manually as its initialization is currently not
11612  * handled through block initialization.
11614 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
11615 REG_WR(sc, BAR_USTORM_INTMEM + USTORM_AGG_DATA_OFFSET + i * 4,
11618 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_internal_port
 *
 * Per-port internal initialization: programs the host-coalescing basic
 * timer resolution (BXE_BTR, i.e. 12us = 3 SDM ticks of 4us) into the
 * CSTORM (U and C), TSTORM and XSTORM HC_BTR registers for this port.
 */
11622  * Initialize port specific internal resources.
11628 bxe_init_internal_port(struct bxe_softc *sc)
/*
 * NOTE(review): 'port' is initialized at its declaration and then assigned
 * the same value again on the next visible line — the second assignment is
 * redundant and a candidate for cleanup.
 */
11630 int port = BP_PORT(sc);
11632 port = BP_PORT(sc);
11634 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11635 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
11636 "%s(): Port %d internal initialization.\n", __FUNCTION__, port);
11639  * Each SDM timer tick is 4us. Configure host coalescing
11640  * basic timer resolution (BTR) to 12us (3 * 4us).
11642 REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BXE_BTR);
11643 REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BXE_BTR);
11644 REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BXE_BTR);
11645 REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BXE_BTR);
11647 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_internal_func
 *
 * Per-function internal-memory initialization, in order:
 *   1. RSS/TPA configuration word written to TSTORM.
 *   2. RX mode forced to NONE until link-up (via bxe_set_storm_rx_mode).
 *   3. Per-client X/T/USTORM statistics areas zeroed.
 *   4. Statistics-collection flags and the fw_stats DMA address programmed
 *      into all four STORM processors.
 *   5. E1H-only: function mode and outer VLAN (E1HOV) setup.
 *   6. Per-queue completion-queue base/next-page addresses and the maximum
 *      TPA aggregation size.
 *   7. E1H-only: lossless flow-control (pause) thresholds per queue.
 *   8. Congestion-management (cmng) structure reset and, on the PMF,
 *      stored to XSTORM internal memory.
 *
 * NOTE(review): this listing has gaps; several closing braces, #else/#endif
 * style alternates and some argument lines are not visible.
 */
11651  * Initialize function specific internal resources.
11657 bxe_init_internal_func(struct bxe_softc *sc)
11659 struct tstorm_eth_function_common_config tstorm_config = {0};
11660 struct stats_indication_flags stats_flags = {0};
11661 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
11662 struct bxe_fastpath *fp;
11663 struct eth_rx_cqe_next_page *nextpg;
11664 uint32_t offset, size;
11665 uint16_t max_agg_size;
11667 int func, i, j, port;
11669 port = BP_PORT(sc);
11670 func = BP_FUNC(sc);
11672 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11673 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
11674 "%s(): Port %d, function %d internal initialization.\n",
11675 __FUNCTION__, port, func);
11678  * Configure which fields the controller looks at when
11679  * distributing incoming frames for RSS/multi-queue operation.
11681 if (sc->num_queues > 1) {
11682 tstorm_config.config_flags = MULTI_FLAGS(sc);
11683 tstorm_config.rss_result_mask = MULTI_MASK;
11686 /* Enable TPA if needed */
11687 if (TPA_ENABLED(sc))
11688 tstorm_config.config_flags |=
11689 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
/* Presumably E1H/multi-function guarded — the condition line is missing. */
11692 tstorm_config.config_flags |=
11693 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
11695 tstorm_config.leading_client_id = BP_L_ID(sc);
/* Store the assembled config word into TSTORM internal memory. */
11697 REG_WR(sc, BAR_TSTORM_INTMEM +
11698 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
11699 (*(uint32_t *)&tstorm_config));
11701 /* Don't receive anything until the link is up. */
11702 sc->rx_mode = BXE_RX_MODE_NONE;
11703 sc->rx_mode_cl_mask = (1 << BP_L_ID(sc));
11704 bxe_set_storm_rx_mode(sc);
/* Zero the per-client statistics blocks in each STORM, word by word. */
11706 for (i = 0; i < sc->num_queues; i++) {
11707 cl_id = sc->fp[i].cl_id;
11708 /* Reset XSTORM per client statistics. */
11709 size = sizeof(struct xstorm_per_client_stats) / 4;
11710 offset = BAR_XSTORM_INTMEM +
11711 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
11712 for (j = 0; j < size; j++)
11713 REG_WR(sc, offset +(j * 4), 0);
11715 /* Reset TSTORM per client statistics. */
11716 size = sizeof(struct tstorm_per_client_stats) / 4;
11717 offset = BAR_TSTORM_INTMEM +
11718 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
11719 for (j = 0; j < size; j++)
11720 REG_WR(sc, offset + (j * 4), 0);
11722 /* Reset USTORM per client statistics. */
11723 size = sizeof(struct ustorm_per_client_stats) / 4;
11724 offset = BAR_USTORM_INTMEM +
11725 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
11726 for (j = 0; j < size; j++)
11727 REG_WR(sc, offset + (j * 4), 0);
11730 /* Initialize statistics related context. */
11731 stats_flags.collect_eth = 1;
/* stats_flags is written as two 32-bit words to each STORM processor. */
11733 REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
11734 ((uint32_t *)&stats_flags)[0]);
11735 REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
11736 ((uint32_t *)&stats_flags)[1]);
11738 REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
11739 ((uint32_t *)&stats_flags)[0]);
11740 REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
11741 ((uint32_t *)&stats_flags)[1]);
11743 REG_WR(sc, BAR_USTORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
11744 ((uint32_t *)&stats_flags)[0]);
11745 REG_WR(sc, BAR_USTORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
11746 ((uint32_t *)&stats_flags)[1]);
11748 REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
11749 ((uint32_t *)&stats_flags)[0]);
11750 REG_WR(sc, BAR_CSTORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
11751 ((uint32_t *)&stats_flags)[1]);
/* DMA address (lo/hi) of the host fw_stats buffer for each STORM. */
11753 REG_WR(sc, BAR_XSTORM_INTMEM + XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
11754 U64_LO(BXE_SP_MAPPING(sc, fw_stats)));
11755 REG_WR(sc, BAR_XSTORM_INTMEM +
11756 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
11757 U64_HI(BXE_SP_MAPPING(sc, fw_stats)));
11759 REG_WR(sc, BAR_TSTORM_INTMEM + TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
11760 U64_LO(BXE_SP_MAPPING(sc, fw_stats)));
11761 REG_WR(sc, BAR_TSTORM_INTMEM +
11762 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
11763 U64_HI(BXE_SP_MAPPING(sc, fw_stats)));
11765 REG_WR(sc, BAR_USTORM_INTMEM + USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
11766 U64_LO(BXE_SP_MAPPING(sc, fw_stats)));
11767 REG_WR(sc, BAR_USTORM_INTMEM +
11768 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
11769 U64_HI(BXE_SP_MAPPING(sc, fw_stats)));
11771 /* Additional initialization for 57711/57711E. */
11772 if (CHIP_IS_E1H(sc)) {
/* Function-mode value argument lines are missing from this listing. */
11773 REG_WR8(sc, BAR_XSTORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
11775 REG_WR8(sc, BAR_TSTORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
11777 REG_WR8(sc, BAR_CSTORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
11779 REG_WR8(sc, BAR_USTORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
11782 /* Set the outer VLAN tag. */
11783 REG_WR16(sc, BAR_XSTORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
/* Cap TPA aggregation at one mbuf plus 8 SGE pages, bounded to 16 bits. */
11787 /* Init completion queue mapping and TPA aggregation size. */
11788 max_agg_size = min((uint32_t)(sc->mbuf_alloc_size +
11789 (8 * BCM_PAGE_SIZE * PAGES_PER_SGE)), (uint32_t)0xffff);
11791 DBPRINT(sc, BXE_VERBOSE_TPA, "%s(): max_agg_size = 0x%08X\n",
11792 __FUNCTION__, max_agg_size);
11794 for (i = 0; i < sc->num_queues; i++) {
11796 nextpg = (struct eth_rx_cqe_next_page *)
11797 &fp->rcq_chain[USABLE_RCQ_ENTRIES_PER_PAGE];
11799 /* Program the completion queue address. */
11800 REG_WR(sc, BAR_USTORM_INTMEM +
11801 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
11802 U64_LO(fp->rcq_dma.paddr));
11803 REG_WR(sc, BAR_USTORM_INTMEM +
11804 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
11805 U64_HI(fp->rcq_dma.paddr));
11807 /* Program the first CQ next page address. */
11808 REG_WR(sc, BAR_USTORM_INTMEM +
11809 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
11811 REG_WR(sc, BAR_USTORM_INTMEM +
11812 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
11815 /* Set the maximum TPA aggregation size. */
11816 REG_WR16(sc, BAR_USTORM_INTMEM +
11817 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
11821 /* Configure lossless flow control. */
11822 if (CHIP_IS_E1H(sc)) {
/* Threshold units are not visible here — presumably ring entries. */
11823 rx_pause.bd_thr_low = 250;
11824 rx_pause.cqe_thr_low = 250;
11826 rx_pause.sge_thr_low = 0;
11827 rx_pause.bd_thr_high = 350;
11828 rx_pause.cqe_thr_high = 350;
11829 rx_pause.sge_thr_high = 0;
11831 for (i = 0; i < sc->num_queues; i++) {
/* SGE thresholds only apply when TPA is active on the queue. */
11833 if (fp->disable_tpa == FALSE) {
11834 rx_pause.sge_thr_low = 150;
11835 rx_pause.sge_thr_high = 250;
11838 offset = BAR_USTORM_INTMEM +
11839 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, fp->cl_id);
11842 sizeof(struct ustorm_eth_rx_pause_data_e1h) / 4;
11844 REG_WR(sc, offset + (j * 4),
11845 ((uint32_t *)&rx_pause)[j]);
11849 memset(&(sc->cmng), 0, sizeof(struct cmng_struct_per_port));
11850 if (IS_E1HMF(sc)) {
11852  * During init there is no active link.
11853  * Until link is up, assume link rate @ 10Gbps
11855 bxe_read_mf_cfg(sc);
11858 DBPRINT(sc, BXE_VERBOSE_MISC,
11859 "%s(): All MIN values are zeroes, "
11860 "fairness will be disabled.\n", __FUNCTION__);
/* Only the port-management function (PMF) owns the per-port cmng vars. */
11863 /* Store it to internal memory */
11864 if (sc->port.pmf) {
11865 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
11866 REG_WR(sc, BAR_XSTORM_INTMEM +
11867 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
11868 ((uint32_t *)(&sc->cmng))[i]);
11871 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_internal
 *
 * Dispatches internal-memory initialization according to the load level
 * granted by the MCP: COMMON implies PORT implies FUNCTION (the cases
 * fall through from most- to least-privileged; the break is after the
 * FUNCTION case in lines not visible here).
 */
11875  * Initialize internal resources.
11881 bxe_init_internal(struct bxe_softc *sc, uint32_t load_code)
11884 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
11886 switch (load_code) {
11887 case FW_MSG_CODE_DRV_LOAD_COMMON:
11888 bxe_init_internal_common(sc);
11891 case FW_MSG_CODE_DRV_LOAD_PORT:
11892 bxe_init_internal_port(sc);
11895 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
11896 bxe_init_internal_func(sc);
/* default case (opener missing from listing): unexpected MCP response. */
11901 "%s(%d): Unknown load_code (0x%08X) from MCP!\n",
11902 __FILE__, __LINE__, load_code);
11906 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_init_nic
 *
 * Top-level per-instance initialization: sets up every fastpath (client/
 * status-block IDs, status block), the default status block, coalescing,
 * the RX/TX/slow-path chains, STORM context and internal memory, and the
 * RSS indirection table.  Interrupts stay disabled until init completes.
 *
 * Returns: 0 on success, or the error from bxe_init_rx_chains()
 * (return statement falls in lines not visible here).
 */
11911  * Perform driver instance specific initialization.
11917 bxe_init_nic(struct bxe_softc *sc, uint32_t load_code)
11919 struct bxe_fastpath *fp;
11922 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* Initialize fastpath structures and the status blocks. */
11924 /* Intialize fastpath structures and the status block. */
11925 for (i = 0; i < sc->num_queues; i++) {
/* TPA starts disabled per-queue; enabled later when appropriate. */
11927 fp->disable_tpa = TRUE;
11929 bzero((char *)fp->status_block, BXE_STATUS_BLK_SZ);
11933 /* Set a pointer back to the driver instance. */
11936 /* Set the fastpath starting state as closed. */
11937 fp->state = BXE_FP_STATE_CLOSED;
11939 /* Self-reference to this fastpath's instance. */
11942 /* Set the client ID beginning with the leading id. */
11943 fp->cl_id = BP_L_ID(sc) + i;
11945 /* Set the status block ID for this fastpath instance. */
11946 fp->sb_id = fp->cl_id;
11948 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
11949 "%s(): fp[%02d]: cl_id = %d, sb_id = %d\n",
11950 __FUNCTION__, fp->index, fp->cl_id, fp->sb_id);
11952 /* Initialize the fastpath status block. */
11953 bxe_init_sb(sc, fp->status_block, fp->sb_dma.paddr,
11955 bxe_update_fpsb_idx(fp);
11960 bzero((char *)sc->def_sb, BXE_DEF_STATUS_BLK_SZ);
11962 /* Initialize the Default Status Block. */
11963 bxe_init_def_sb(sc, sc->def_sb, sc->def_sb_dma.paddr, DEF_SB_ID);
11964 bxe_update_dsb_idx(sc);
11966 /* Initialize the coalescence parameters. */
11967 bxe_update_coalesce(sc);
/* RX chain setup can fail (mbuf allocation); bail out on error. */
11969 /* Initialize receive chains. */
11970 rc = bxe_init_rx_chains(sc);
11972 goto bxe_init_nic_exit;
11975 /* Initialize the Transmit BD Chain. */
11976 bxe_init_tx_chains(sc);
11978 /* Initialize the Slow Path Chain. */
11979 bxe_init_sp_ring(sc);
11981 /* Initialize STORM processor context/configuration. */
11982 bxe_init_context(sc);
/* Initialize STORM internal memory per the MCP-granted load level. */
11984 /* Initialize the Context. */
11985 bxe_init_internal(sc, load_code);
11987 /* Enable indirection table for multi-queue operation. */
11988 bxe_init_ind_table(sc);
11992 /* Disable the interrupts from device until init is complete.*/
11993 bxe_int_disable(sc);
11996 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_lb_pckt
 *
 * Injects a minimal debug packet into the NIG loopback interface: two
 * 3-word writes to NIG_REG_DEBUG_PACKET_LB — the first carries dummy
 * MAC addresses with the SOP (0x20) marker, the second a non-IP
 * EtherType pattern with the EOP (0x10) marker.  Uses DMAE wide writes
 * when BXE_USE_DMAE is defined, indirect register writes otherwise
 * (the #else/#endif lines fall in gaps of this listing).
 */
12001  * Send a loopback packet through the Network Interface Glue (NIG) block.
12007 bxe_lb_pckt(struct bxe_softc *sc)
12009 #ifdef BXE_USE_DMAE
12010 uint32_t wb_write[3];
12013 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12015 /* Ethernet source and destination addresses. */
12016 #ifdef BXE_USE_DMAE
12017 wb_write[0] = 0x55555555;
12018 wb_write[1] = 0x55555555;
12019 wb_write[2] = 0x20; /* SOP */
12020 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
12022 REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
12023 REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
12024 REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
12027 /* NON-IP protocol. */
12028 #ifdef BXE_USE_DMAE
12029 wb_write[0] = 0x09000000;
12030 wb_write[1] = 0x55555555;
12031 wb_write[2] = 0x10; /* EOP */
12032 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
12034 REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
12035 REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
12036 REG_WR_IND(sc, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
12039 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_int_mem_test
 *
 * Internal-memory self-test using NIG loopback debug packets, since some
 * internal memories cannot be reached over PCIe.  Sequence:
 *   1. Isolate the parser (disable neighbor-block inputs, zero CFC search
 *      credits), send one packet, verify NIG byte count then PRS packet
 *      count; reset/re-init BRB and PRS.
 *   2. Repeat with 10 packets; verify counts again, then restore the CFC
 *      search credit and verify the parser drains.
 *   3. Drain the NIG EOP FIFO, reset/re-init BRB/PRS, switch the parser to
 *      NIC mode and re-enable neighbor-block inputs.
 * Wait/poll loops and some count checks fall in lines missing from this
 * listing.
 *
 * Returns: 0 = Success, !0 = Failure (per the header comment below).
 */
12043  * Perform an internal memory test.
12045  * Some internal memories are not accessible through the PCIe interface so
12046  * we send some debug packets for the test.
12049  *   0 = Success, !0 = Failure.
12052 bxe_int_mem_test(struct bxe_softc *sc)
12060 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12062 /* Perform a single debug packet test. */
12064 /* Disable inputs of parser neighbor blocks. */
12065 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
12066 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
12067 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
12068 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
12070 /* Write 0 to parser credits for CFC search request. */
12071 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
12073 /* Send an Ethernet packet. */
12076 /* Wait until NIG register shows 1 packet of size 0x10. */
/* Wide (2-word) DMAE read of the BRB octet counter into the SP buffer. */
12079 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
12080 val = *BXE_SP(sc, wb_data[0]);
12089 DBPRINT(sc, BXE_FATAL,
12090 "%s(): NIG loopback test 1 timeout (val = 0x%08X)!\n",
12091 __FUNCTION__, val);
12093 goto bxe_int_mem_test_exit;
12096 /* Wait until PRS register shows 1 packet */
12099 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
12109 DBPRINT(sc, BXE_FATAL,
12110 "%s(): PRS loopback test 1 timeout (val = 0x%08X)!\n",
12111 __FUNCTION__, val);
12113 goto bxe_int_mem_test_exit;
/* Bits 0-1 of RESET_REG_1 cover the BRB and PRS blocks. */
12116 /* Reset and init BRB, PRS. */
12117 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3)
12119 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
12121 bxe_init_block(sc, BRB1_BLOCK, COMMON_STAGE);
12122 bxe_init_block(sc, PRS_BLOCK, COMMON_STAGE);
12124 /* Perform the test again, this time with 10 packets. */
12126 /* Disable inputs of parser neighbor blocks. */
12127 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
12128 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
12129 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
12130 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
12132 /* Write 0 to parser credits for CFC search request. */
12133 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
12135 /* Send 10 Ethernet packets. */
12136 for (i = 0; i < 10; i++)
12139 /* Wait until NIG shows 10 + 1 packets of size 11 * 0x10 = 0xb0. */
12142 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
12143 val = *BXE_SP(sc, wb_data[0]);
12152 DBPRINT(sc, BXE_FATAL,
12153 "%s(): NIG loopback test 2 timeout (val = 0x%08X)!\n",
12154 __FUNCTION__, val);
12156 goto bxe_int_mem_test_exit;
12159 /* Wait until PRS register shows 2 packets. */
12160 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
12162 DBPRINT(sc, BXE_FATAL,
12163 "%s(): PRS loopback test 2 timeout (val = 0x%x)!\n",
12164 __FUNCTION__, val);
12166 goto bxe_int_mem_test_exit;
/* Restoring the credit lets the parser drain the queued packet. */
12169 /* Write 1 to parser credits for CFC search request. */
12170 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
12172 /* Wait until PRS register shows 3 packets. */
12175 /* Wait until NIG register shows 1 packet of size 0x10. */
12176 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
12178 DBPRINT(sc, BXE_FATAL,
12179 "%s(): PRS loopback test 3 timeout (val = 0x%08X)!\n",
12180 __FUNCTION__, val);
12182 goto bxe_int_mem_test_exit;
/* 11 reads: one per packet sent (1 + 10) to drain the EOP FIFO. */
12185 /* Clear NIG end-of-packet FIFO. */
12186 for (i = 0; i < 11; i++)
12187 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
12189 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
12191 DBPRINT(sc, BXE_INFO, "%s(): Unable to clear NIG!\n",
12194 goto bxe_int_mem_test_exit;
12197 /* Reset and init BRB, PRS, NIG. */
12198 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
12200 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
12202 bxe_init_block(sc, BRB1_BLOCK, COMMON_STAGE);
12203 bxe_init_block(sc, PRS_BLOCK, COMMON_STAGE);
12205 /* Set NIC mode. */
12206 REG_WR(sc, PRS_REG_NIC_MODE, 1);
12208 /* Enable inputs of parser neighbor blocks. */
12209 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
12210 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
12211 REG_WR(sc, CFC_REG_DEBUG0, 0x0);
12212 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
12214 bxe_int_mem_test_exit:
12215 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * bxe_enable_blocks_attention
 *
 * Unmasks attention (error) interrupts from the chip's internal blocks by
 * writing 0 (all sources enabled) to each block's interrupt-mask register.
 * Two exceptions keep specific sources masked: PXP2 INT_MASK_0 (0x480000)
 * and PBF (0x18) — the masked-bit meanings are not visible here.
 */
12220  * Enable attentions from various blocks.
12226 bxe_enable_blocks_attention(struct bxe_softc *sc)
12229 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12231 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
12232 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
12233 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
12234 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
12235 REG_WR(sc, QM_REG_QM_INT_MASK, 0);
12236 REG_WR(sc, TM_REG_TM_INT_MASK, 0);
12237 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
12238 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
12239 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
12241 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
12242 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
12243 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
12245 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
12246 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
12247 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
12248 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
/* PXP2 keeps some sources masked (0x480000) — meaning not visible here. */
12250 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
12252 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
12253 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
12254 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
12256 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
12257 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
/* NOTE(review): '0X18' (capital X) is legal but inconsistent with the file. */
12258 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0X18);
12260 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/*
 * PCI read/write arbiter configuration tables.
 *
 * NUM_WR_Q / NUM_RD_Q: number of write/read virtual queues in the arbiter.
 * MAX_RD_ORD / MAX_WR_ORD: largest supported max-read-request / max-payload
 * "order" index used to select a column in the tables below.
 */
12268  * This code configures the PCI read/write arbiter
12269  * which implements a weighted round robin
12270  * between the virtual queues in the chip.
12272  * The values were derived for each PCI max payload and max request size.
12273  * since max payload and max request size are only known at run time,
12274  * this is done as a separate init stage.
12277 #define NUM_WR_Q 13
12278 #define NUM_RD_Q 29
12279 #define MAX_RD_ORD 3
12280 #define MAX_WR_ORD 2
/* struct arb_line fields (l, add, ubound) are defined in missing lines. */
12282 /* Configuration for one arbiter queue. */
12289 /* Derived configuration for each read queue for each max request size. */
12290 static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
12291 /* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
12292 { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
12293 { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
12294 { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
12295 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
12296 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
12297 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
12298 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
12299 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
12300 /* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12301 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12302 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12303 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12304 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12305 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12306 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12307 { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
12308 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12309 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12310 /* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12311 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12312 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12313 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12314 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12315 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12316 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12317 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12318 { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
12319 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
/*
 * Per-write-queue arbiter values, one {l, add, ubound} triple per supported
 * max-payload order (columns 0..MAX_WR_ORD).  Row order matches
 * write_arb_addr below; the last row is written to PXP2_REG_PSWRQ_BW_RD
 * as a packed value in bxe_init_pxp_arb().
 */
12322 /* Derived configuration for each write queue for each max request size. */
12323 static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
12324 /* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
12325 { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
12326 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
12327 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
12328 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
12329 { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
12330 { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
12331 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
12332 { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
12333 /* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
12334 { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
12335 { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
12336 { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
/*
 * Register address triples (l, add, ubound) for the first NUM_RD_Q-1 read
 * queues; the final queue has no per-queue registers and is instead packed
 * into PXP2_REG_PSWRQ_BW_WR by bxe_init_pxp_arb().  Note the mix of
 * RQ_BW_RD_* and legacy PSWRQ_BW_* register names per queue.
 */
12339 /* Register addresses for read queues. */
12340 static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
12341 /* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
12342 PXP2_REG_RQ_BW_RD_UBOUND0},
12343 {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
12344 PXP2_REG_PSWRQ_BW_UB1},
12345 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
12346 PXP2_REG_PSWRQ_BW_UB2},
12347 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
12348 PXP2_REG_PSWRQ_BW_UB3},
12349 {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
12350 PXP2_REG_RQ_BW_RD_UBOUND4},
12351 {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
12352 PXP2_REG_RQ_BW_RD_UBOUND5},
12353 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
12354 PXP2_REG_PSWRQ_BW_UB6},
12355 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
12356 PXP2_REG_PSWRQ_BW_UB7},
12357 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
12358 PXP2_REG_PSWRQ_BW_UB8},
12359 /* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
12360 PXP2_REG_PSWRQ_BW_UB9},
12361 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
12362 PXP2_REG_PSWRQ_BW_UB10},
12363 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
12364 PXP2_REG_PSWRQ_BW_UB11},
12365 {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
12366 PXP2_REG_RQ_BW_RD_UBOUND12},
12367 {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
12368 PXP2_REG_RQ_BW_RD_UBOUND13},
12369 {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
12370 PXP2_REG_RQ_BW_RD_UBOUND14},
12371 {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
12372 PXP2_REG_RQ_BW_RD_UBOUND15},
12373 {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
12374 PXP2_REG_RQ_BW_RD_UBOUND16},
12375 {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
12376 PXP2_REG_RQ_BW_RD_UBOUND17},
12377 {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
12378 PXP2_REG_RQ_BW_RD_UBOUND18},
12379 /* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
12380 PXP2_REG_RQ_BW_RD_UBOUND19},
12381 {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
12382 PXP2_REG_RQ_BW_RD_UBOUND20},
/* Queue 21 registers are skipped here (L20 jumps to L22) — intentional
 * per the hardware register map, presumably; verify against the chip docs. */
12383 {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
12384 PXP2_REG_RQ_BW_RD_UBOUND22},
12385 {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
12386 PXP2_REG_RQ_BW_RD_UBOUND23},
12387 {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
12388 PXP2_REG_RQ_BW_RD_UBOUND24},
12389 {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
12390 PXP2_REG_RQ_BW_RD_UBOUND25},
12391 {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
12392 PXP2_REG_RQ_BW_RD_UBOUND26},
12393 {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
12394 PXP2_REG_RQ_BW_RD_UBOUND27},
12395 {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
12396 PXP2_REG_PSWRQ_BW_UB28}
/*
 * Register address triples (l, add, ubound) for the first NUM_WR_Q-1 write
 * queues; the final queue is packed into PXP2_REG_PSWRQ_BW_RD by
 * bxe_init_pxp_arb().  Queues 29/30 (RQ_BW_WR_*) share their registers
 * with a second bit-field and get read-modify-write treatment there.
 */
12399 /* Register addresses for write queues. */
12400 static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
12401 /* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
12402 PXP2_REG_PSWRQ_BW_UB1},
12403 {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
12404 PXP2_REG_PSWRQ_BW_UB2},
12405 {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
12406 PXP2_REG_PSWRQ_BW_UB3},
12407 {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
12408 PXP2_REG_PSWRQ_BW_UB6},
12409 {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
12410 PXP2_REG_PSWRQ_BW_UB7},
12411 {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
12412 PXP2_REG_PSWRQ_BW_UB8},
12413 {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
12414 PXP2_REG_PSWRQ_BW_UB9},
12415 {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
12416 PXP2_REG_PSWRQ_BW_UB10},
12417 {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
12418 PXP2_REG_PSWRQ_BW_UB11},
12419 /* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
12420 PXP2_REG_PSWRQ_BW_UB28},
12421 {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
12422 PXP2_REG_RQ_BW_WR_UBOUND29},
12423 {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
12424 PXP2_REG_RQ_BW_WR_UBOUND30}
/*
 * bxe_init_pxp_arb
 *
 * Programs the PXP2 PCI read/write arbiter from the precomputed tables
 * above, using r_order (max read request size index) and w_order (max
 * payload size index) to pick the table column.  Orders above the table
 * limits are clamped.  Queues whose registers are shared (WR queues 29/30)
 * are merged in with read-modify-write; the last read and write queues are
 * written as packed values.  Finally sets the MBS registers, an optional
 * PDR limit, the USDMDP threshold, and (E1H only) the per-block MPS values.
 */
12428 bxe_init_pxp_arb(struct bxe_softc *sc, int r_order, int w_order)
/* Clamp out-of-range orders to the largest supported table column. */
12432 if (r_order > MAX_RD_ORD) {
12433 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12434 "%s(): Read order of %d order adjusted to %d\n",
12435 __FUNCTION__, r_order, MAX_RD_ORD);
12436 r_order = MAX_RD_ORD;
12438 if (w_order > MAX_WR_ORD) {
12439 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12440 "%s(): Write order of %d order adjusted to %d\n",
12441 __FUNCTION__, w_order, MAX_WR_ORD);
12442 w_order = MAX_WR_ORD;
12445 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12446 "%s(): Read order %d, write order %d\n",
12447 __FUNCTION__, r_order, w_order);
/* Straight per-queue writes for all but the last read queue. */
12449 for (i = 0; i < NUM_RD_Q - 1; i++) {
12450 REG_WR(sc, read_arb_addr[i].l,
12451 read_arb_data[i][r_order].l);
12452 REG_WR(sc, read_arb_addr[i].add,
12453 read_arb_data[i][r_order].add);
12454 REG_WR(sc, read_arb_addr[i].ubound,
12455 read_arb_data[i][r_order].ubound);
/* Write queues 29/30 share registers: merge via read-modify-write below. */
12458 for (i = 0; i < NUM_WR_Q - 1; i++) {
12459 if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
12460 (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
12462 REG_WR(sc, write_arb_addr[i].l,
12463 write_arb_data[i][w_order].l);
12465 REG_WR(sc, write_arb_addr[i].add,
12466 write_arb_data[i][w_order].add);
12468 REG_WR(sc, write_arb_addr[i].ubound,
12469 write_arb_data[i][w_order].ubound);
12472 val = REG_RD(sc, write_arb_addr[i].l);
12473 REG_WR(sc, write_arb_addr[i].l, val |
12474 (write_arb_data[i][w_order].l << 10));
12476 val = REG_RD(sc, write_arb_addr[i].add);
12477 REG_WR(sc, write_arb_addr[i].add, val |
12478 (write_arb_data[i][w_order].add << 10));
12480 val = REG_RD(sc, write_arb_addr[i].ubound);
12481 REG_WR(sc, write_arb_addr[i].ubound, val |
12482 (write_arb_data[i][w_order].ubound << 7));
/*
 * Last write queue packed into PSWRQ_BW_RD and last read queue into
 * PSWRQ_BW_WR.  NOTE(review): the RD/WR register names look swapped
 * relative to the data tables, but this matches the upstream Broadcom
 * driver — confirm against the PXP2 register map before "fixing".
 */
12486 val = write_arb_data[NUM_WR_Q - 1][w_order].add;
12487 val += write_arb_data[NUM_WR_Q - 1][w_order].ubound << 10;
12488 val += write_arb_data[NUM_WR_Q - 1][w_order].l << 17;
12489 REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);
12491 val = read_arb_data[NUM_RD_Q - 1][r_order].add;
12492 val += read_arb_data[NUM_RD_Q - 1][r_order].ubound << 10;
12493 val += read_arb_data[NUM_RD_Q - 1][r_order].l << 17;
12494 REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);
/* Maximum block sizes derive directly from the orders. */
12496 REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
12497 REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
12498 REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
12499 REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);
12501 if (r_order == MAX_RD_ORD)
12502 REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
/* Threshold scales with payload size: 0x18 << w_order. */
12504 REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
12506 if (CHIP_IS_E1H(sc)) {
12507 /* MPS w_order optimal TH presently TH
/* MPS value 2 for 128-byte payloads (w_order 0), else 3. */
12512 val = ((w_order == 0) ? 2 : 3);
12513 REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
12514 REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
12515 REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
12516 REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
12517 REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
12518 REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
12519 REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
12520 REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
12521 REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
12522 REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
12523 REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
/*
 * bxe_init_pxp
 *
 * Reads the PCIe Device Control register to extract the negotiated
 * Max_Payload_Size (bits 7:5 -> w_order) and Max_Read_Request_Size
 * (bits 14:12 -> r_order), optionally overriding the read order with the
 * sc->mrrs tunable (-1 means "use the hardware value"), then programs the
 * PXP arbiter accordingly.
 */
12528 bxe_init_pxp(struct bxe_softc *sc)
12531 int r_order, w_order;
12533 devctl = pci_read_config(sc->dev,
12534 sc->pcie_cap + PCI_EXP_DEVCTL, 2);
12535 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12536 "%s(): Read 0x%x from devctl\n", __FUNCTION__, devctl);
12537 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
12538 if (sc->mrrs == -1)
12539 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
/* else branch (opener missing from listing): forced MRRS override. */
12541 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12542 "%s(): Force MRRS read order to %d\n",
12543 __FUNCTION__, sc->mrrs);
12544 r_order = sc->mrrs;
12547 bxe_init_pxp_arb(sc, r_order, w_order);
/*
 * Configure fan-failure detection where the board design requires it.
 *
 * Consults shared memory (written by the bootcode) to decide whether the
 * fan-failure mechanism is enabled outright or keyed off the external PHY
 * type, then wires SPIO 5 as an active-low attention input to the IGU.
 *
 * NOTE(review): some original lines (locals, if-bodies, braces) are not
 * visible in this excerpt; comments describe only the visible code.
 */
12551 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
12553 uint32_t phy_type, val;
12554 int is_required, port;
/* Fan-failure policy comes from shared HW config in shmem. */
12560 val = SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
12561 SHARED_HW_CFG_FAN_FAILURE_MASK;
12563 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
12567 * The fan failure mechanism is usually related to the PHY type since
12568 * the power consumption of the board is affected by the PHY. Currently,
12569 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
12571 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
12572 for (port = PORT_0; port < PORT_MAX; port++) {
12573 phy_type = SHMEM_RD(sc,
12574 dev_info.port_hw_config[port].external_phy_config) &
12575 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
12577 ((phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
12578 (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
12579 (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
12582 if (is_required == 0)
/* Fan failure is indicated by SPIO 5. */
12585 /* Fan failure is indicated by SPIO 5. */
12586 bxe_set_spio(sc, MISC_REGISTERS_SPIO_5, MISC_REGISTERS_SPIO_INPUT_HI_Z);
12588 /* Set to active low mode. */
12589 val = REG_RD(sc, MISC_REG_SPIO_INT);
12590 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
12591 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
12592 REG_WR(sc, MISC_REG_SPIO_INT, val);
12594 /* Enable interrupt to signal the IGU. */
12595 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
12596 val |= (1 << MISC_REGISTERS_SPIO_5);
12597 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
12601 * Common initialization.
12604 * 0 = Success, !0 = Failure.
/*
 * One-time, chip-wide initialization performed by the first function to
 * load (FW_MSG_CODE_DRV_LOAD_COMMON): resets all blocks except the BMAC,
 * brings every hardware block through its COMMON init stage, verifies
 * PXP2/CFC initialization completed, and performs board-specific setup
 * (fan failure detection, common PHY init).
 *
 * NOTE(review): this excerpt elides some original lines (if-conditions
 * after register polls, braces, locals); comments describe only the
 * visible code.
 */
12607 bxe_init_common(struct bxe_softc *sc)
12613 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12615 /* Reset all blocks within the chip except the BMAC. */
12616 bxe_reset_common(sc);
/* Take all blocks back out of reset. */
12618 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
12619 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
12622 bxe_init_block(sc, MISC_BLOCK, COMMON_STAGE);
12623 if (CHIP_IS_E1H(sc))
12624 REG_WR(sc, MISC_REG_E1HMF_MODE, IS_E1HMF(sc));
/* Pulse LCPLL control to latch the new setting. */
12626 REG_WR(sc, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
12628 REG_WR(sc, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
12630 bxe_init_block(sc, PXP_BLOCK, COMMON_STAGE);
12631 if (CHIP_IS_E1(sc)) {
12633 * Enable HW interrupt from PXP on USDM overflow
12634 * bit 16 on INT_MASK_0.
12636 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
12639 bxe_init_block(sc, PXP2_BLOCK, COMMON_STAGE);
/* Configure PXP2 request/read endianness for big-endian hosts. */
12642 #ifdef __BIG_ENDIAN
12643 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
12644 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
12645 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
12646 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
12647 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
12648 /* Make sure this value is 0. */
12649 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
12651 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
12652 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
12653 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
12654 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
12657 REG_WR(sc, PXP2_REG_RQ_CDU_P_SIZE, 2);
12659 /* Let the HW do its magic ... */
12661 /* Finish the PXP initialization. */
12662 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
12664 BXE_PRINTF("%s(%d): PXP2 CFG failed!\n", __FILE__, __LINE__);
12666 goto bxe_init_common_exit;
12669 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
12671 BXE_PRINTF("%s(%d): PXP2 RD_INIT failed!\n", __FILE__,
12674 goto bxe_init_common_exit;
12677 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
12678 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
12680 bxe_init_block(sc, DMAE_BLOCK, COMMON_STAGE);
/* DMAE is usable from this point on. */
12682 sc->dmae_ready = 1;
12683 bxe_init_fill(sc, TSEM_REG_PRAM, 0, 8);
12685 bxe_init_block(sc, TCM_BLOCK, COMMON_STAGE);
12686 bxe_init_block(sc, UCM_BLOCK, COMMON_STAGE);
12687 bxe_init_block(sc, CCM_BLOCK, COMMON_STAGE);
12688 bxe_init_block(sc, XCM_BLOCK, COMMON_STAGE);
12690 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
12691 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
12692 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
12693 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
12695 bxe_init_block(sc, QM_BLOCK, COMMON_STAGE);
12697 /* Soft reset pulse. */
12698 REG_WR(sc, QM_REG_SOFT_RESET, 1);
12699 REG_WR(sc, QM_REG_SOFT_RESET, 0);
12701 bxe_init_block(sc, DQ_BLOCK, COMMON_STAGE);
12702 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
12704 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
12706 bxe_init_block(sc, BRB1_BLOCK, COMMON_STAGE);
12707 bxe_init_block(sc, PRS_BLOCK, COMMON_STAGE);
12708 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
12710 if (CHIP_IS_E1H(sc))
12711 REG_WR(sc, PRS_REG_E1HOV_MODE, IS_E1HMF(sc));
12713 bxe_init_block(sc, TSDM_BLOCK, COMMON_STAGE);
12714 bxe_init_block(sc, CSDM_BLOCK, COMMON_STAGE);
12715 bxe_init_block(sc, USDM_BLOCK, COMMON_STAGE);
12716 bxe_init_block(sc, XSDM_BLOCK, COMMON_STAGE);
12717 /* Clear STORM processor memory. */
12718 bxe_init_fill(sc, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12719 bxe_init_fill(sc, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12720 bxe_init_fill(sc, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12721 bxe_init_fill(sc, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(sc));
12723 bxe_init_block(sc, TSEM_BLOCK, COMMON_STAGE);
12724 bxe_init_block(sc, USEM_BLOCK, COMMON_STAGE);
12725 bxe_init_block(sc, CSEM_BLOCK, COMMON_STAGE);
12726 bxe_init_block(sc, XSEM_BLOCK, COMMON_STAGE);
12728 /* Sync semi rtc. */
12729 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000);
12730 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000);
12732 bxe_init_block(sc, UPB_BLOCK, COMMON_STAGE);
12733 bxe_init_block(sc, XPB_BLOCK, COMMON_STAGE);
12734 bxe_init_block(sc, PBF_BLOCK, COMMON_STAGE);
/* Hold the searcher in soft reset while writing the RSS keys. */
12736 REG_WR(sc, SRC_REG_SOFT_RST, 1);
12737 /* Setup RSS/multi-queue hashing keys. */
12738 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
12739 REG_WR(sc, i, 0xc0cac01a);
12741 bxe_init_block(sc, SRCH_BLOCK, COMMON_STAGE);
12743 REG_WR(sc, SRC_REG_SOFT_RST, 0);
12745 /* Make sure the cdu_context structure has the right size. */
12746 if (sizeof(union cdu_context) != 1024) {
12747 BXE_PRINTF("%s(%d): Invalid size for context (%ld != 1024)!\n",
12748 __FILE__, __LINE__, (long)sizeof(union cdu_context));
12750 goto bxe_init_common_exit;
12753 bxe_init_block(sc, CDU_BLOCK, COMMON_STAGE);
12756 * val = (num_context_in_page << 24) +
12757 * (context_waste_size << 12) +
12758 * context_line_size.
12761 val = (4 << 24) + (0 << 12) + 1024;
12762 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
12764 bxe_init_block(sc, CFC_BLOCK, COMMON_STAGE);
12765 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
12766 /* Enable context validation interrupt from CFC. */
12767 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
12769 /* Set the thresholds to prevent CFC/CDU race. */
12770 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
12772 bxe_init_block(sc, HC_BLOCK, COMMON_STAGE);
12773 bxe_init_block(sc, MISC_AEU_BLOCK, COMMON_STAGE);
12775 bxe_init_block(sc, PXPCS_BLOCK, COMMON_STAGE);
12776 /* Clear PCIe block debug status bits. */
12777 REG_WR(sc, 0x2814, 0xffffffff);
12778 REG_WR(sc, 0x3820, 0xffffffff);
12780 bxe_init_block(sc, EMAC0_BLOCK, COMMON_STAGE);
12781 bxe_init_block(sc, EMAC1_BLOCK, COMMON_STAGE);
12782 bxe_init_block(sc, DBU_BLOCK, COMMON_STAGE);
12783 bxe_init_block(sc, DBG_BLOCK, COMMON_STAGE);
12785 bxe_init_block(sc, NIG_BLOCK, COMMON_STAGE);
12786 if (CHIP_IS_E1H(sc)) {
12787 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_E1HMF(sc));
12788 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_E1HOV(sc));
12791 /* Finish CFC initialization. */
12792 val = bxe_reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
12794 BXE_PRINTF("%s(%d): CFC LL_INIT failed!\n",
12795 __FILE__, __LINE__);
12797 goto bxe_init_common_exit;
12800 val = bxe_reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
12802 BXE_PRINTF("%s(%d): CFC AC_INIT failed!\n",
12803 __FILE__, __LINE__);
12805 goto bxe_init_common_exit;
12808 val = bxe_reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
12810 BXE_PRINTF("%s(%d): CFC CAM_INIT failed!\n",
12811 __FILE__, __LINE__);
12813 goto bxe_init_common_exit;
12816 REG_WR(sc, CFC_REG_DEBUG0, 0);
12818 /* Read NIG statistic and check for first load since powerup. */
12819 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
12820 val = *BXE_SP(sc, wb_data[0]);
12822 /* Do internal memory self test only after a full power cycle. */
12823 if ((CHIP_IS_E1(sc)) && (val == 0) && bxe_int_mem_test(sc)) {
12824 BXE_PRINTF("%s(%d): Internal memory self-test failed!\n",
12825 __FILE__, __LINE__);
12827 goto bxe_init_common_exit;
12830 /* Handle any board specific initialization. */
12831 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
12832 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
12833 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
12834 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12835 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
12842 bxe_setup_fan_failure_detection(sc);
12844 /* Clear PXP2 attentions. */
12845 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
12847 bxe_enable_blocks_attention(sc);
/* Common PHY init must run under the PHY lock. */
12850 bxe_acquire_phy_lock(sc);
12851 bxe_common_init_phy(sc, sc->common.shmem_base);
12852 bxe_release_phy_lock(sc);
12855 "%s(%d): Bootcode is missing - cannot initialize PHY!\n",
12856 __FILE__, __LINE__);
12858 bxe_init_common_exit:
12859 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12864 * Port initialization.
12867 * 0 = Success, !0 = Failure.
/*
 * Per-port initialization (FW_MSG_CODE_DRV_LOAD_PORT): runs every hardware
 * block through its PORT0/PORT1 init stage, programs BRB pause thresholds
 * from the interface MTU, configures PBF credits, AEU attention masks, and
 * any external-PHY-specific GPIO/SPIO attention wiring.
 *
 * NOTE(review): some original lines (if-conditions, braces, locals) are
 * elided in this excerpt; comments describe only the visible code.
 */
12870 bxe_init_port(struct bxe_softc *sc)
12872 uint32_t val, low, high;
12873 uint32_t swap_val, swap_override, aeu_gpio_mask, offset;
12875 int init_stage, port;
12877 port = BP_PORT(sc);
12878 init_stage = port ? PORT1_STAGE : PORT0_STAGE;
12880 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
12882 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
12883 "%s(): Initializing port %d.\n", __FUNCTION__, port);
/* Unmask NIG interrupts for this port. */
12885 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
12887 bxe_init_block(sc, PXP_BLOCK, init_stage);
12888 bxe_init_block(sc, PXP2_BLOCK, init_stage);
12890 bxe_init_block(sc, TCM_BLOCK, init_stage);
12891 bxe_init_block(sc, UCM_BLOCK, init_stage);
12892 bxe_init_block(sc, CCM_BLOCK, init_stage);
12893 bxe_init_block(sc, XCM_BLOCK, init_stage);
12895 bxe_init_block(sc, DQ_BLOCK, init_stage);
12897 bxe_init_block(sc, BRB1_BLOCK, init_stage);
12899 /* Determine the pause threshold for the BRB */
12901 low = (sc->bxe_flags & BXE_ONE_PORT_FLAG) ? 160 : 246;
12902 else if (sc->bxe_ifp->if_mtu > 4096) {
12903 if (sc->bxe_flags & BXE_ONE_PORT_FLAG)
12906 val = sc->bxe_ifp->if_mtu;
12907 /* (24*1024 + val*4)/256 */
12908 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
12911 low = (sc->bxe_flags & BXE_ONE_PORT_FLAG) ? 80 : 160;
12912 high = low + 56; /* 14 * 1024 / 256 */
12914 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low);
12915 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high);
12917 /* Port PRS comes here. */
12918 bxe_init_block(sc, PRS_BLOCK, init_stage);
12920 bxe_init_block(sc, TSDM_BLOCK, init_stage);
12921 bxe_init_block(sc, CSDM_BLOCK, init_stage);
12922 bxe_init_block(sc, USDM_BLOCK, init_stage);
12923 bxe_init_block(sc, XSDM_BLOCK, init_stage);
12925 bxe_init_block(sc, TSEM_BLOCK, init_stage);
12926 bxe_init_block(sc, USEM_BLOCK, init_stage);
12927 bxe_init_block(sc, CSEM_BLOCK, init_stage);
12928 bxe_init_block(sc, XSEM_BLOCK, init_stage);
12930 bxe_init_block(sc, UPB_BLOCK, init_stage);
12931 bxe_init_block(sc, XPB_BLOCK, init_stage);
12933 bxe_init_block(sc, PBF_BLOCK, init_stage);
12935 /* Configure PBF to work without pause for MTU = 9000. */
12936 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0);
12938 /* Update threshold. */
12939 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040/16));
12940 /* Update initial credit. */
12941 REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, (9040/16) + 553 - 22);
12943 /* Probe changes. */
12944 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1);
12946 REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0);
12948 bxe_init_block(sc, CDU_BLOCK, init_stage);
12949 bxe_init_block(sc, CFC_BLOCK, init_stage);
12951 if (CHIP_IS_E1(sc)) {
12952 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
12953 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
12956 bxe_init_block(sc, HC_BLOCK, init_stage);
12958 bxe_init_block(sc, MISC_AEU_BLOCK, init_stage);
12960 * init aeu_mask_attn_func_0/1:
12961 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
12962 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
12963 * bits 4-7 are used for "per vn group attention"
12965 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4,
12966 (IS_E1HMF(sc) ? 0xF7 : 0x7));
12968 bxe_init_block(sc, PXPCS_BLOCK, init_stage);
12969 bxe_init_block(sc, EMAC0_BLOCK, init_stage);
12970 bxe_init_block(sc, EMAC1_BLOCK, init_stage);
12971 bxe_init_block(sc, DBU_BLOCK, init_stage);
12972 bxe_init_block(sc, DBG_BLOCK, init_stage);
12974 bxe_init_block(sc, NIG_BLOCK, init_stage);
12976 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
12978 if (CHIP_IS_E1H(sc)) {
12979 /* Enable outer VLAN support if required. */
12980 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4,
12981 (IS_E1HOV(sc) ? 0x1 : 0x2));
12984 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0);
12985 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0);
12986 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1);
12988 bxe_init_block(sc, MCP_BLOCK, init_stage);
12989 bxe_init_block(sc, DMAE_BLOCK, init_stage);
/* External-PHY-specific attention wiring. */
12991 switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
12992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12993 bxe_set_gpio(sc, MISC_REGISTERS_GPIO_3,
12994 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
12997 * The GPIO should be swapped if the swap register is
13000 swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
13001 swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
13003 /* Select function upon port-swap configuration. */
13005 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
13006 aeu_gpio_mask = (swap_val && swap_override) ?
13007 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
13008 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
13010 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
13011 aeu_gpio_mask = (swap_val && swap_override) ?
13012 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
13013 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
13015 val = REG_RD(sc, offset);
13016 /* Add GPIO3 to group. */
13017 val |= aeu_gpio_mask;
13018 REG_WR(sc, offset, val);
13020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
13021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
13022 /* Add SPIO 5 to group 0. */
13023 reg_addr = port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
13024 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
13025 val = REG_RD(sc, reg_addr);
13026 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
13027 REG_WR(sc, reg_addr, val);
13033 bxe__link_reset(sc);
13035 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13040 #define ILT_PER_FUNC (768/2)
13041 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
13043 * The phys address is shifted right 12 bits and has an added 1=valid
13044 * bit added to the 53rd bit (bit 52) then since this is a wide
13045 * register(TM) we split it into two 32 bit writes.
13047 #define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
13048 #define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))
13049 #define PXP_ONE_ILT(x) (((x) << 10) | x)
13050 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
13051 #define CNIC_ILT_LINES 0
/*
 * Write one ILT (internal lookup table) entry.
 *
 * Splits the 64-bit bus address into the two 32-bit halves expected by
 * the PXP2 on-chip address-translation register (ONCHIP_ADDR1/2) and
 * writes them as a wide-bus pair.  E1H chips use a different register
 * base (B0) than E1.
 */
13060 bxe_ilt_wr(struct bxe_softc *sc, uint32_t index, bus_addr_t addr)
13064 DBENTER(BXE_INSANE_LOAD | BXE_INSANE_RESET);
13066 if (CHIP_IS_E1H(sc))
13067 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8;
13069 reg = PXP2_REG_RQ_ONCHIP_AT + index * 8;
13071 bxe_wb_wr(sc, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
13073 DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_RESET);
13077 * Initialize a function.
13080 * 0 = Success, !0 = Failure.
/*
 * Per-function initialization (FW_MSG_CODE_DRV_LOAD_FUNCTION): enables
 * MSI reconfigure, maps this function's slowpath context into the ILT,
 * runs the per-function init stages (E1H only), and resets host
 * coalescing and PCIe debug registers.
 *
 * NOTE(review): some original lines (braces, else branches) are elided
 * in this excerpt; comments describe only the visible code.
 */
13083 bxe_init_func(struct bxe_softc *sc)
13085 uint32_t addr, val;
13088 port = BP_PORT(sc);
13089 func = BP_FUNC(sc);
13091 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13093 DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
13094 "%s(): Initializing port %d, function %d.\n", __FUNCTION__, port,
13097 /* Set MSI reconfigure capability. */
13098 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
13099 val = REG_RD(sc, addr);
13100 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
13101 REG_WR(sc, addr, val);
/* Map the slowpath context at this function's ILT base line. */
13103 i = FUNC_ILT_BASE(func);
13105 bxe_ilt_wr(sc, i, BXE_SP_MAPPING(sc, context));
13107 if (CHIP_IS_E1H(sc)) {
13108 REG_WR(sc, PXP2_REG_RQ_CDU_FIRST_ILT, i);
13109 REG_WR(sc, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
13111 REG_WR(sc, PXP2_REG_PSWRQ_CDU0_L2P + func * 4,
13112 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
13114 if (CHIP_IS_E1H(sc)) {
13115 bxe_init_block(sc, MISC_BLOCK, FUNC0_STAGE + func);
13116 bxe_init_block(sc, TCM_BLOCK, FUNC0_STAGE + func);
13117 bxe_init_block(sc, UCM_BLOCK, FUNC0_STAGE + func);
13118 bxe_init_block(sc, CCM_BLOCK, FUNC0_STAGE + func);
13119 bxe_init_block(sc, XCM_BLOCK, FUNC0_STAGE + func);
13120 bxe_init_block(sc, TSEM_BLOCK, FUNC0_STAGE + func);
13121 bxe_init_block(sc, USEM_BLOCK, FUNC0_STAGE + func);
13122 bxe_init_block(sc, CSEM_BLOCK, FUNC0_STAGE + func);
13123 bxe_init_block(sc, XSEM_BLOCK, FUNC0_STAGE + func);
13125 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
13126 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, sc->e1hov);
13129 /* Host Coalescing initialization per function. */
13130 if (CHIP_IS_E1H(sc)) {
13131 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
13132 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
13133 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
13136 bxe_init_block(sc, HC_BLOCK, FUNC0_STAGE + func);
13138 /* Reset PCIe block debug values. */
13139 REG_WR(sc, 0x2114, 0xffffffff);
13140 REG_WR(sc, 0x2120, 0xffffffff);
13142 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13150 * 0 = Success, !0 = Failure.
/*
 * Top-level hardware init dispatcher.
 *
 * Selects common/port/function initialization based on the load code
 * returned by the MCP bootcode, then (when the bootcode is running)
 * fetches the driver pulse sequence number and clears the default and
 * per-queue status blocks.
 *
 * NOTE(review): some original lines (braces, if-conditions) are elided
 * in this excerpt; comments describe only the visible code.
 */
13153 bxe_init_hw(struct bxe_softc *sc, uint32_t load_code)
13158 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* DMAE is unavailable until common init has run. */
13160 sc->dmae_ready = 0;
13161 switch (load_code) {
13162 case FW_MSG_CODE_DRV_LOAD_COMMON:
13163 rc = bxe_init_common(sc);
13165 goto bxe_init_hw_exit;
13167 case FW_MSG_CODE_DRV_LOAD_PORT:
13168 sc->dmae_ready = 1;
13169 rc = bxe_init_port(sc);
13171 goto bxe_init_hw_exit;
13173 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
13174 sc->dmae_ready = 1;
13175 rc = bxe_init_func(sc);
13177 goto bxe_init_hw_exit;
13180 DBPRINT(sc, BXE_WARN,
13181 "%s(): Unknown load_code (0x%08X) from MCP!\n",
13182 __FUNCTION__, load_code);
13186 /* Fetch additional config data if the bootcode is running. */
13188 func = BP_FUNC(sc);
13189 /* Fetch the pulse sequence number. */
13190 sc->fw_drv_pulse_wr_seq = (SHMEM_RD(sc,
13191 func_mb[func].drv_pulse_mb) & DRV_PULSE_SEQ_MASK);
13194 /* Clear the default status block. */
13195 bxe_zero_def_sb(sc);
13196 for (i = 0; i < sc->num_queues; i++)
13197 bxe_zero_sb(sc, BP_L_ID(sc) + i);
13200 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13206 * Send a firmware command and wait for the response.
13208 * Post a command to shared memory for the bootcode running on the MCP and
13209 * stall until the bootcode responds or a timeout occurs.
13212 * 0 = Failure, otherwise firmware response code (FW_MSG_CODE_*).
/*
 * Post a command to the MCP bootcode mailbox and poll for the response.
 *
 * A sequence number is OR'd into the command so the response can be
 * matched; the poll loop retries up to 400 times (~10ms apiece per the
 * comments, i.e. up to ~2-4 seconds).  On a sequence match the firmware
 * message code (FW_MSG_CODE_*) is returned; on timeout the firmware
 * state is dumped for debugging.
 *
 * NOTE(review): some original lines (locks, delays, braces) are elided
 * in this excerpt; comments describe only the visible code.
 */
13215 bxe_fw_command(struct bxe_softc *sc, uint32_t command)
13217 uint32_t cnt, rc, seq;
13220 func = BP_FUNC(sc);
13221 seq = ++sc->fw_seq;
13225 DBRUNMSG(BXE_VERBOSE, bxe_decode_mb_msgs(sc, (command | seq), 0));
13229 /* Write the command to the shared memory mailbox. */
13230 SHMEM_WR(sc, func_mb[func].drv_mb_header, (command | seq));
13232 /* Wait up to 2 seconds for a response. */
13234 /* Wait 10ms for a response. */
13237 /* Pickup the response. */
13238 rc = SHMEM_RD(sc, func_mb[func].fw_mb_header);
13239 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 400));
13241 DBRUNMSG(BXE_VERBOSE, bxe_decode_mb_msgs(sc, 0, rc));
13243 /* Make sure we read the right response. */
13244 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK ))
13245 rc &= FW_MSG_CODE_MASK;
13247 BXE_PRINTF("%s(%d): Bootcode failed to respond!\n",
13248 __FILE__, __LINE__);
13249 DBRUN(bxe_dump_fw(sc));
13253 BXE_FWMB_UNLOCK(sc);
13258 * Allocate a block of memory and map it for DMA. No partial
13259 * completions allowed, release any resources acquired if we
13260 * can't acquire all resources.
13263 * 0 = Success, !0 = Failure
/*
 * Allocate and DMA-map a block of memory.
 *
 * Creates a dedicated busdma tag (page-aligned, single contiguous
 * segment), allocates the memory, and loads the map; dma->paddr is
 * filled in by the bxe_dma_map_addr() callback.  All-or-nothing: on any
 * failure the resources acquired so far are unwound via the goto-cleanup
 * chain and no partial state is left behind.
 *
 * Returns 0 on success, nonzero on failure.
 */
13274 bxe_dma_malloc(struct bxe_softc *sc, bus_size_t size,
13275 struct bxe_dma *dma, int mapflags, const char *msg)
13279 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* Debug-only guard: catch reuse of an already-populated bxe_dma. */
13281 DBRUNIF(dma->size > 0,
13282 BXE_PRINTF("%s(): Called for %s with size > 0 (%05d)!\n",
13283 __FUNCTION__, msg, (int) dma->size));
13285 rc = bus_dma_tag_create(
13286 sc->parent_tag, /* parent */
13287 BCM_PAGE_SIZE, /* alignment for segs */
13288 BXE_DMA_BOUNDARY, /* cannot cross */
13289 BUS_SPACE_MAXADDR, /* restricted low */
13290 BUS_SPACE_MAXADDR, /* restricted hi */
13291 NULL, NULL, /* filter f(), arg */
13292 size, /* max size for this tag */
13293 1, /* # of discontinuities */
13294 size, /* max seg size */
13295 BUS_DMA_ALLOCNOW, /* flags */
13296 NULL, NULL, /* lock f(), arg */
13300 BXE_PRINTF("%s(%d): bus_dma_tag_create() "
13301 "failed (rc = %d) for %s!\n",
13302 __FILE__, __LINE__, rc, msg);
13303 goto bxe_dma_malloc_fail_create;
13306 rc = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
13307 BUS_DMA_NOWAIT, &dma->map);
13309 BXE_PRINTF("%s(%d): bus_dmamem_alloc() "
13310 "failed (rc = %d) for %s!\n",
13311 __FILE__, __LINE__, rc, msg);
13312 goto bxe_dma_malloc_fail_alloc;
13315 rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
13316 bxe_dma_map_addr, &dma->paddr, mapflags | BUS_DMA_NOWAIT);
13318 BXE_PRINTF("%s(%d): bus_dmamap_load() "
13319 "failed (rc = %d) for %s!\n",
13320 __FILE__, __LINE__, rc, msg);
13321 goto bxe_dma_malloc_fail_load;
13326 DBPRINT(sc, BXE_VERBOSE, "%s(): size=%06d, vaddr=0x%p, "
13327 "paddr=0x%jX - %s\n", __FUNCTION__, (int) dma->size,
13328 dma->vaddr, (uintmax_t) dma->paddr, msg);
13330 goto bxe_dma_malloc_exit;
/* Unwind in reverse order of acquisition. */
13332 bxe_dma_malloc_fail_load:
13333 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
13335 bxe_dma_malloc_fail_alloc:
13336 bus_dma_tag_destroy(dma->tag);
13339 bxe_dma_malloc_fail_create:
13344 bxe_dma_malloc_exit:
13345 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
13350 * Release a block of DMA memory associated tag/map.
/*
 * Release a DMA block previously set up by bxe_dma_malloc().
 *
 * dma->size > 0 is used as the "was allocated" flag; the sync before
 * unload ensures any pending DMA is completed before the memory and
 * tag are destroyed.
 */
13356 bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma)
13358 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_UNLOAD);
13360 if (dma->size > 0) {
13361 bus_dmamap_sync(dma->tag, dma->map,
13362 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
13363 bus_dmamap_unload(dma->tag, dma->map);
13364 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
13365 bus_dma_tag_destroy(dma->tag);
13369 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_UNLOAD);
13373 * Free any DMA memory owned by the driver.
13375 * Scans through each data structure that requires DMA memory and frees
13376 * the memory if allocated.
/*
 * Free all driver-owned DMA memory: per-fastpath status blocks and
 * TX/RX/RCQ/SG chains, the mbuf/TPA/SGE maps and tags, then the
 * device-level status, statistics, slowpath, SPQ and gzip blocks.
 * Safe to call with partially-allocated state — every resource is
 * NULL-checked before release.
 *
 * NOTE(review): some original lines (braces, bus_dmamap_unload calls)
 * are elided in this excerpt; comments describe only the visible code.
 */
13382 bxe_host_structures_free(struct bxe_softc *sc)
13384 struct bxe_fastpath *fp;
13385 int i, j, max_agg_queues;
13387 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
13388 max_agg_queues = CHIP_IS_E1H(sc) ?
13389 ETH_MAX_AGGREGATION_QUEUES_E1H :
13390 ETH_MAX_AGGREGATION_QUEUES_E1;
/* No parent tag means nothing was ever allocated. */
13392 if (sc->parent_tag == NULL)
13393 goto bxe_host_structures_free_exit;
13395 for (i = 0; i < sc->num_queues; i++) {
13398 /* Trust no one! */
13402 /* Status block. */
13403 bxe_dma_free(sc, &fp->sb_dma);
13406 bxe_dma_free(sc, &fp->tx_dma);
13407 fp->tx_chain = NULL;
13410 bxe_dma_free(sc, &fp->rx_dma);
13411 fp->rx_chain = NULL;
13414 bxe_dma_free(sc, &fp->rcq_dma);
13415 fp->rcq_chain = NULL;
13418 bxe_dma_free(sc, &fp->sg_dma);
13419 fp->sg_chain = NULL;
13421 /* Unload and destroy the TX mbuf maps. */
13422 if (fp->tx_mbuf_tag != NULL) {
13423 for (j = 0; j < TOTAL_TX_BD; j++) {
13424 if (fp->tx_mbuf_map[j] != NULL) {
13427 fp->tx_mbuf_map[j]);
13428 bus_dmamap_destroy(
13430 fp->tx_mbuf_map[j]);
13434 bus_dma_tag_destroy(fp->tx_mbuf_tag);
13437 /* Unload and destroy the TPA pool mbuf maps. */
13438 if (fp->rx_mbuf_tag != NULL) {
13439 if (fp->tpa_mbuf_spare_map != NULL) {
13442 fp->tpa_mbuf_spare_map);
13443 bus_dmamap_destroy(
13445 fp->tpa_mbuf_spare_map);
13448 for (j = 0; j < max_agg_queues; j++) {
13449 if (fp->tpa_mbuf_map[j] != NULL) {
13452 fp->tpa_mbuf_map[j]);
13453 bus_dmamap_destroy(
13455 fp->tpa_mbuf_map[j]);
13460 /* Unload and destroy the SGE Buf maps. */
13461 if (fp->rx_sge_buf_tag != NULL) {
13462 if (fp->rx_sge_spare_map != NULL) {
13464 fp->rx_sge_buf_tag,
13465 fp->rx_sge_spare_map);
13466 bus_dmamap_destroy(
13467 fp->rx_sge_buf_tag,
13468 fp->rx_sge_spare_map);
13471 for (j = 0; j < TOTAL_RX_SGE; j++) {
13472 if (fp->rx_sge_buf_map[j] != NULL) {
13474 fp->rx_sge_buf_tag,
13475 fp->rx_sge_buf_map[j]);
13476 bus_dmamap_destroy(
13477 fp->rx_sge_buf_tag,
13478 fp->rx_sge_buf_map[j]);
13482 bus_dma_tag_destroy(fp->rx_sge_buf_tag);
13485 /* Unload and destroy the RX mbuf maps. */
13486 if (fp->rx_mbuf_tag != NULL) {
13487 if (fp->rx_mbuf_spare_map != NULL) {
13488 bus_dmamap_unload(fp->rx_mbuf_tag,
13489 fp->rx_mbuf_spare_map);
13490 bus_dmamap_destroy(fp->rx_mbuf_tag,
13491 fp->rx_mbuf_spare_map);
13494 for (j = 0; j < TOTAL_RX_BD; j++) {
13495 if (fp->rx_mbuf_map[j] != NULL) {
13498 fp->rx_mbuf_map[j]);
13499 bus_dmamap_destroy(
13501 fp->rx_mbuf_map[j]);
13505 bus_dma_tag_destroy(fp->rx_mbuf_tag);
13509 /* Destroy the default status block */
13510 bxe_dma_free(sc, &sc->def_sb_dma);
13513 /* Destroy the statistics block */
13514 bxe_dma_free(sc, &sc->stats_dma);
13517 /* Destroy the slowpath block. */
13518 bxe_dma_free(sc, &sc->slowpath_dma);
13519 sc->slowpath = NULL;
13521 /* Destroy the slowpath queue. */
13522 bxe_dma_free(sc, &sc->spq_dma);
13525 /* Destroy the gzip buffer (gz_dma) and its stream state. */
13526 bxe_dma_free(sc, &sc->gz_dma);
13528 free(sc->strm, M_DEVBUF);
13531 bxe_host_structures_free_exit:
13532 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
13536 * Get DMA memory from the OS.
13538 * Validates that the OS has provided DMA buffers in response to a
13539 * bus_dmamap_load call and saves the physical address of those buffers.
13540 * When the callback is used the OS will return 0 for the mapping function
13541 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
13542 * failures back to the caller.
/*
 * busdma load callback: record the physical address of the mapped
 * buffer in the bus_addr_t pointed to by 'arg'.  Only the first
 * segment's address is stored (tags created with nsegments == 1).
 */
13548 bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
13550 bus_addr_t *busaddr;
13553 /* Check for an error and signal the caller that an error occurred. */
13556 "bxe %s(%d): DMA mapping error (error = %d, nseg = %d)!\n",
13557 __FILE__, __LINE__, error, nseg);
13562 *busaddr = segs->ds_addr;
13566 * Allocate any non-paged DMA memory needed by the driver.
13569 * 0 = Success, !0 = Failure.
13572 bxe_host_structures_alloc(device_t dev)
13574 struct bxe_softc *sc;
13575 struct bxe_fastpath *fp;
13577 bus_addr_t busaddr;
13578 bus_size_t max_size, max_seg_size;
13579 int i, j, max_segments;
13581 sc = device_get_softc(dev);
13582 DBENTER(BXE_VERBOSE_RESET);
13584 int max_agg_queues = CHIP_IS_E1H(sc) ?
13585 ETH_MAX_AGGREGATION_QUEUES_E1H :
13586 ETH_MAX_AGGREGATION_QUEUES_E1;
13589 * Allocate the parent bus DMA tag appropriate for PCI.
13591 rc = bus_dma_tag_create(
13592 bus_get_dma_tag(dev), /* PCI parent tag */
13593 1, /* alignment for segs */
13594 BXE_DMA_BOUNDARY, /* cannot cross */
13595 BUS_SPACE_MAXADDR, /* restricted low */
13596 BUS_SPACE_MAXADDR, /* restricted hi */
13597 NULL, /* filter f() */
13598 NULL, /* filter f() arg */
13599 MAXBSIZE, /* max map for this tag */
13600 BUS_SPACE_UNRESTRICTED, /* # of discontinuities */
13601 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
13603 NULL, /* lock f() */
13604 NULL, /* lock f() arg */
13605 &sc->parent_tag); /* dma tag */
13607 BXE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
13608 __FILE__, __LINE__);
13610 goto bxe_host_structures_alloc_exit;
13613 /* Allocate DMA memory for each fastpath structure. */
13614 for (i = 0; i < sc->num_queues; i++) {
13618 * Allocate status block*
13620 rc = bxe_dma_malloc(sc, BXE_STATUS_BLK_SZ,
13621 &fp->sb_dma, BUS_DMA_NOWAIT, "fp status block");
13622 /* ToDo: Only using 32 bytes out of 4KB allocation! */
13624 goto bxe_host_structures_alloc_exit;
13626 (struct host_status_block *) fp->sb_dma.vaddr;
13629 * Allocate TX chain.
13631 rc = bxe_dma_malloc(sc, BXE_TX_CHAIN_PAGE_SZ *
13632 NUM_TX_PAGES, &fp->tx_dma, BUS_DMA_NOWAIT,
13635 goto bxe_host_structures_alloc_exit;
13636 fp->tx_chain = (union eth_tx_bd_types *) fp->tx_dma.vaddr;
13638 /* Link the TX chain pages. */
13639 for (j = 1; j <= NUM_TX_PAGES; j++) {
13640 struct eth_tx_next_bd *tx_n_bd =
13641 &fp->tx_chain[TOTAL_TX_BD_PER_PAGE * j - 1].next_bd;
13643 busaddr = fp->tx_dma.paddr +
13644 BCM_PAGE_SIZE * (j % NUM_TX_PAGES);
13645 tx_n_bd->addr_hi = htole32(U64_HI(busaddr));
13646 tx_n_bd->addr_lo = htole32(U64_LO(busaddr));
13650 * Allocate RX chain.
13652 rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
13653 NUM_RX_PAGES, &fp->rx_dma, BUS_DMA_NOWAIT,
13656 goto bxe_host_structures_alloc_exit;
13657 fp->rx_chain = (struct eth_rx_bd *) fp->rx_dma.vaddr;
13659 /* Link the RX chain pages. */
13660 for (j = 1; j <= NUM_RX_PAGES; j++) {
13661 struct eth_rx_bd *rx_bd =
13662 &fp->rx_chain[TOTAL_RX_BD_PER_PAGE * j - 2];
13664 busaddr = fp->rx_dma.paddr +
13665 BCM_PAGE_SIZE * (j % NUM_RX_PAGES);
13666 rx_bd->addr_hi = htole32(U64_HI(busaddr));
13667 rx_bd->addr_lo = htole32(U64_LO(busaddr));
13671 * Allocate CQ chain.
13673 rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
13674 NUM_RCQ_PAGES, &fp->rcq_dma, BUS_DMA_NOWAIT,
13675 "rcq chain pages");
13677 goto bxe_host_structures_alloc_exit;
13678 fp->rcq_chain = (union eth_rx_cqe *) fp->rcq_dma.vaddr;
13680 /* Link the CQ chain pages. */
13681 for (j = 1; j <= NUM_RCQ_PAGES; j++) {
13682 struct eth_rx_cqe_next_page *nextpg =
13683 (struct eth_rx_cqe_next_page *)
13684 &fp->rcq_chain[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1];
13686 busaddr = fp->rcq_dma.paddr +
13687 BCM_PAGE_SIZE * (j % NUM_RCQ_PAGES);
13688 nextpg->addr_hi = htole32(U64_HI(busaddr));
13689 nextpg->addr_lo = htole32(U64_LO(busaddr));
13693 * Allocate SG chain.
13695 rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
13696 NUM_RX_SGE_PAGES, &fp->sg_dma, BUS_DMA_NOWAIT,
13699 goto bxe_host_structures_alloc_exit;
13700 fp->sg_chain = (struct eth_rx_sge *) fp->sg_dma.vaddr;
13702 /* Link the SG chain pages. */
13703 for (j = 1; j <= NUM_RX_SGE_PAGES; j++) {
13704 struct eth_rx_sge *nextpg =
13705 &fp->sg_chain[TOTAL_RX_SGE_PER_PAGE * j - 2];
13707 busaddr = fp->sg_dma.paddr +
13708 BCM_PAGE_SIZE * (j % NUM_RX_SGE_PAGES);
13709 nextpg->addr_hi = htole32(U64_HI(busaddr));
13710 nextpg->addr_lo = htole32(U64_LO(busaddr));
13714 * Check required size before mapping to conserve resources.
13716 if (sc->tso_enable == TRUE) {
13717 max_size = BXE_TSO_MAX_SIZE;
13718 max_segments = BXE_TSO_MAX_SEGMENTS;
13719 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
13721 max_size = MCLBYTES * BXE_MAX_SEGMENTS;
13722 max_segments = BXE_MAX_SEGMENTS;
13723 max_seg_size = MCLBYTES;
13726 /* Create a DMA tag for TX mbufs. */
13727 if (bus_dma_tag_create(sc->parent_tag,
13728 1, /* alignment for segs */
13729 BXE_DMA_BOUNDARY, /* cannot cross */
13730 BUS_SPACE_MAXADDR, /* restricted low */
13731 BUS_SPACE_MAXADDR, /* restricted hi */
13732 NULL, /* filter f() */
13733 NULL, /* filter f() arg */
13734 max_size, /* max map for this tag */
13735 max_segments, /* # of discontinuities */
13736 max_seg_size, /* max seg size */
13738 NULL, /* lock f() */
13739 NULL, /* lock f() arg */
13740 &fp->tx_mbuf_tag)) {
13742 "%s(%d): Could not allocate fp[%d] "
13743 "TX mbuf DMA tag!\n",
13744 __FILE__, __LINE__, i);
13746 goto bxe_host_structures_alloc_exit;
13749 /* Create DMA maps for each the TX mbuf cluster(ext buf). */
13750 for (j = 0; j < TOTAL_TX_BD; j++) {
13751 if (bus_dmamap_create(fp->tx_mbuf_tag,
13753 &fp->tx_mbuf_map[j])) {
13755 "%s(%d): Unable to create fp[%02d]."
13756 "tx_mbuf_map[%d] DMA map!\n",
13757 __FILE__, __LINE__, i, j);
13759 goto bxe_host_structures_alloc_exit;
13764 * Create a DMA tag for RX mbufs.
13766 if (bus_dma_tag_create(sc->parent_tag,
13767 1, /* alignment for segs */
13768 BXE_DMA_BOUNDARY, /* cannot cross */
13769 BUS_SPACE_MAXADDR, /* restricted low */
13770 BUS_SPACE_MAXADDR, /* restricted hi */
13771 NULL, /* filter f() */
13772 NULL, /* filter f() arg */
13773 MJUM9BYTES, /* max map for this tag */
13774 1, /* # of discontinuities */
13775 MJUM9BYTES, /* max seg size */
13777 NULL, /* lock f() */
13778 NULL, /* lock f() arg */
13779 &fp->rx_mbuf_tag)) {
13781 "%s(%d): Could not allocate fp[%02d] "
13782 "RX mbuf DMA tag!\n",
13783 __FILE__, __LINE__, i);
13785 goto bxe_host_structures_alloc_exit;
13788 /* Create DMA maps for the RX mbuf clusters. */
13789 if (bus_dmamap_create(fp->rx_mbuf_tag,
13790 BUS_DMA_NOWAIT, &fp->rx_mbuf_spare_map)) {
13792 "%s(%d): Unable to create fp[%02d]."
13793 "rx_mbuf_spare_map DMA map!\n",
13794 __FILE__, __LINE__, i);
13796 goto bxe_host_structures_alloc_exit;
13799 for (j = 0; j < TOTAL_RX_BD; j++) {
13800 if (bus_dmamap_create(fp->rx_mbuf_tag,
13801 BUS_DMA_NOWAIT, &fp->rx_mbuf_map[j])) {
13803 "%s(%d): Unable to create fp[%02d]."
13804 "rx_mbuf_map[%d] DMA map!\n",
13805 __FILE__, __LINE__, i, j);
13807 goto bxe_host_structures_alloc_exit;
13812 * Create a DMA tag for RX SGE bufs.
13814 if (bus_dma_tag_create(sc->parent_tag, 1,
13815 BXE_DMA_BOUNDARY, BUS_SPACE_MAXADDR,
13816 BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 1,
13817 PAGE_SIZE, 0, NULL, NULL, &fp->rx_sge_buf_tag)) {
13819 "%s(%d): Could not allocate fp[%02d] "
13820 "RX SGE mbuf DMA tag!\n",
13821 __FILE__, __LINE__, i);
13823 goto bxe_host_structures_alloc_exit;
13826 /* Create DMA maps for the SGE mbuf clusters. */
13827 if (bus_dmamap_create(fp->rx_sge_buf_tag,
13828 BUS_DMA_NOWAIT, &fp->rx_sge_spare_map)) {
13830 "%s(%d): Unable to create fp[%02d]."
13831 "rx_sge_spare_map DMA map!\n",
13832 __FILE__, __LINE__, i);
13834 goto bxe_host_structures_alloc_exit;
13837 for (j = 0; j < TOTAL_RX_SGE; j++) {
13838 if (bus_dmamap_create(fp->rx_sge_buf_tag,
13839 BUS_DMA_NOWAIT, &fp->rx_sge_buf_map[j])) {
13841 "%s(%d): Unable to create fp[%02d]."
13842 "rx_sge_buf_map[%d] DMA map!\n",
13843 __FILE__, __LINE__, i, j);
13845 goto bxe_host_structures_alloc_exit;
13849 /* Create DMA maps for the TPA pool mbufs. */
13850 if (bus_dmamap_create(fp->rx_mbuf_tag,
13851 BUS_DMA_NOWAIT, &fp->tpa_mbuf_spare_map)) {
13853 "%s(%d): Unable to create fp[%02d]."
13854 "tpa_mbuf_spare_map DMA map!\n",
13855 __FILE__, __LINE__, i);
13857 goto bxe_host_structures_alloc_exit;
13860 for (j = 0; j < max_agg_queues; j++) {
13861 if (bus_dmamap_create(fp->rx_mbuf_tag,
13862 BUS_DMA_NOWAIT, &fp->tpa_mbuf_map[j])) {
13864 "%s(%d): Unable to create fp[%02d]."
13865 "tpa_mbuf_map[%d] DMA map!\n",
13866 __FILE__, __LINE__, i, j);
13868 goto bxe_host_structures_alloc_exit;
13872 bxe_init_sge_ring_bit_mask(fp);
13876 * Allocate default status block.
13878 rc = bxe_dma_malloc(sc, BXE_DEF_STATUS_BLK_SZ, &sc->def_sb_dma,
13879 BUS_DMA_NOWAIT, "default status block");
13881 goto bxe_host_structures_alloc_exit;
13882 sc->def_sb = (struct host_def_status_block *) sc->def_sb_dma.vaddr;
13885 * Allocate statistics block.
13887 rc = bxe_dma_malloc(sc, BXE_STATS_BLK_SZ, &sc->stats_dma,
13888 BUS_DMA_NOWAIT, "statistics block");
13890 goto bxe_host_structures_alloc_exit;
13891 sc->stats = (struct statistics_block *) sc->stats_dma.vaddr;
13894 * Allocate slowpath block.
13896 rc = bxe_dma_malloc(sc, BXE_SLOWPATH_SZ, &sc->slowpath_dma,
13897 BUS_DMA_NOWAIT, "slowpath block");
13899 goto bxe_host_structures_alloc_exit;
13900 sc->slowpath = (struct bxe_slowpath *) sc->slowpath_dma.vaddr;
13903 * Allocate slowpath queue.
13905 rc = bxe_dma_malloc(sc, BXE_SPQ_SZ, &sc->spq_dma,
13906 BUS_DMA_NOWAIT, "slowpath queue");
13908 goto bxe_host_structures_alloc_exit;
13909 sc->spq = (struct eth_spe *) sc->spq_dma.vaddr;
13912 * Allocate firmware decompression buffer.
13914 rc = bxe_dma_malloc(sc, BXE_FW_BUF_SIZE, &sc->gz_dma,
13915 BUS_DMA_NOWAIT, "gunzip buffer");
13917 goto bxe_host_structures_alloc_exit;
13918 sc->gz = sc->gz_dma.vaddr;
13919 if (sc->strm == NULL) {
13920 goto bxe_host_structures_alloc_exit;
13923 sc->strm = malloc(sizeof(*sc->strm), M_DEVBUF, M_NOWAIT);
13925 bxe_host_structures_alloc_exit:
13926 DBEXIT(BXE_VERBOSE_RESET);
13931 * Program the MAC address for 57710 controllers.
/*
 * bxe_set_mac_addr_e1
 *
 * Program (set != 0) or invalidate (set == 0) the unicast and broadcast
 * MAC CAM entries for BCM57710 (E1) controllers, then post a SET_MAC
 * ramrod on the slow path queue to apply them.
 *
 * NOTE(review): this excerpt is missing lines (the if(set)/else arms
 * around the flags/CAM_INVALIDATE pairs appear elided) — comments below
 * describe only the visible code.
 */
13937 bxe_set_mac_addr_e1(struct bxe_softc *sc, int set)
13939 struct mac_configuration_cmd *config;
13940 struct mac_configuration_entry *config_table;
13944 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
/* Slow path scratch area holds the CAM configuration command. */
13946 config = BXE_SP(sc, mac_config);
13947 port = BP_PORT(sc);
13950 * Port 0 Unicast Addresses: 32 Perfect Match Filters (31-0)
13951 * Port 1 Unicast Addresses: 32 Perfect Match Filters (63-32)
13952 * Port 0 Multicast Addresses: 128 Hashes (127-64)
13953 * Port 1 Multicast Addresses: 128 Hashes (191-128)
/* Two entries: primary MAC at index 0, broadcast at index 1. */
13956 config->hdr.length = 2;
/* Port 1 uses the second bank of 32 perfect-match filters. */
13957 config->hdr.offset = port ? 32 : 0;
13958 config->hdr.client_id = BP_CL_ID(sc);
13959 config->hdr.reserved1 = 0;
13961 /* Program the primary MAC address. */
13962 config_table = &config->config_table[0];
13963 eaddr = sc->link_params.mac_addr;
/* CAM stores the 48-bit MAC as three big-endian 16-bit words. */
13964 config_table->cam_entry.msb_mac_addr = eaddr[0] << 8 | eaddr[1];
13965 config_table->cam_entry.middle_mac_addr = eaddr[2] << 8 | eaddr[3];
13966 config_table->cam_entry.lsb_mac_addr = eaddr[4] << 8 | eaddr[5];
13967 config_table->cam_entry.flags = htole16(port);
/* Set path: normal unicast entry; clear path invalidates the entry. */
13970 config_table->target_table_entry.flags = 0;
13972 CAM_INVALIDATE(config_table);
13974 config_table->target_table_entry.vlan_id = 0;
13976 DBPRINT(sc, BXE_VERBOSE, "%s(): %s MAC (%04x:%04x:%04x)\n",
13977 __FUNCTION__, (set ? "Setting" : "Clearing"),
13978 config_table->cam_entry.msb_mac_addr,
13979 config_table->cam_entry.middle_mac_addr,
13980 config_table->cam_entry.lsb_mac_addr);
13982 /* Program the broadcast MAC address. */
13983 config_table = &config->config_table[1];
13984 config_table->cam_entry.msb_mac_addr = 0xffff;
13985 config_table->cam_entry.middle_mac_addr = 0xffff;
13986 config_table->cam_entry.lsb_mac_addr = 0xffff;
13987 config_table->cam_entry.flags = htole16(port);
13990 config_table->target_table_entry.flags =
13991 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
13993 CAM_INVALIDATE(config_table);
13995 config_table->target_table_entry.vlan_id = 0;
13997 /* Post the command to slow path queue. */
13998 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
13999 U64_HI(BXE_SP_MAPPING(sc, mac_config)),
14000 U64_LO(BXE_SP_MAPPING(sc, mac_config)), 0);
14002 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14006 * Program the MAC address for 57711/57711E controllers.
/*
 * bxe_set_mac_addr_e1h
 *
 * Program (set != 0) or clear (set == 0) the per-function unicast MAC
 * entry for BCM57711/57711E (E1H) controllers and post a SET_MAC ramrod.
 * Setting is refused unless the driver state is BXE_STATE_OPEN.
 *
 * NOTE(review): excerpt has elided lines (the if(set)/else around the
 * flags assignments is not fully visible here).
 */
14012 bxe_set_mac_addr_e1h(struct bxe_softc *sc, int set)
14014 struct mac_configuration_cmd_e1h *config;
14015 struct mac_configuration_entry_e1h *config_table;
14019 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14021 config = (struct mac_configuration_cmd_e1h *)BXE_SP(sc, mac_config);
14022 port = BP_PORT(sc);
14023 func = BP_FUNC(sc);
/* Refuse to program a MAC while the device is not fully up. */
14025 if (set && (sc->state != BXE_STATE_OPEN)) {
14026 DBPRINT(sc, BXE_VERBOSE,
14027 "%s(): Can't set E1H MAC in state 0x%08X!\n", __FUNCTION__,
14029 goto bxe_set_mac_addr_e1h_exit;
14034 * Function 0-7 Unicast Addresses: 8 Perfect Match Filters
14035 * Multicast Addresses: 20 + FUNC * 20, 20 each (???)
/* One entry, indexed by PCI function; 0xff = no specific client. */
14037 config->hdr.length = 1;
14038 config->hdr.offset = func;
14039 config->hdr.client_id = 0xff;
14040 config->hdr.reserved1 = 0;
14042 /* Program the primary MAC address. */
14043 config_table = &config->config_table[0];
14044 eaddr = sc->link_params.mac_addr;
14045 config_table->msb_mac_addr = eaddr[0] << 8 | eaddr[1];
14046 config_table->middle_mac_addr = eaddr[2] << 8 | eaddr[3];
14047 config_table->lsb_mac_addr = eaddr[4] << 8 | eaddr[5];
/* Steer matching frames to this function's leading client. */
14048 config_table->clients_bit_vector = htole32(1 << sc->fp->cl_id);
14050 config_table->vlan_id = 0;
/* Outer VLAN id used in E1H multi-function (overlay) mode. */
14051 config_table->e1hov_id = htole16(sc->e1hov);
14054 config_table->flags = port;
14056 config_table->flags =
14057 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
14059 DBPRINT(sc, BXE_VERBOSE,
14060 "%s(): %s MAC (%04x:%04x:%04x), E1HOV = %d, CLID = %d\n",
14061 __FUNCTION__, (set ? "Setting" : "Clearing"),
14062 config_table->msb_mac_addr, config_table->middle_mac_addr,
14063 config_table->lsb_mac_addr, sc->e1hov, BP_L_ID(sc));
14065 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
14066 U64_HI(BXE_SP_MAPPING(sc, mac_config)),
14067 U64_LO(BXE_SP_MAPPING(sc, mac_config)), 0);
14069 bxe_set_mac_addr_e1h_exit:
14070 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14074 * Programs the various packet receive modes (broadcast and multicast).
/*
 * bxe_set_rx_mode
 *
 * Program the controller's receive filtering from the interface flags:
 * promiscuous, all-multicast, or selective multicast.  E1 parts get
 * explicit CAM entries per multicast address (posted via ramrod); the
 * non-E1 path hashes each address into an MC_HASH register filter.
 * Finishes by pushing the chosen mode to the storm firmware.
 *
 * Caller must hold the core lock (asserted below).
 *
 * NOTE(review): excerpt has elided lines (ifp assignment, i init,
 * continue/break statements, regidx computation) — comments describe
 * only the visible code.
 */
14081 bxe_set_rx_mode(struct bxe_softc *sc)
14084 struct ifmultiaddr *ifma;
14085 struct mac_configuration_cmd *config;
14086 struct mac_configuration_entry *config_table;
14087 uint32_t mc_filter[MC_HASH_SIZE];
14089 uint32_t crc, bit, regidx, rx_mode;
14090 int i, old, offset, port;
14092 BXE_CORE_LOCK_ASSERT(sc);
14094 rx_mode = BXE_RX_MODE_NORMAL;
14095 port = BP_PORT(sc);
14097 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* Filtering can only be programmed while the device is open. */
14099 if (sc->state != BXE_STATE_OPEN) {
14100 DBPRINT(sc, BXE_WARN, "%s(): State (0x%08X) is not open!\n",
14101 __FUNCTION__, sc->state);
14102 goto bxe_set_rx_mode_exit;
14108 * Check for promiscuous, all multicast, or selected
14109 * multicast address filtering.
14111 if (ifp->if_flags & IFF_PROMISC) {
14112 /* Enable promiscuous mode. */
14113 rx_mode = BXE_RX_MODE_PROMISC;
14114 } else if (ifp->if_flags & IFF_ALLMULTI ||
14115 ifp->if_amcount > BXE_MAX_MULTICAST) {
14116 /* Enable all multicast addresses. */
14117 rx_mode = BXE_RX_MODE_ALLMULTI;
14119 /* Enable selective multicast mode. */
14120 if (CHIP_IS_E1(sc)) {
/* E1: build one CAM entry per multicast address. */
14122 config = BXE_SP(sc, mcast_config)
14124 if_maddr_rlock(ifp);
14126 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
/* Skip non link-layer entries in the multicast list. */
14127 if (ifma->ifma_addr->sa_family != AF_LINK)
14129 maddr = (uint8_t *)LLADDR(
14130 (struct sockaddr_dl *)ifma->ifma_addr);
14131 config_table = &config->config_table[i];
14132 config_table->cam_entry.msb_mac_addr =
14133 maddr[0] << 8 | maddr[1];
14134 config_table->cam_entry.middle_mac_addr =
14135 maddr[2] << 8 | maddr[3];
14136 config_table->cam_entry.lsb_mac_addr =
14137 maddr[4] << 8 | maddr[5];
14138 config_table->cam_entry.flags = htole16(port);
14139 config_table->target_table_entry.flags = 0;
14140 config_table->target_table_entry.
14141 clients_bit_vector =
14142 htole32(1 << BP_L_ID(sc));
14143 config_table->target_table_entry.vlan_id = 0;
14145 DBPRINT(sc, BXE_INFO,
14146 "%s(): Setting MCAST[%d] (%04X:%04X:%04X)\n",
14148 config_table->cam_entry.msb_mac_addr,
14149 config_table->cam_entry.middle_mac_addr,
14150 config_table->cam_entry.lsb_mac_addr);
14153 if_maddr_runlock(ifp);
14155 old = config->hdr.length;
14157 /* Invalidate any extra MC entries in the CAM. */
14159 for (; i < old; i++) {
14160 config_table = &config->config_table[i];
14161 if (CAM_IS_INVALID(config_table))
14164 CAM_INVALIDATE(config_table);
/* Multicast CAM bank sits above the per-port unicast banks. */
14168 offset = BXE_MAX_MULTICAST * (1 + port);
14169 config->hdr.length = i;
14170 config->hdr.offset = offset;
14171 config->hdr.client_id = sc->fp->cl_id;
14172 config->hdr.reserved1 = 0;
14174 bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0,
14175 U64_HI(BXE_SP_MAPPING(sc, mcast_config)),
14176 U64_LO(BXE_SP_MAPPING(sc, mcast_config)), 0);
14178 /* Accept one or more multicasts */
/* Non-E1: 256-bit hash filter built from top CRC byte. */
14179 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
14181 if_maddr_rlock(ifp);
14183 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
14184 if (ifma->ifma_addr->sa_family != AF_LINK)
14186 crc = ether_crc32_le(ifma->ifma_addr->sa_data,
14188 bit = (crc >> 24) & 0xff;
14191 mc_filter[regidx] |= (1 << bit);
14193 if_maddr_runlock(ifp);
14195 for (i = 0; i < MC_HASH_SIZE; i++)
14196 REG_WR(sc, MC_HASH_OFFSET(sc, i), mc_filter[i]);
14200 DBPRINT(sc, BXE_VERBOSE, "%s(): Enabling new receive mode: 0x%08X\n",
14201 __FUNCTION__, rx_mode);
/* Publish the mode and tell the storm firmware about it. */
14203 sc->rx_mode = rx_mode;
14204 bxe_set_storm_rx_mode(sc);
14206 bxe_set_rx_mode_exit:
14207 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
14211 * Function specific controller reset.
/*
 * bxe_reset_func
 *
 * Function-scope reset: disable this function's interrupt handling in
 * the IGU/HC and clear its ILT (address translation) entries.
 */
14217 bxe_reset_func(struct bxe_softc *sc)
14219 int base, func, i, port;
14221 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14223 port = BP_PORT(sc);
14224 func = BP_FUNC(sc);
14226 /* Configure IGU. */
/* Zero the leading/trailing edge registers and restore HC config. */
14227 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
14228 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
14229 REG_WR(sc, HC_REG_CONFIG_0 + (port * 4), 0x1000);
/* Clear every ILT entry belonging to this function. */
14232 base = FUNC_ILT_BASE(func);
14233 for (i = base; i < base + ILT_PER_FUNC; i++)
14234 bxe_ilt_wr(sc, i, 0);
14236 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14240 * Port specific controller reset.
/*
 * bxe_reset_port
 *
 * Port-scope reset: mask the port's NIG interrupts, stop delivery of
 * received frames into the BRB, mask AEU attentions, and warn if the
 * BRB still holds buffered blocks for this port afterwards.
 */
14246 bxe_reset_port(struct bxe_softc *sc)
14251 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14253 port = BP_PORT(sc);
/* Mask all NIG interrupts for this port. */
14254 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
14256 /* Do not receive packets to BRB. */
14257 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0);
14259 /* Do not direct receive packets that are not for MCP to the BRB. */
14260 REG_WR(sc, port ? NIG_REG_LLH1_BRB1_NOT_MCP :
14261 NIG_REG_LLH0_BRB1_NOT_MCP, 0x0);
14263 /* Configure AEU. */
14264 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0);
14268 /* Check for BRB port occupancy. */
/* Non-zero occupancy here means in-flight RX traffic was not drained. */
14269 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4);
14271 DBPRINT(sc, BXE_VERBOSE,
14272 "%s(): BRB1 is not empty (%d blocks are occupied)!\n",
14273 __FUNCTION__, val);
14275 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14279 * Common controller reset.
/*
 * bxe_reset_common
 *
 * Chip-common reset: assert the shared (all-function) reset bits via
 * the MISC reset-register CLEAR banks.  The bitmask arguments are on
 * lines elided from this excerpt.
 */
14285 bxe_reset_common(struct bxe_softc *sc)
14288 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14290 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
14292 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
14295 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14299 * Reset the controller.
/*
 * bxe_reset_chip
 *
 * Dispatch the reset scope requested by the MCP unload response code:
 * COMMON resets port + function + common hardware, PORT resets
 * port + function, FUNCTION resets only this function.  Unknown codes
 * are logged.  (break statements between cases are elided in this
 * excerpt.)
 */
14305 bxe_reset_chip(struct bxe_softc *sc, uint32_t reset_code)
14308 DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14310 switch (reset_code) {
14311 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
14312 bxe_reset_port(sc);
14313 bxe_reset_func(sc);
14314 bxe_reset_common(sc);
14316 case FW_MSG_CODE_DRV_UNLOAD_PORT:
14317 bxe_reset_port(sc);
14318 bxe_reset_func(sc);
14320 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
14321 bxe_reset_func(sc);
14324 BXE_PRINTF("%s(%d): Unknown reset code (0x%08X) from MCP!\n",
14325 __FILE__, __LINE__, reset_code);
14329 DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
14333 * Called by the OS to set media options (link, speed, etc.)
14334 * when the user specifies "ifconfig bxe media XXX" or
14335 * "ifconfig bxe mediaopt XXX".
14338 * 0 = Success, !0 = Failure
/*
 * bxe_ifmedia_upd
 *
 * ifmedia "change" callback: invoked for "ifconfig bxe media/mediaopt".
 * Only IFM_ETHER media is accepted; changing the media subtype is not
 * supported, so valid requests are effectively no-ops.
 *
 * Returns 0 on success, non-zero on failure (rc handling elided here).
 */
14341 bxe_ifmedia_upd(struct ifnet *ifp)
14343 struct bxe_softc *sc;
14344 struct ifmedia *ifm;
14347 sc = ifp->if_softc;
14348 DBENTER(BXE_VERBOSE_PHY);
14350 ifm = &sc->bxe_ifmedia;
14353 /* We only support Ethernet media type. */
14354 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
14356 goto bxe_ifmedia_upd_exit;
14359 switch (IFM_SUBTYPE(ifm->ifm_media)) {
14361 /* ToDo: What to do here? */
14362 /* Doing nothing translates to success here. */
14370 case IFM_10G_TWINAX:
14373 /* We don't support changing the media type. */
14374 DBPRINT(sc, BXE_WARN, "%s(): Invalid media type!\n",
14379 bxe_ifmedia_upd_exit:
/* NOTE(review): this is the exit path — DBENTER here looks like a bug,
 * it should almost certainly be DBEXIT(BXE_VERBOSE_PHY). Confirm and fix. */
14380 DBENTER(BXE_VERBOSE_PHY);
14385 * Called by the OS to report current media status
14386 * (link, speed, etc.).
/*
 * bxe_ifmedia_status
 *
 * ifmedia "status" callback: report link state, media type, and duplex
 * to the stack.  Reports IFM_NONE when the interface is not running or
 * the PHY layer says the link is down.
 */
14392 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
14394 struct bxe_softc *sc;
14396 sc = ifp->if_softc;
14397 DBENTER(BXE_EXTREME_LOAD | BXE_EXTREME_RESET);
14399 /* Report link down if the driver isn't running. */
14400 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
14401 ifmr->ifm_active |= IFM_NONE;
14402 goto bxe_ifmedia_status_exit;
14405 /* Setup the default interface info. */
14406 ifmr->ifm_status = IFM_AVALID;
14407 ifmr->ifm_active = IFM_ETHER;
/* Link state comes from the shared PHY/link code's link_vars. */
14409 if (sc->link_vars.link_up)
14410 ifmr->ifm_status |= IFM_ACTIVE;
14412 ifmr->ifm_active |= IFM_NONE;
14413 goto bxe_ifmedia_status_exit;
/* sc->media carries the detected media subtype (e.g. 10G variants). */
14416 ifmr->ifm_active |= sc->media;
14418 if (sc->link_vars.duplex == MEDIUM_FULL_DUPLEX)
14419 ifmr->ifm_active |= IFM_FDX;
14421 ifmr->ifm_active |= IFM_HDX;
14423 bxe_ifmedia_status_exit:
14424 DBEXIT(BXE_EXTREME_LOAD | BXE_EXTREME_RESET);
14429 * Update last maximum scatter gather entry.
/*
 * bxe_update_last_max_sge
 *
 * Record `index` as the highest SGE index seen so far for this fastpath,
 * using signed 16-bit wrap-safe comparison (SUB_S16) so the tracking
 * survives the 16-bit producer/consumer wrap-around.
 */
14434 static __inline void
14435 bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t index)
14439 last_max = fp->last_max_sge;
14440 if (SUB_S16(index, last_max) > 0)
14441 fp->last_max_sge = index;
14445 * Clear scatter gather mask next elements.
/*
 * bxe_clear_sge_mask_next_elems
 *
 * Clear the SGE-mask bits corresponding to the last two entries of every
 * SGE chain page — those slots hold the "next page" pointer and never
 * track an aggregation.  (Index increment inside the inner loop is
 * elided in this excerpt.)
 */
14451 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
14455 for (i = 0; i < NUM_RX_SGE_PAGES; i++) {
14456 index = i * TOTAL_RX_SGE_PER_PAGE + USABLE_RX_SGE_PER_PAGE;
14457 for (j = 0; j < 2; j++) {
14458 SGE_MASK_CLEAR_BIT(fp, index);
14465 * Update SGE producer.
/*
 * bxe_update_sge_prod
 *
 * After a TPA completion, mark the SGE ring entries consumed by the
 * aggregated frame and advance the SGE producer index past fully-freed
 * 64-bit mask elements.  Entries still tracking an in-flight
 * aggregation (non-zero mask) stop the advance.
 */
14471 bxe_update_sge_prod(struct bxe_fastpath *fp,
14472 struct eth_fast_path_rx_cqe *fp_cqe)
14474 struct bxe_softc *sc;
14475 uint16_t delta, first_elem, last_max, last_elem, sge_len;
14479 DBENTER(BXE_EXTREME_RECV);
/* Number of SGE pages used = non-linear bytes rounded up to pages. */
14482 sge_len = SGE_PAGE_ALIGN(le16toh(fp_cqe->pkt_len) -
14483 le16toh(fp_cqe->len_on_bd)) >> SGE_PAGE_SHIFT;
14485 goto bxe_update_sge_prod_exit;
14487 /* First mark all used pages. */
14488 for (i = 0; i < sge_len; i++)
14489 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16toh(fp_cqe->sgl[i])));
14491 /* Assume that the last SGE index is the biggest. */
14492 bxe_update_last_max_sge(fp, le16toh(fp_cqe->sgl[sge_len - 1]));
14494 last_max = RX_SGE(fp->last_max_sge);
/* Work in units of 64-bit mask elements, not individual SGEs. */
14495 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
14496 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
14498 /* If ring is not full. */
14499 if (last_elem + 1 != first_elem)
14502 /* Now update the producer index. */
14503 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
/* A non-zero mask element still has live aggregations — stop here. */
14504 if (fp->rx_sge_mask[i])
14507 fp->rx_sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
14508 delta += RX_SGE_MASK_ELEM_SZ;
14512 fp->rx_sge_prod += delta;
14513 /* clear page-end entries */
14514 bxe_clear_sge_mask_next_elems(fp);
14517 bxe_update_sge_prod_exit:
14518 DBEXIT(BXE_EXTREME_RECV);
14522 * Initialize scatter gather ring bitmask.
14524 * Each entry in the SGE is associated with an aggregation in process.
14525 * Since there is no guarantee that all Ethernet frames associated with
14526 * a partciular TCP flow will arrive at the adapter and be placed into
14527 * the SGE chain contiguously, we maintain a bitmask for each SGE element
14528 * that identifies which aggregation an Ethernet frame belongs to.
/*
 * bxe_init_sge_ring_bit_mask
 *
 * Initialize the per-fastpath SGE tracking bitmask: set every bit
 * (all entries "free"), then clear the bits for each page's two
 * next-page-pointer slots, which never carry data.
 */
14533 static __inline void
14534 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
14537 /* Set the mask to all 1s, it's faster to compare to 0 than to 0xf. */
14538 memset(fp->rx_sge_mask, 0xff,
14539 (TOTAL_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT) * sizeof(uint64_t));
14542 * The SGE chain is formatted just like the RX chain.
14543 * The last two elements are reserved as a "next page pointer"
14544 * to the next page of SGE elements. Clear the last two
14545 * elements in each SGE chain page since they will never be
14546 * used to track an aggregation.
14548 bxe_clear_sge_mask_next_elems(fp);
14552 * The current mbuf is part of an aggregation. Swap the mbuf into the TPA
14553 * aggregation queue, swap an empty mbuf back onto the receive chain, and
14554 * mark the current aggregation queue as in-progress.
/*
 * bxe_tpa_start
 *
 * Begin a TPA (LRO) aggregation: move the just-received mbuf from the
 * RX chain into the TPA pool slot `queue`, put the pool's previous
 * (empty) mbuf back onto the RX chain at `prod`, and mark the queue
 * as BXE_TPA_STATE_START.
 */
14560 bxe_tpa_start(struct bxe_fastpath *fp, uint16_t queue, uint16_t cons,
14563 struct bxe_softc *sc;
14564 struct mbuf *m_temp;
14565 struct eth_rx_bd *rx_bd;
14566 bus_dmamap_t map_temp;
14567 int max_agg_queues;
14570 DBENTER(BXE_INSANE_RECV | BXE_INSANE_TPA);
14574 DBPRINT(sc, BXE_EXTREME_TPA,
14575 "%s(): fp[%02d].tpa[%02d], cons=0x%04X, prod=0x%04X\n",
14576 __FUNCTION__, fp->index, queue, cons, prod);
/* E1 and E1H support different numbers of aggregation queues. */
14578 max_agg_queues = CHIP_IS_E1(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
14579 ETH_MAX_AGGREGATION_QUEUES_E1H;
14581 DBRUNIF((queue > max_agg_queues),
14582 BXE_PRINTF("%s(): fp[%02d] illegal aggregation (%d > %d)!\n",
14583 __FUNCTION__, fp->index, queue, max_agg_queues));
14585 DBRUNIF((fp->tpa_state[queue] != BXE_TPA_STATE_STOP),
14586 BXE_PRINTF("%s(): Starting aggregation on "
14587 "fp[%02d].tpa[%02d] even though queue is not in the "
14588 "TPA_STOP state!\n", __FUNCTION__, fp->index, queue));
14590 /* Remove the existing mbuf and mapping from the TPA pool. */
14591 m_temp = fp->tpa_mbuf_ptr[queue];
14592 map_temp = fp->tpa_mbuf_map[queue];
14594 /* Only the paranoid survive! */
14595 if(m_temp == NULL) {
14596 BXE_PRINTF("%s(%d): fp[%02d].tpa[%02d] not allocated!\n",
14597 __FILE__, __LINE__, fp->index, queue);
14598 /* ToDo: Additional error handling! */
14599 goto bxe_tpa_start_exit;
14602 /* Move received mbuf and mapping to TPA pool. */
14603 fp->tpa_mbuf_ptr[queue] = fp->rx_mbuf_ptr[cons];
14604 fp->tpa_mbuf_map[queue] = fp->rx_mbuf_map[cons];
14606 /* Place the TPA bin into the START state. */
14607 fp->tpa_state[queue] = BXE_TPA_STATE_START;
14608 DBRUN(fp->tpa_queue_used |= (1 << queue));
14610 /* Get the rx_bd for the next open entry on the receive chain. */
14611 rx_bd = &fp->rx_chain[prod];
14613 /* Update the rx_bd with the empty mbuf from the TPA pool. */
14614 rx_bd->addr_hi = htole32(U64_HI(fp->tpa_mbuf_segs[queue].ds_addr));
14615 rx_bd->addr_lo = htole32(U64_LO(fp->tpa_mbuf_segs[queue].ds_addr));
14616 fp->rx_mbuf_ptr[prod] = m_temp;
14617 fp->rx_mbuf_map[prod] = map_temp;
14619 bxe_tpa_start_exit:
14620 DBEXIT(BXE_INSANE_RECV | BXE_INSANE_TPA);
14624 * When a TPA aggregation is completed, loop through the individual mbufs
14625 * of the aggregation, combining them into a single mbuf which will be sent
14626 * up the stack. Refill all freed SGEs with mbufs as we go along.
14629 * 0 = Success, !0 = Failure.
/*
 * bxe_fill_frag_mbuf
 *
 * Walk the completion's scatter-gather list, append each SGE mbuf as a
 * fragment of the TPA head mbuf `m`, and refill each consumed SGE slot
 * with a fresh mbuf.  Panics (debug builds) if the firmware reports an
 * aggregation larger than 8 * PAGES_PER_SGE pages.
 *
 * Returns 0 on success, non-zero on failure (rc declaration and some
 * error branches are elided in this excerpt).
 */
14632 bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp,
14633 struct mbuf *m, struct eth_fast_path_rx_cqe *fp_cqe, uint16_t cqe_idx)
14635 struct mbuf *m_frag;
14636 uint32_t frag_len, frag_size, pages, i;
14637 uint16_t sge_idx, len_on_bd;
14640 DBENTER(BXE_EXTREME_RECV | BXE_EXTREME_TPA);
/* frag_size = bytes beyond what landed on the linear RX BD. */
14643 len_on_bd = le16toh(fp_cqe->len_on_bd);
14644 frag_size = le16toh(fp_cqe->pkt_len) - len_on_bd;
14645 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
14647 DBPRINT(sc, BXE_VERBOSE_TPA,
14648 "%s(): len_on_bd=%d, frag_size=%d, pages=%d\n",
14649 __FUNCTION__, len_on_bd, frag_size, pages);
14651 /* Make sure the aggregated frame is not too big to handle. */
14652 if (pages > 8 * PAGES_PER_SGE) {
14653 DBPRINT(sc, BXE_FATAL,
14654 "%s(): fp[%02d].rx_sge[0x%04X] has too many pages (%d)!\n",
14655 __FUNCTION__, fp->index, cqe_idx, pages);
14656 DBPRINT(sc, BXE_FATAL,
14657 "%s(): fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
14658 __FUNCTION__, le16toh(fp_cqe->pkt_len), len_on_bd);
14659 bxe_panic_dump(sc);
14661 goto bxe_fill_frag_mbuf_exit;
14665 * Scan through the scatter gather list, pulling individual
14666 * mbufs into a single mbuf for the host stack.
14668 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
14669 sge_idx = RX_SGE(le16toh(fp_cqe->sgl[j]));
14672 * Firmware gives the indices of the SGE as if the ring is an
14673 * array (meaning that the "next" element will consume 2
/* Last fragment may be shorter than a full SGE buffer. */
14676 frag_len = min(frag_size, (uint32_t)(BCM_PAGE_SIZE *
14679 DBPRINT(sc, BXE_VERBOSE_TPA,
14680 "%s(): i=%d, j=%d, frag_size=%d, frag_len=%d\n",
14681 __FUNCTION__, i, j, frag_size, frag_len);
14683 m_frag = fp->rx_sge_buf_ptr[sge_idx];
14685 /* Allocate a new mbuf for the SGE. */
14686 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
14689 * Leave all remaining SGEs in the ring.
14691 goto bxe_fill_frag_mbuf_exit;
14694 /* Update the fragment its length. */
14695 m_frag->m_len = frag_len;
14697 /* Concatenate the fragment to the head mbuf. */
14699 DBRUN(fp->sge_mbuf_alloc--);
14701 /* Update TPA mbuf size and remaining fragment size. */
14702 m->m_pkthdr.len += frag_len;
14703 frag_size -= frag_len;
14706 bxe_fill_frag_mbuf_exit:
14707 DBPRINT(sc, BXE_VERBOSE_TPA,
14708 "%s(): frag_size=%d\n", __FUNCTION__, frag_size);
14709 DBEXIT(BXE_EXTREME_RECV | BXE_EXTREME_TPA);
14714 * The aggregation on the current TPA queue has completed. Pull the
14715 * individual mbuf fragments together into a single mbuf, perform all
14716 * necessary checksum calculations, and send the resuting mbuf to the stack.
/*
 * bxe_tpa_stop
 *
 * Complete a TPA aggregation on `queue`: refill the TPA pool slot,
 * stitch the SGE fragments onto the head mbuf via bxe_fill_frag_mbuf(),
 * mark checksums valid (firmware already verified them), attach any
 * VLAN tag, and hand the frame to the stack.  On any failure the frame
 * is dropped and counted as a soft error.  Finishes by returning the
 * queue to BXE_TPA_STATE_STOP.
 */
14722 bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue,
14723 int pad, int len, union eth_rx_cqe *cqe, uint16_t cqe_idx)
14729 DBENTER(BXE_INSANE_RECV | BXE_INSANE_TPA);
14730 DBPRINT(sc, (BXE_EXTREME_RECV | BXE_EXTREME_TPA),
14731 "%s(): fp[%02d].tpa[%02d], len=%d, pad=%d\n",
14732 __FUNCTION__, fp->index, queue, len, pad);
14736 m = fp->tpa_mbuf_ptr[queue];
14738 /* Allocate a replacement before modifying existing mbuf. */
14739 rc = bxe_alloc_tpa_mbuf(fp, queue);
14741 /* Drop the frame and log a soft error. */
14742 fp->rx_soft_errors++;
14743 goto bxe_tpa_stop_exit;
14746 /* We have a replacement, fixup the current mbuf. */
/* len covers only the linear (on-BD) portion at this point. */
14748 m->m_pkthdr.len = m->m_len = len;
14750 /* Mark the checksums valid (taken care of by firmware). */
14751 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
14752 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
14753 m->m_pkthdr.csum_data = 0xffff;
14755 /* Aggregate all of the SGEs into a single mbuf. */
14756 rc = bxe_fill_frag_mbuf(sc, fp, m, &cqe->fast_path_cqe, cqe_idx);
14758 /* Drop the packet and log an error. */
14759 fp->rx_soft_errors++;
14762 /* Find VLAN tag and send frame up to the stack. */
14763 if ((le16toh(cqe->fast_path_cqe.pars_flags.flags) &
14764 PARSING_FLAGS_VLAN)) {
14765 m->m_pkthdr.ether_vtag =
14766 cqe->fast_path_cqe.vlan_tag;
14767 m->m_flags |= M_VLANTAG;
14770 /* Assign packet to the appropriate interface. */
14771 m->m_pkthdr.rcvif = ifp;
14773 /* Update packet statistics. */
14775 ifp->if_ipackets++;
14777 /* ToDo: Any potential locking issues here? */
14778 /* Pass the frame to the stack. */
14779 (*ifp->if_input)(ifp, m);
14782 /* We passed mbuf up the stack or dropped the frame. */
14783 DBRUN(fp->tpa_mbuf_alloc--);
/* Queue is free for the next aggregation regardless of outcome. */
14786 fp->tpa_state[queue] = BXE_TPA_STATE_STOP;
14787 DBRUN(fp->tpa_queue_used &= ~(1 << queue));
14788 DBEXIT(BXE_INSANE_RECV | BXE_INSANE_TPA);
14792 * Notify the controller that the RX producer indices have been updated for
14793 * a fastpath connection by writing them to the controller.
/*
 * bxe_update_rx_prod
 *
 * Publish the fastpath RX producer indices (BD, CQE, SGE) to the
 * controller by writing the ustorm rx-producers structure, one 32-bit
 * word at a time, into USTORM internal memory for this port/client.
 */
14798 static __inline void
14799 bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp,
14800 uint16_t bd_prod, uint16_t cqe_prod, uint16_t sge_prod)
/* volatile: the word-by-word REG_WR copy below must not be reordered
 * or coalesced by the compiler. */
14802 volatile struct ustorm_eth_rx_producers rx_prods = {0};
14805 /* Update producers. */
14806 rx_prods.bd_prod = bd_prod;
14807 rx_prods.cqe_prod = cqe_prod;
14808 rx_prods.sge_prod = sge_prod;
14812 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++){
14813 REG_WR(sc, BAR_USTORM_INTMEM +
14814 USTORM_RX_PRODS_OFFSET(BP_PORT(sc), fp->cl_id) + i * 4,
14815 ((volatile uint32_t *) &rx_prods)[i]);
14818 DBPRINT(sc, BXE_EXTREME_RECV, "%s(%d): Wrote fp[%02d] bd_prod = 0x%04X, "
14819 "cqe_prod = 0x%04X, sge_prod = 0x%04X\n", __FUNCTION__, curcpu,
14820 fp->index, bd_prod, cqe_prod, sge_prod);
14824 * Processes received frames.
14830 bxe_rxeof(struct bxe_fastpath *fp)
14832 struct bxe_softc *sc;
14834 uint16_t rx_bd_cons, rx_bd_cons_idx;
14835 uint16_t rx_bd_prod, rx_bd_prod_idx;
14836 uint16_t rx_cq_cons, rx_cq_cons_idx;
14837 uint16_t rx_cq_prod, rx_cq_cons_sb;
14838 unsigned long rx_pkts = 0;
14844 DBENTER(BXE_EXTREME_RECV);
14846 /* Get the status block's view of the RX completion consumer index. */
14847 rx_cq_cons_sb = bxe_rx_cq_cons(fp);
14850 * Get working copies of the driver's view of the
14851 * RX indices. These are 16 bit values that are
14852 * expected to increment from 0 to 65535 and then
14853 * wrap-around to 0 again.
14855 rx_bd_cons = fp->rx_bd_cons;
14856 rx_bd_prod = fp->rx_bd_prod;
14857 rx_cq_cons = fp->rx_cq_cons;
14858 rx_cq_prod = fp->rx_cq_prod;
14860 DBPRINT(sc, (BXE_EXTREME_RECV),
14861 "%s(%d): BEFORE: fp[%02d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
14862 "rx_cq_cons_sw = 0x%04X, rx_cq_prod_sw = 0x%04X\n", __FUNCTION__,
14863 curcpu, fp->index, rx_bd_cons, rx_bd_prod, rx_cq_cons, rx_cq_prod);
14866 * Memory barrier to prevent speculative reads of the RX buffer
14867 * from getting ahead of the index in the status block.
14872 * Scan through the receive chain as long
14873 * as there is work to do.
14875 while (rx_cq_cons != rx_cq_cons_sb) {
14877 union eth_rx_cqe *cqe;
14878 uint8_t cqe_fp_flags;
14882 * Convert the 16 bit indices used by hardware
14883 * into array indices used by the driver.
14885 rx_cq_cons_idx = RCQ_ENTRY(rx_cq_cons);
14886 rx_bd_prod_idx = RX_BD(rx_bd_prod);
14887 rx_bd_cons_idx = RX_BD(rx_bd_cons);
14890 /* Fetch the completion queue entry (i.e. cookie). */
14891 cqe = (union eth_rx_cqe *)
14892 &fp->rcq_chain[rx_cq_cons_idx];
14893 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
14895 /* Sanity check the cookie flags. */
14896 if (__predict_false(cqe_fp_flags == 0)) {
14897 fp->rx_null_cqe_flags++;
14898 DBRUN(bxe_dump_cqe(fp, rx_cq_cons_idx, cqe));
14899 /* ToDo: What error handling can be done here? */
14902 /* Check the CQE type for slowpath or fastpath completion. */
14903 if (__predict_false(CQE_TYPE(cqe_fp_flags) ==
14904 RX_ETH_CQE_TYPE_ETH_RAMROD)) {
14905 /* This is a slowpath completion. */
14906 bxe_sp_event(fp, cqe);
14907 goto bxe_rxeof_next_cqe;
14910 /* This is a fastpath completion. */
14912 /* Get the length and pad information from the CQE. */
14913 len = le16toh(cqe->fast_path_cqe.pkt_len);
14914 pad = cqe->fast_path_cqe.placement_offset;
14916 /* Check if the completion is for TPA. */
14917 if ((fp->disable_tpa == FALSE) &&
14918 (TPA_TYPE(cqe_fp_flags) !=
14919 (TPA_TYPE_START | TPA_TYPE_END))) {
14920 uint16_t queue = cqe->fast_path_cqe.queue_index;
14923 * No need to worry about error flags in
14924 * the frame as the firmware has already
14925 * managed that for us when aggregating
14929 /* Check if TPA aggregation has started. */
14930 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
14931 bxe_tpa_start(fp, queue, rx_bd_cons_idx,
14933 goto bxe_rxeof_next_rx;
14936 /* Check if TPA aggregation has completed. */
14937 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
14938 DBRUNIF(!BXE_RX_SUM_FIX(cqe),
14939 DBPRINT(sc, BXE_FATAL,
14940 "%s(): STOP on non-TCP data.\n",
14944 * This is the size of the linear
14945 * data on this mbuf.
14947 len = le16toh(cqe->fast_path_cqe.len_on_bd);
14950 * Stop the aggregation and pass
14953 bxe_tpa_stop(sc, fp, queue, pad, len,
14954 cqe, rx_cq_cons_idx);
14955 bxe_update_sge_prod(fp,
14956 &cqe->fast_path_cqe);
14957 goto bxe_rxeof_next_cqe;
14961 m = fp->rx_mbuf_ptr[rx_bd_cons_idx];
14963 /* Allocate a replacement before modifying existing mbuf. */
14964 rc = bxe_alloc_rx_bd_mbuf(fp, rx_bd_prod_idx);
14966 /* Drop the frame and log a soft error. */
14967 fp->rx_soft_errors++;
14968 goto bxe_rxeof_next_rx;
14971 /* Check if the received frame has any errors. */
14972 if (__predict_false(cqe_fp_flags &
14973 ETH_RX_ERROR_FLAGS)) {
14974 DBPRINT(sc, BXE_WARN ,
14975 "%s(): fp[%02d].cqe[0x%04X] has errors "
14976 "(0x%08X)!\n", __FUNCTION__, fp->index,
14977 rx_cq_cons, cqe_fp_flags);
14979 fp->rx_soft_errors++;
14980 goto bxe_rxeof_next_rx;
14983 /* We have a replacement, fixup the current mbuf. */
14985 m->m_pkthdr.len = m->m_len = len;
14987 /* Assign packet to the appropriate interface. */
14988 m->m_pkthdr.rcvif = ifp;
14990 /* Assume no hardware checksum complated. */
14991 m->m_pkthdr.csum_flags = 0;
14993 /* Validate checksum if offload enabled. */
14994 if (ifp->if_capenable & IFCAP_RXCSUM) {
14995 /* Check whether IP checksummed or not. */
14997 !(cqe->fast_path_cqe.status_flags &
14998 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
14999 m->m_pkthdr.csum_flags |=
15001 if (__predict_false(cqe_fp_flags &
15002 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
15003 DBPRINT(sc, BXE_WARN_SEND,
15004 "%s(): Invalid IP checksum!\n",
15007 m->m_pkthdr.csum_flags |=
15011 /* Check for a valid TCP/UDP frame. */
15013 !(cqe->fast_path_cqe.status_flags &
15014 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
15015 /* Check for a good TCP/UDP checksum. */
15016 if (__predict_false(cqe_fp_flags &
15017 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
15018 DBPRINT(sc, BXE_VERBOSE_RECV,
15019 "%s(): Invalid TCP/UDP checksum!\n",
15022 m->m_pkthdr.csum_data = 0xFFFF;
15023 m->m_pkthdr.csum_flags |=
15031 * If we received a packet with a vlan tag,
15032 * attach that information to the packet.
15034 if (cqe->fast_path_cqe.pars_flags.flags &
15035 PARSING_FLAGS_VLAN) {
15036 m->m_pkthdr.ether_vtag =
15037 cqe->fast_path_cqe.vlan_tag;
15038 m->m_flags |= M_VLANTAG;
15041 #if __FreeBSD_version >= 800000
15042 /* Tell OS what RSS queue was used for this flow. */
15043 m->m_pkthdr.flowid = fp->index;
15044 m->m_flags |= M_FLOWID;
15047 /* Last chance to check for problems. */
15048 DBRUN(bxe_validate_rx_packet(fp, rx_cq_cons, cqe, m));
15050 /* Update packet statistics. */
15051 ifp->if_ipackets++;
15054 /* ToDo: Any potential locking issues here? */
15055 /* Pass the frame to the stack. */
15056 (*ifp->if_input)(ifp, m);
15058 DBRUN(fp->rx_mbuf_alloc--);
15062 rx_bd_prod = NEXT_RX_BD(rx_bd_prod);
15063 rx_bd_cons = NEXT_RX_BD(rx_bd_cons);
15065 bxe_rxeof_next_cqe:
15066 rx_cq_prod = NEXT_RCQ_IDX(rx_cq_prod);
15067 rx_cq_cons = NEXT_RCQ_IDX(rx_cq_cons);
15070 * Memory barrier to prevent speculative reads of the RX buffer
15071 * from getting ahead of the index in the status block.
15076 /* Update driver copy of the fastpath indices. */
15077 fp->rx_bd_cons = rx_bd_cons;
15078 fp->rx_bd_prod = rx_bd_prod;
15079 fp->rx_cq_cons = rx_cq_cons;
15080 fp->rx_cq_prod = rx_cq_prod;
15082 DBPRINT(sc, (BXE_EXTREME_RECV),
15083 "%s(%d): AFTER: fp[%02d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
15084 "rx_cq_cons_sw = 0x%04X, rx_cq_prod_sw = 0x%04X\n", __FUNCTION__,
15085 curcpu, fp->index, rx_bd_cons, rx_bd_prod, rx_cq_cons, rx_cq_prod);
15087 /* Update producers */
15088 bxe_update_rx_prod(sc, fp, fp->rx_bd_prod,
15089 fp->rx_cq_prod, fp->rx_sge_prod);
15090 bus_space_barrier(sc->bxe_btag, sc->bxe_bhandle, 0, 0,
15091 BUS_SPACE_BARRIER_READ);
15093 fp->rx_pkts += rx_pkts;
15094 DBEXIT(BXE_EXTREME_RECV);
15098 * Processes transmit completions.
15104 bxe_txeof(struct bxe_fastpath *fp)
15106 struct bxe_softc *sc;
15108 struct eth_tx_start_bd *txbd;
15109 uint16_t hw_pkt_cons, sw_pkt_cons, sw_tx_bd_cons;
15110 uint16_t bd_index, pkt_index, nbds;
15116 DBENTER(BXE_EXTREME_SEND);
15118 /* Get the hardware's view of the TX packet consumer index. */
15119 hw_pkt_cons = le16toh(*fp->tx_pkt_cons_sb);
15120 sw_pkt_cons = fp->tx_pkt_cons;
15121 sw_tx_bd_cons = fp->tx_bd_cons;
15123 /* Cycle through any completed TX chain page entries. */
15124 while (sw_pkt_cons != hw_pkt_cons) {
15125 bd_index = TX_BD(sw_tx_bd_cons);
15126 pkt_index = TX_BD(sw_pkt_cons);
15128 txbd = &fp->tx_chain[bd_index].start_bd;
15131 /* Free the completed frame's mbuf. */
15132 if (__predict_true(fp->tx_mbuf_ptr[pkt_index] != NULL)) {
15133 /* Unmap the mbuf from non-paged memory. */
15134 bus_dmamap_unload(fp->tx_mbuf_tag,
15135 fp->tx_mbuf_map[pkt_index]);
15137 /* Return the mbuf to the system. */
15138 m_freem(fp->tx_mbuf_ptr[pkt_index]);
15139 fp->tx_mbuf_alloc--;
15140 fp->tx_mbuf_ptr[pkt_index] = NULL;
15143 fp->tx_chain_lost_mbuf++;
15146 /* Updated packet consumer value. */
15149 /* Skip over the remaining used buffer descriptors. */
15150 fp->tx_bd_used -= nbds;
15151 for (i = 0; i < nbds; i++)
15152 sw_tx_bd_cons = NEXT_TX_BD(sw_tx_bd_cons);
15154 /* Check for new work since we started. */
15155 hw_pkt_cons = le16toh(*fp->tx_pkt_cons_sb);
15159 /* Enable new transmits if we've made enough room. */
15160 if (fp->tx_bd_used < BXE_TX_CLEANUP_THRESHOLD) {
15161 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
15162 if (fp->tx_bd_used == 0) {
15164 * Clear the watchdog timer if we've emptied
15167 fp->watchdog_timer = 0;
15170 * Reset the watchdog timer if we still have
15171 * transmits pending.
15173 fp->watchdog_timer = BXE_TX_TIMEOUT;
15177 /* Save our indices. */
15178 fp->tx_pkt_cons = sw_pkt_cons;
15179 fp->tx_bd_cons = sw_tx_bd_cons;
15180 DBEXIT(BXE_EXTREME_SEND);
15184 * Transmit timeout handler.
15187 * 0 = No timeout, !0 = timeout occurred.
/*
 * bxe_watchdog() - Transmit timeout handler for one fastpath.
 *
 * Decrements the per-fastpath watchdog timer; when it expires (reaches 0
 * after having been armed), logs the timeout, marks the interface down,
 * and restarts the controller via bxe_stop_locked()/bxe_init_locked().
 *
 * NOTE(review): the assignment of `sc', the early-exit label and the
 * return statement are not visible in this excerpt -- confirm against
 * the complete source (returns 0 on no timeout, non-zero on timeout).
 */
15190 bxe_watchdog(struct bxe_fastpath *fp)
15192 struct bxe_softc *sc;
15196 DBENTER(BXE_INSANE_SEND);
15199 if (fp->watchdog_timer == 0 || --fp->watchdog_timer) {
15202 goto bxe_watchdog_exit;
15206 BXE_PRINTF("TX watchdog timeout occurred on fp[%02d], "
15207 "resetting!\n", fp->index);
15209 /* DBRUNLV(BXE_FATAL, bxe_breakpoint(sc)); */
15213 /* Mark the interface as down. */
15214 sc->bxe_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
15216 bxe_stop_locked(sc, UNLOAD_NORMAL);
15218 bxe_init_locked(sc, LOAD_OPEN);
15220 BXE_CORE_UNLOCK(sc);
15223 DBEXIT(BXE_INSANE_SEND);
15229 * The periodic timer tick routine.
15231 * This code only runs when the interface is up.
/*
 * bxe_tick() - Periodic timer tick routine (callout handler).
 *
 * Runs once per second while the interface is up: checks every fastpath
 * for a TX watchdog timeout, reschedules itself, exchanges keep-alive
 * "pulse" sequence numbers with the MCP firmware over shared memory, and
 * triggers a statistics update when the device is in an active state.
 *
 * NOTE(review): lines assigning `sc'/`fp' and declaring `i'/`func' are
 * not visible in this excerpt -- confirm against the complete source.
 */
15237 bxe_tick(void *xsc)
15239 struct bxe_softc *sc;
15240 struct bxe_fastpath *fp;
15242 /* Re-enable at a later time. */
15243 uint32_t drv_pulse, mcp_pulse;
15248 DBENTER(BXE_INSANE_MISC);
15251 /* Check for TX timeouts on any fastpath. */
15252 for (i = 0; i < sc->num_queues; i++) {
15255 if (bxe_watchdog(fp) != 0)
15259 func = BP_FUNC(sc);
15261 /* Schedule the next tick. */
15262 callout_reset(&sc->bxe_tick_callout, hz, bxe_tick, sc);
15266 func = BP_FUNC(sc);
15268 ++sc->fw_drv_pulse_wr_seq;
15269 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
15271 /* Let the MCP know we're alive. */
15272 drv_pulse = sc->fw_drv_pulse_wr_seq;
15273 SHMEM_WR(sc, func_mb[func].drv_pulse_mb, drv_pulse);
15275 /* Check if the MCP is still alive. */
15276 mcp_pulse = (SHMEM_RD(sc, func_mb[func].mcp_pulse_mb) &
15277 MCP_PULSE_SEQ_MASK);
15280 * The delta between driver pulse and MCP response should be 1
15281 * (before MCP response) or 0 (after MCP response).
15283 if ((drv_pulse != mcp_pulse) && (drv_pulse != ((mcp_pulse + 1) &
15284 MCP_PULSE_SEQ_MASK))) {
15285 /* Someone's in cardiac arrest. */
15286 DBPRINT(sc, BXE_WARN,
15287 "%s(): drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
15288 __FUNCTION__, drv_pulse, mcp_pulse);
15293 if ((sc->state == BXE_STATE_OPEN) || (sc->state == BXE_STATE_DISABLED))
15294 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
15299 * Allows the driver state to be dumped through the sysctl interface.
15302 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_driver_state() - Sysctl handler that dumps driver state.
 *
 * Writing any value to the sysctl dumps the softc state, every fastpath
 * state, and the status block to the console via the bxe_dump_* helpers.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15305 bxe_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
15307 struct bxe_softc *sc;
15308 struct bxe_fastpath *fp;
15309 int error, i, result;
15311 sc = (struct bxe_softc *)arg1;
15313 error = sysctl_handle_int(oidp, &result, 0, req);
15314 if (error || !req->newptr)
15318 bxe_dump_driver_state(sc);
15319 for (i = 0; i < sc->num_queues; i++) {
15321 bxe_dump_fp_state(fp);
15323 bxe_dump_status_block(sc);
15330 * Allows the hardware state to be dumped through the sysctl interface.
15333 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_hw_state() - Sysctl handler that dumps hardware state.
 *
 * Writing any value to the sysctl dumps controller state to the console.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15336 bxe_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
15338 struct bxe_softc *sc;
15341 sc = (struct bxe_softc *)arg1;
15343 error = sysctl_handle_int(oidp, &result, 0, req);
15344 if (error || !req->newptr)
15348 bxe_dump_hw_state(sc);
15354 * Allows the MCP firmware to be dumped through the sysctl interface.
15357 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_dump_fw() - Sysctl handler that dumps the MCP firmware.
 *
 * NOTE(review): the body that performs the actual dump is not visible in
 * this excerpt -- only the standard sysctl_handle_int() preamble remains.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15360 bxe_sysctl_dump_fw(SYSCTL_HANDLER_ARGS)
15362 struct bxe_softc *sc;
15365 sc = (struct bxe_softc *)arg1;
15367 error = sysctl_handle_int(oidp, &result, 0, req);
15368 if (error || !req->newptr)
15378 * Provides a sysctl interface to allow dumping the RX completion chain.
15381 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_dump_rx_cq_chain() - Sysctl handler to dump an RX CQ chain.
 *
 * The value written selects the fastpath queue whose completion queue
 * chain is dumped; out-of-range values are silently ignored.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15384 bxe_sysctl_dump_rx_cq_chain(SYSCTL_HANDLER_ARGS)
15386 struct bxe_softc *sc;
15387 struct bxe_fastpath *fp;
15390 sc = (struct bxe_softc *)arg1;
15392 error = sysctl_handle_int(oidp, &result, 0, req);
15393 if (error || !req->newptr)
15396 if ((result >= 0) && (result < sc->num_queues)) {
15397 fp = &sc->fp[result];
15398 bxe_dump_rx_cq_chain(fp, 0, TOTAL_RCQ_ENTRIES);
15406 * Provides a sysctl interface to allow dumping the RX chain.
15409 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_dump_rx_bd_chain() - Sysctl handler to dump an RX BD chain.
 *
 * The value written selects the fastpath queue whose rx_bd chain is
 * dumped; out-of-range values are silently ignored.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15412 bxe_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
15414 struct bxe_softc *sc;
15415 struct bxe_fastpath *fp;
15418 sc = (struct bxe_softc *)arg1;
15420 error = sysctl_handle_int(oidp, &result, 0, req);
15421 if (error || !req->newptr)
15424 if ((result >= 0) && (result < sc->num_queues)) {
15425 fp = &sc->fp[result];
15426 bxe_dump_rx_bd_chain(fp, 0, TOTAL_RX_BD);
15433 * Provides a sysctl interface to allow dumping the TX chain.
15436 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_dump_tx_chain() - Sysctl handler to dump a TX BD chain.
 *
 * The value written selects the fastpath queue whose tx_bd chain is
 * dumped; out-of-range values are silently ignored.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15439 bxe_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
15441 struct bxe_softc *sc;
15442 struct bxe_fastpath *fp;
15445 sc = (struct bxe_softc *)arg1;
15447 error = sysctl_handle_int(oidp, &result, 0, req);
15448 if (error || !req->newptr)
15451 if ((result >= 0) && (result < sc->num_queues)) {
15452 fp = &sc->fp[result];
15453 bxe_dump_tx_chain(fp, 0, TOTAL_TX_BD);
15460 * Provides a sysctl interface to allow reading arbitrary registers in the
15461 * device. DO NOT ENABLE ON PRODUCTION SYSTEMS!
15464 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_reg_read() - Sysctl handler for arbitrary register reads.
 *
 * The value written is treated as a register offset; the register is
 * read and its value printed to the console. Debug only -- as noted in
 * the surrounding source, do not enable on production systems.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15467 bxe_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
15469 struct bxe_softc *sc;
15470 uint32_t result, val;
15473 sc = (struct bxe_softc *)arg1;
15475 error = sysctl_handle_int(oidp, &result, 0, req);
15476 if (error || (req->newptr == NULL))
15479 val = REG_RD(sc, result);
15480 BXE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
15486 * Provides a sysctl interface to allow generating a grcdump.
15489 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_grcdump() - Sysctl handler to generate a grcdump.
 *
 * The value written selects whether the dump contents are also logged
 * to the console (bxe_grcdump() second argument: 1 = log, 0 = silent).
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15492 bxe_sysctl_grcdump(SYSCTL_HANDLER_ARGS)
15494 struct bxe_softc *sc;
15497 sc = (struct bxe_softc *)arg1;
15499 error = sysctl_handle_int(oidp, &result, 0, req);
15500 if (error || !req->newptr)
15504 /* Generate a grcdump and log the contents.*/
15505 bxe_grcdump(sc, 1);
15507 /* Generate a grcdump and don't log the contents. */
15508 bxe_grcdump(sc, 0);
15515 * Provides a sysctl interface to forcing the driver to dump state and
15516 * enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS!
15519 * 0 for success, positive value for failure.
/*
 * bxe_sysctl_breakpoint() - Sysctl handler to force a driver breakpoint.
 *
 * Writing any value dumps driver state and enters the debugger via
 * bxe_breakpoint(). Debug only -- as noted in the surrounding source,
 * do not enable on production systems.
 *
 * Returns:
 *   0 for success, positive value for failure.
 */
15522 bxe_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
15524 struct bxe_softc *sc;
15528 error = sysctl_handle_int(oidp, &result, 0, req);
15529 if (error || !req->newptr)
15533 sc = (struct bxe_softc *)arg1;
15534 bxe_breakpoint(sc);
15542 * Adds any sysctl parameters for tuning or debugging purposes.
/*
 * bxe_add_sysctls() - Register the driver's sysctl tree.
 *
 * Attaches read-only device statistics (eth_stats counters and software
 * TX-path counters), a per-fastpath "fp[%02d]" subtree of RX/TX/memory
 * statistics, and -- under BXE_DEBUG -- read/write procedure sysctls for
 * state dumps, grcdump, register reads and the driver breakpoint.
 *
 * NOTE(review): several #ifdef BXE_DEBUG guards and closing braces are
 * not visible in this excerpt; the trailing #endif implies the debug
 * sysctls are conditionally compiled. Confirm against the full source.
 */
15548 bxe_add_sysctls(struct bxe_softc *sc)
15550 struct sysctl_ctx_list *ctx =
15551 device_get_sysctl_ctx(sc->dev);
15552 struct sysctl_oid_list *children =
15553 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15554 struct bxe_port_stats *estats = &sc->eth_stats;
15556 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15557 "estats_total_bytes_received_hi",
15558 CTLFLAG_RD, &estats->total_bytes_received_hi,
15559 0, "Total bytes received (hi)");
15561 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15562 "estats_total_bytes_received_lo",
15563 CTLFLAG_RD, &estats->total_bytes_received_lo,
15564 0, "Total bytes received (lo)");
15566 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15567 "estats_valid_bytes_received_hi",
15568 CTLFLAG_RD, &estats->valid_bytes_received_hi,
15569 0, "Valid bytes received (hi)");
15571 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15572 "estats_valid_bytes_received_lo",
15573 CTLFLAG_RD, &estats->valid_bytes_received_lo,
15574 0, "Valid bytes received (lo)");
15576 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15577 "estats_total_unicast_packets_received_hi",
15578 CTLFLAG_RD, &estats->total_unicast_packets_received_hi,
15579 0, "Total unicast packets received (hi)");
15581 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15582 "estats_total_unicast_packets_received_lo",
15583 CTLFLAG_RD, &estats->total_unicast_packets_received_lo,
15584 0, "Total unicast packets received (lo)");
15586 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15587 "estats_total_bytes_transmitted_hi",
15588 CTLFLAG_RD, &estats->total_bytes_transmitted_hi,
15589 0, "Total bytes transmitted (hi)");
15591 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15592 "estats_total_bytes_transmitted_lo",
15593 CTLFLAG_RD, &estats->total_bytes_transmitted_lo,
15594 0, "Total bytes transmitted (lo)");
15596 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15597 "estats_total_unicast_packets_transmitted_hi",
15598 CTLFLAG_RD, &estats->total_unicast_packets_transmitted_hi,
15599 0, "Total unicast packets transmitted (hi)");
15601 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15602 "estats_total_unicast_packets_transmitted_lo",
15603 CTLFLAG_RD, &estats->total_unicast_packets_transmitted_lo,
15604 0, "Total unicast packets transmitted (lo)");
15606 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15607 "estats_total_broadcast_packets_received_lo",
15608 CTLFLAG_RD, &estats->total_broadcast_packets_received_lo,
15609 0, "Total broadcast packets received (lo)");
15611 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15612 "estats_total_broadcast_packets_transmitted_lo",
15613 CTLFLAG_RD, &estats->total_broadcast_packets_transmitted_lo,
15614 0, "Total broadcast packets transmitted (lo)");
15616 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15617 "estats_total_multicast_packets_received_lo",
15618 CTLFLAG_RD, &estats->total_multicast_packets_received_lo,
15619 0, "Total multicast packets received (lo)");
15621 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15622 "estats_total_multicast_packets_transmitted_lo",
15623 CTLFLAG_RD, &estats->total_multicast_packets_transmitted_lo,
15624 0, "Total multicast packets transmitted (lo)");
15626 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15627 "tx_stat_etherstatspkts64octets_hi",
15628 CTLFLAG_RD, &estats->tx_stat_etherstatspkts64octets_hi,
15629 0, "Total 64 byte packets transmitted (hi)");
15631 /* ToDo: Fix for 64 bit access. */
15632 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15633 "tx_stat_etherstatspkts64octets_lo",
15634 CTLFLAG_RD, &estats->tx_stat_etherstatspkts64octets_lo,
15635 0, "Total 64 byte packets transmitted (lo)");
15637 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
15639 CTLFLAG_RD, &estats->driver_xoff,
15640 0, "Driver transmit queue full count");
15642 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
15643 "tx_start_called_with_link_down",
15644 CTLFLAG_RD, &sc->tx_start_called_with_link_down,
15645 "TX start routine called while link down count");
15647 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
15648 "tx_start_called_with_queue_full",
15649 CTLFLAG_RD, &sc->tx_start_called_with_queue_full,
15650 "TX start routine called with queue full count");
15652 /* ToDo: Add more statistics here. */
15655 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "bxe_debug",
15656 CTLFLAG_RW, &bxe_debug, 0,
15657 "Debug message level flag");
15661 #define QUEUE_NAME_LEN 32
15662 char namebuf[QUEUE_NAME_LEN];
15663 struct sysctl_oid *queue_node;
15664 struct sysctl_oid_list *queue_list;
15666 for (int i = 0; i < sc->num_queues; i++) {
15667 struct bxe_fastpath *fp = &sc->fp[i];
15668 snprintf(namebuf, QUEUE_NAME_LEN, "fp[%02d]", i);
15670 queue_node = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
15671 namebuf, CTLFLAG_RD, NULL, "Queue Name");
15672 queue_list = SYSCTL_CHILDREN(queue_node);
15675 * Receive related fastpath statistics.
15677 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15679 CTLFLAG_RD, &fp->rx_pkts,
15680 "Received packets");
15682 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15684 CTLFLAG_RD, &fp->rx_tpa_pkts,
15685 "Received TPA packets");
15687 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15688 "rx_null_cqe_flags",
15689 CTLFLAG_RD, &fp->rx_null_cqe_flags,
15690 "CQEs with NULL flags count");
15692 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15694 CTLFLAG_RD, &fp->rx_soft_errors,
15695 "Received frames dropped by driver count");
15698 * Transmit related fastpath statistics.
15700 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15702 CTLFLAG_RD, &fp->tx_pkts,
15703 "Transmitted packets");
15705 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15707 CTLFLAG_RD, &fp->tx_soft_errors,
15708 "Transmit frames dropped by driver count");
15710 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15711 "tx_offload_frames_csum_ip",
15712 CTLFLAG_RD, &fp->tx_offload_frames_csum_ip,
15713 "IP checksum offload frame count");
15715 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15716 "tx_offload_frames_csum_tcp",
15717 CTLFLAG_RD, &fp->tx_offload_frames_csum_tcp,
15718 "TCP checksum offload frame count");
15720 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15721 "tx_offload_frames_csum_udp",
15722 CTLFLAG_RD, &fp->tx_offload_frames_csum_udp,
15723 "UDP checksum offload frame count");
15725 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15726 "tx_offload_frames_tso",
15727 CTLFLAG_RD, &fp->tx_offload_frames_tso,
15728 "TSO offload frame count");
15730 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15731 "tx_header_splits",
15732 CTLFLAG_RD, &fp->tx_header_splits,
15733 "TSO frame header/data split count");
15735 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15736 "tx_encap_failures",
15737 CTLFLAG_RD, &fp->tx_encap_failures,
15738 "TX encapsulation failure count");
15740 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15741 "tx_hw_queue_full",
15742 CTLFLAG_RD, &fp->tx_hw_queue_full,
15743 "TX H/W queue too full to add a frame count");
15745 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15746 "tx_hw_max_queue_depth",
15747 CTLFLAG_RD, &fp->tx_hw_max_queue_depth,
15748 "TX H/W maximum queue depth count");
15750 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15751 "tx_dma_mapping_failure",
15752 CTLFLAG_RD, &fp->tx_dma_mapping_failure,
15753 "TX DMA mapping failure");
15755 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO,
15756 "tx_max_drbr_queue_depth",
15757 CTLFLAG_RD, &fp->tx_max_drbr_queue_depth,
15758 0, "TX S/W queue maximum depth");
15760 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15761 "tx_window_violation_std",
15762 CTLFLAG_RD, &fp->tx_window_violation_std,
15763 "Standard frame TX BD window violation count");
15765 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15766 "tx_window_violation_tso",
15767 CTLFLAG_RD, &fp->tx_window_violation_tso,
15768 "TSO frame TX BD window violation count");
15770 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15771 "tx_unsupported_tso_request_ipv6",
15772 CTLFLAG_RD, &fp->tx_unsupported_tso_request_ipv6,
15773 "TSO frames with unsupported IPv6 protocol count");
15775 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15776 "tx_unsupported_tso_request_not_tcp",
15777 CTLFLAG_RD, &fp->tx_unsupported_tso_request_not_tcp,
15778 "TSO frames with unsupported protocol count");
15780 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15781 "tx_chain_lost_mbuf",
15782 CTLFLAG_RD, &fp->tx_chain_lost_mbuf,
15783 "Mbufs lost on TX chain count");
15785 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15786 "tx_frame_deferred",
15787 CTLFLAG_RD, &fp->tx_frame_deferred,
15788 "TX frame deferred from H/W queue to S/W queue count");
15790 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15792 CTLFLAG_RD, &fp->tx_queue_xoff,
15793 "TX queue full count");
15796 * Memory related fastpath statistics.
15798 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15799 "mbuf_rx_bd_alloc_failed",
15800 CTLFLAG_RD, &fp->mbuf_rx_bd_alloc_failed,
15801 "RX BD mbuf allocation failure count");
15803 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15804 "mbuf_rx_bd_mapping_failed",
15805 CTLFLAG_RD, &fp->mbuf_rx_bd_mapping_failed,
15806 "RX BD mbuf mapping failure count");
15808 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15809 "mbuf_tpa_alloc_failed",
15810 CTLFLAG_RD, &fp->mbuf_tpa_alloc_failed,
15811 "TPA mbuf allocation failure count");
15813 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15814 "mbuf_tpa_mapping_failed",
15815 CTLFLAG_RD, &fp->mbuf_tpa_mapping_failed,
15816 "TPA mbuf mapping failure count");
15818 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15819 "mbuf_sge_alloc_failed",
15820 CTLFLAG_RD, &fp->mbuf_sge_alloc_failed,
15821 "SGE mbuf allocation failure count");
15823 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15824 "mbuf_sge_mapping_failed",
15825 CTLFLAG_RD, &fp->mbuf_sge_mapping_failed,
15826 "SGE mbuf mapping failure count");
15828 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15829 "mbuf_defrag_attempts",
15830 CTLFLAG_RD, &fp->mbuf_defrag_attempts,
15831 "Mbuf defrag attempt count");
15833 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
15834 "mbuf_defrag_failures",
15835 CTLFLAG_RD, &fp->mbuf_defrag_failures,
15836 "Mbuf defrag failure count");
15842 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "driver_state",
15843 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15844 bxe_sysctl_driver_state, "I", "Drive state information");
15846 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_state",
15847 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15848 bxe_sysctl_hw_state, "I", "Hardware state information");
15850 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_fw",
15851 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15852 bxe_sysctl_dump_fw, "I", "Dump MCP firmware");
15854 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_bd_chain",
15855 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15856 bxe_sysctl_dump_rx_bd_chain, "I", "Dump rx_bd chain");
15858 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_cq_chain",
15859 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15860 bxe_sysctl_dump_rx_cq_chain, "I", "Dump cqe chain");
15862 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_tx_chain",
15863 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15864 bxe_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
15867 * Generates a GRCdump (run sysctl dev.bxe.0.grcdump=0
15868 * before accessing buffer below).
15870 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "grcdump",
15871 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bxe_sysctl_grcdump,
15872 "I", "Initiate a grcdump operation");
15876 * Use "sysctl -b dev.bxe.0.grcdump_buffer > buf.bin".
15878 SYSCTL_ADD_OPAQUE(ctx, children, OID_AUTO, "grcdump_buffer",
15879 CTLFLAG_RD | CTLFLAG_SKIP, sc->grcdump_buffer,
15880 BXE_GRCDUMP_BUF_SIZE, "IU", "Access grcdump buffer");
15882 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "breakpoint",
15883 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15884 bxe_sysctl_breakpoint, "I", "Driver breakpoint");
15886 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
15887 CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0,
15888 bxe_sysctl_reg_read, "I", "Register read");
15890 #endif /* BXE_DEBUG */
15894 * BXE Debug Routines
15898 * Writes out the header for the debug dump buffer.
/*
 * bxe_dump_debug_header() - Write the grcdump buffer header.
 *
 * Copies the chip-specific hd_param (E1H vs. E1) timestamp/version
 * words and the four STORM WAITP register values into the dump buffer,
 * advancing *index per DWORD written. The header size (in DWORDs, not
 * counting the size word itself) is stored back at buf[0].
 */
15907 bxe_dump_debug_header(struct bxe_softc *sc, uint32_t *index)
15909 struct hd_param hd_param_cu = {0};
15912 buf = sc->grcdump_buffer;
15913 if (CHIP_IS_E1H(sc))
15914 hd_param_cu = hd_param_e1h;
15916 hd_param_cu = hd_param_e1;
15918 buf[(*index)++] = hd_param_cu.time_stamp;
15919 buf[(*index)++] = hd_param_cu.diag_ver;
15920 buf[(*index)++] = hd_param_cu.grc_dump_ver;
15922 buf[(*index)++] = REG_RD_IND(sc, XSTORM_WAITP_ADDRESS);
15923 buf[(*index)++] = REG_RD_IND(sc, TSTORM_WAITP_ADDRESS);
15924 buf[(*index)++] = REG_RD_IND(sc, USTORM_WAITP_ADDRESS);
15925 buf[(*index)++] = REG_RD_IND(sc, CSTORM_WAITP_ADDRESS);
15927 /* The size of the header is stored at the first DWORD. */
15928 buf[0] = (*index) - 1;
15933 * Writes to the controller to prepare it for a dump.
/*
 * bxe_dump_debug_writes() - Prepare the controller for a grcdump.
 *
 * Writes the STORM WAITP registers (halting the STORM processors for a
 * consistent device state) and, on E1H parts, the TSTORM CAM mode.
 *
 * NOTE(review): the line assigning `write_val' is not visible in this
 * excerpt -- confirm its value against the complete source.
 */
15942 bxe_dump_debug_writes(struct bxe_softc *sc)
15944 uint32_t write_val;
15947 /* Halt the STORMs to get a consistent device state. */
15948 REG_WR_IND(sc, XSTORM_WAITP_ADDRESS, write_val);
15949 REG_WR_IND(sc, TSTORM_WAITP_ADDRESS, write_val);
15950 REG_WR_IND(sc, USTORM_WAITP_ADDRESS, write_val);
15951 REG_WR_IND(sc, CSTORM_WAITP_ADDRESS, write_val);
15953 if (CHIP_IS_E1H(sc))
15954 REG_WR_IND(sc, TSTORM_CAM_MODE, write_val);
15959 * Cycles through the required register reads and dumps them
15960 * to the debug buffer.
/*
 * bxe_dump_debug_reg_read() - Dump register ranges into the grcdump buffer.
 *
 * Selects the chip-specific register address table (E1H vs. E1), then
 * reads each table entry's registers (entry.addr + i*4 for entry.size
 * DWORDs) via indirect access into the dump buffer, advancing *index.
 *
 * Fix: the address-of expressions on the table assignments had been
 * corrupted by an HTML-entity pass ("&reg" rendered as the '(R)'
 * character), producing invalid C; restored "&reg_addrs_e1h[0]" /
 * "&reg_addrs_e1[0]", matching the intact "&wreg_addrs_e1h[0]" pattern
 * in bxe_dump_debug_reg_wread().
 */
15969 bxe_dump_debug_reg_read(struct bxe_softc *sc, uint32_t *index)
15971 preg_addr preg_addrs;
15972 uint32_t regs_count, *buf;
15973 uint32_t i, reg_addrs_index;
15975 buf = sc->grcdump_buffer;
15978 /* Read different registers for different controllers. */
15979 if (CHIP_IS_E1H(sc)) {
15980 regs_count = regs_count_e1h;
15981 preg_addrs = &reg_addrs_e1h[0];
15983 regs_count = regs_count_e1;
15984 preg_addrs = &reg_addrs_e1[0];
15987 /* ToDo: Add a buffer size check. */
15988 for (reg_addrs_index = 0; reg_addrs_index < regs_count;
15989 reg_addrs_index++) {
15990 for (i = 0; i < preg_addrs[reg_addrs_index].size; i++) {
15991 buf[(*index)++] = REG_RD_IND(sc,
15992 preg_addrs[reg_addrs_index].addr + (i * 4));
15998 * Cycles through the required wide register reads and dumps them
15999 * to the debug buffer.
/*
 * bxe_dump_debug_reg_wread() - Dump wide register ranges into the
 * grcdump buffer.
 *
 * Selects the chip-specific wide-register table (E1H vs. E1), then for
 * each entry reads `size' consecutive DWORDs starting at entry.addr,
 * followed by that entry's list of constant registers (CAM entries),
 * all via indirect access, advancing *index per DWORD written.
 */
16005 bxe_dump_debug_reg_wread(struct bxe_softc *sc, uint32_t *index)
16007 pwreg_addr pwreg_addrs;
16008 uint32_t reg_addrs_index, reg_add_read, reg_add_count;
16009 uint32_t *buf, cam_index, wregs_count;
16011 buf = sc->grcdump_buffer;
16012 pwreg_addrs = NULL;
16014 /* Read different registers for different controllers. */
16015 if (CHIP_IS_E1H(sc)) {
16016 wregs_count = wregs_count_e1h;
16017 pwreg_addrs = &wreg_addrs_e1h[0];
16019 wregs_count = wregs_count_e1;
16020 pwreg_addrs = &wreg_addrs_e1[0];
16023 for (reg_addrs_index = 0; reg_addrs_index < wregs_count;
16024 reg_addrs_index++) {
16025 reg_add_read = pwreg_addrs[reg_addrs_index].addr;
16026 for (reg_add_count = 0; reg_add_count <
16027 pwreg_addrs[reg_addrs_index].size; reg_add_count++) {
16028 buf[(*index)++] = REG_RD_IND(sc, reg_add_read);
16029 reg_add_read += sizeof(uint32_t);
16031 for (cam_index = 0; cam_index <
16032 pwreg_addrs[reg_addrs_index].const_regs_count;
16034 buf[(*index)++] = REG_RD_IND(sc,
16035 pwreg_addrs[reg_addrs_index].const_regs[cam_index]);
16041 * Performs a debug dump for offline diagnostics.
16043 * Note that when this routine is called the STORM
16044 * processors will be stopped in order to create a
16045 * cohesive dump. The controller will need to be
16046 * reset before the device can begin passing traffic
/*
 * bxe_grcdump() - Perform a debug dump for offline diagnostics.
 *
 * Fills the pre-allocated grcdump buffer with the dump header and the
 * normal/wide register contents, then (when `log' is non-zero) prints
 * the whole buffer to the system log, eight DWORDs per line. Per the
 * surrounding source, the STORM processors are halted in the process,
 * so the controller must be reset before passing traffic again.
 */
16053 bxe_grcdump(struct bxe_softc *sc, int log)
16055 uint32_t *buf, i, index;
16058 buf = sc->grcdump_buffer;
16061 /* Write the header and register contents to the dump buffer. */
16062 bxe_dump_debug_header(sc, &index);
16063 bxe_dump_debug_writes(sc);
16064 bxe_dump_debug_reg_read(sc,&index);
16065 bxe_dump_debug_reg_wread(sc, &index);
16067 /* Print the results to the system log if necessary. */
16070 "-----------------------------"
16072 "-----------------------------\n");
16073 BXE_PRINTF("Buffer length = 0x%08X bytes\n", index * 4);
16075 for (i = 0; i < index; i += 8) {
16077 "0x%08X - 0x%08X 0x%08X 0x%08X 0x%08X "
16078 "0x%08X 0x%08X 0x%08X 0x%08X\n", i * 4,
16079 buf[i + 0], buf[i + 1], buf[i + 2],
16080 buf[i + 3], buf[i + 4], buf[i + 5],
16081 buf[i + 6], buf[i + 7]);
16085 "-----------------------------"
16087 "-----------------------------\n");
16090 BXE_PRINTF("No grcdump buffer allocated!\n");
16095 * Check that an Ethernet frame is valid and print out debug info if it's
/*
 * bxe_validate_rx_packet() - Sanity-check a received frame (debug build).
 *
 * Runs m_sanity() on the mbuf and range-checks m_len against the minimum
 * Ethernet header and maximum jumbo frame sizes; on failure, dumps the
 * frame contents and the associated completion queue entry.
 */
16102 void bxe_validate_rx_packet(struct bxe_fastpath *fp, uint16_t comp_cons,
16103 union eth_rx_cqe *cqe, struct mbuf *m)
16105 struct bxe_softc *sc;
16110 /* Check that the mbuf is sane. */
16111 error = m_sanity(m, FALSE);
16112 if (error != 1 || ((m->m_len < ETHER_HDR_LEN) |
16113 (m->m_len > ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))) {
16115 bxe_dump_enet(sc, m);
16116 bxe_dump_cqe(fp, comp_cons, cqe);
16117 /* Make sure the packet has a valid length. */
16122 * Prints out Ethernet frame information from an mbuf.
16124 * Partially decode an Ethernet frame to look at some important headers.
/*
 * bxe_dump_enet() - Print a partial decode of an Ethernet frame.
 *
 * Decodes the Ethernet (with optional 802.1Q VLAN encapsulation), IP,
 * and TCP/UDP/ICMP or ARP headers from the mbuf data and prints the
 * important fields to the console. IPv6 is not decoded.
 */
16130 void bxe_dump_enet(struct bxe_softc *sc, struct mbuf *m)
16132 struct ether_vlan_header *eh;
16141 "-----------------------------"
16143 "-----------------------------\n");
16145 eh = mtod(m, struct ether_vlan_header *);
16147 /* Handle VLAN encapsulation if present. */
16148 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
16149 etype = ntohs(eh->evl_proto);
16150 e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
16152 etype = ntohs(eh->evl_encap_proto);
16153 e_hlen = ETHER_HDR_LEN;
16156 BXE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, e_hlen = %d\n",
16157 eh->evl_dhost, ":", eh->evl_shost, ":", etype, e_hlen);
16161 ip = (struct ip *)(m->m_data + e_hlen);
16163 "--ip: dest = 0x%08X , src = 0x%08X, "
16164 "ip_hlen = %d bytes, len = %d bytes, protocol = 0x%02X, "
16165 "ip_id = 0x%04X, csum = 0x%04X\n",
16166 ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr),
16167 (ip->ip_hl << 2), ntohs(ip->ip_len), ip->ip_p,
16168 ntohs(ip->ip_id), ntohs(ip->ip_sum));
16170 switch (ip->ip_p) {
16172 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
16174 "-tcp: dest = %d, src = %d, tcp_hlen = %d "
16175 "bytes, flags = 0x%b, csum = 0x%04X\n",
16176 ntohs(th->th_dport), ntohs(th->th_sport),
16177 (th->th_off << 2), th->th_flags,
16178 "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST\02SYN\01FIN",
16179 ntohs(th->th_sum));
16182 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
16184 "-udp: dest = %d, src = %d, udp_hlen = %d "
16185 "bytes, len = %d bytes, csum = 0x%04X\n",
16186 ntohs(uh->uh_dport), ntohs(uh->uh_sport),
16187 (int)sizeof(struct udphdr), ntohs(uh->uh_ulen),
16188 ntohs(uh->uh_sum));
16191 BXE_PRINTF("icmp:\n");
16194 BXE_PRINTF("----: Other IP protocol.\n");
16197 case ETHERTYPE_IPV6:
16198 /* ToDo: Add IPv6 support. */
16199 BXE_PRINTF("IPv6 not supported!.\n");
16201 case ETHERTYPE_ARP:
16202 BXE_PRINTF("-arp: ");
16203 ah = (struct arphdr *) (m->m_data + e_hlen);
16204 switch (ntohs(ah->ar_op)) {
16205 case ARPOP_REVREQUEST:
16206 printf("reverse ARP request\n");
16208 case ARPOP_REVREPLY:
16209 printf("reverse ARP reply\n");
16211 case ARPOP_REQUEST:
16212 printf("ARP request\n");
16215 printf("ARP reply\n");
16218 printf("other ARP operation\n");
16222 BXE_PRINTF("----: Other protocol.\n");
16226 "-----------------------------"
16228 "-----------------------------\n");
/*
 * bxe_dump_mbuf_data() - Hex-dump the first `len' bytes of an mbuf's
 * data area to the console, with periodic line breaks and spacing.
 */
16233 bxe_dump_mbuf_data(struct mbuf *m, int len)
16238 ptr = mtod(m, uint8_t *);
16239 printf("\nmbuf->m_data:");
16241 for (i = 0; i < len; i++){
16242 if (i != 0 && i % 40 == 0)
16244 else if (i != 0 && i % 6 == 0)
16246 printf("%02x", *ptr++);
16254 * Prints out information about an mbuf.
/*
 * bxe_dump_mbuf() - Print diagnostic information about an mbuf.
 *
 * Prints the mbuf's length/flags/data pointer, the packet-header
 * length, flags and checksum flags when M_PKTHDR is set, and the
 * external-storage buffer, size and type when M_EXT is set. A NULL
 * mbuf is reported and handled gracefully.
 */
16260 void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m)
16263 BXE_PRINTF("mbuf: null pointer\n");
16268 BXE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, "
16269 "m_data = %p\n", m, m->m_len, m->m_flags,
16270 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
16272 if (m->m_flags & M_PKTHDR) {
16273 BXE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, "
16274 "csum_flags = %b\n", m->m_pkthdr.len,
16275 m->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG"
16276 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
16277 "\22M_PROMISC\23M_NOFREE",
16278 m->m_pkthdr.csum_flags,
16279 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP"
16280 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
16281 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
16282 "\14CSUM_PSEUDO_HDR");
16285 if (m->m_flags & M_EXT) {
16286 BXE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
16287 m->m_ext.ext_buf, m->m_ext.ext_size);
16288 switch (m->m_ext.ext_type) {
16290 printf("EXT_CLUSTER\n"); break;
16292 printf("EXT_SFBUF\n"); break;
16294 printf("EXT_JUMBO9\n"); break;
16296 printf("EXT_JUMBO16\n"); break;
16298 printf("EXT_PACKET\n"); break;
16300 printf("EXT_MBUF\n"); break;
16302 printf("EXT_NET_DRV\n"); break;
16304 printf("EXT_MOD_TYPE\n"); break;
16305 case EXT_DISPOSABLE:
16306 printf("EXT_DISPOSABLE\n"); break;
16308 printf("EXT_EXTREF\n"); break;
16310 printf("UNKNOWN\n");
16319 * Prints out information about an rx_bd.
/*
 * bxe_dump_rxbd() - Print one rx_bd entry.
 *
 * Flags out-of-range indices, distinguishes next-page pointer entries
 * (the tail descriptors of each chain page) from normal RX descriptors,
 * and prints the entry's host address in hi:lo form.
 */
16325 void bxe_dump_rxbd(struct bxe_fastpath *fp, int idx,
16326 struct eth_rx_bd *rx_bd)
16328 struct bxe_softc *sc;
16332 /* Check if index out of range. */
16333 if (idx > MAX_RX_BD) {
16334 BXE_PRINTF("fp[%02d].rx_bd[0x%04X] XX: Invalid rx_bd index!\n",
16336 } else if ((idx & RX_BD_PER_PAGE_MASK) >= USABLE_RX_BD_PER_PAGE) {
16337 /* RX Chain page pointer. */
16338 BXE_PRINTF("fp[%02d].rx_bd[0x%04X] NP: haddr=0x%08X:%08X\n",
16339 fp->index, idx, rx_bd->addr_hi, rx_bd->addr_lo);
16341 BXE_PRINTF("fp[%02d].rx_bd[0x%04X] RX: haddr=0x%08X:%08X\n",
16342 fp->index, idx, rx_bd->addr_hi, rx_bd->addr_lo);
16347 * Prints out a completion queue entry.
16353 void bxe_dump_cqe(struct bxe_fastpath *fp, int idx,
16354 union eth_rx_cqe *cqe)
16356 struct bxe_softc *sc;
16360 if (idx > MAX_RCQ_ENTRIES) {
16361 /* Index out of range. */
16362 BXE_PRINTF("fp[%02d].rx_cqe[0x%04X]: Invalid rx_cqe index!\n",
16364 } else if ((idx & USABLE_RCQ_ENTRIES_PER_PAGE) ==
16365 USABLE_RCQ_ENTRIES_PER_PAGE) {
16366 /* CQE next page pointer. */
16367 BXE_PRINTF("fp[%02d].rx_cqe[0x%04X] NP: haddr=0x%08X:%08X\n",
16369 le32toh(cqe->next_page_cqe.addr_hi),
16370 le32toh(cqe->next_page_cqe.addr_lo));
16373 BXE_PRINTF("fp[%02d].rx_cqe[0x%04X] CQ: error_flags=0x%b, "
16374 "pkt_len=0x%04X, status_flags=0x%02X, vlan=0x%04X "
16375 "rss_hash=0x%08X\n", fp->index, idx,
16376 cqe->fast_path_cqe.type_error_flags,
16377 BXE_ETH_FAST_PATH_RX_CQE_ERROR_FLAGS_PRINTFB,
16378 le16toh(cqe->fast_path_cqe.pkt_len),
16379 cqe->fast_path_cqe.status_flags,
16380 le16toh(cqe->fast_path_cqe.vlan_tag),
16381 le32toh(cqe->fast_path_cqe.rss_hash_result));
16386 * Prints out information about a TX parsing BD.
16392 void bxe_dump_tx_parsing_bd(struct bxe_fastpath *fp, int idx,
16393 struct eth_tx_parse_bd *p_bd)
16395 struct bxe_softc *sc;
16399 if (idx > MAX_TX_BD){
16400 /* Index out of range. */
16401 BXE_PRINTF("fp[%02d].tx_bd[0x%04X] XX: Invalid tx_bd index!\n",
16404 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] PB: global_data=0x%b, "
16405 "tcp_flags=0x%b, ip_hlen=%04d, total_hlen=%04d, "
16406 "tcp_pseudo_csum=0x%04X, lso_mss=0x%04X, ip_id=0x%04X, "
16407 "tcp_send_seq=0x%08X\n", fp->index, idx,
16408 p_bd->global_data, BXE_ETH_TX_PARSE_BD_GLOBAL_DATA_PRINTFB,
16409 p_bd->tcp_flags, BXE_ETH_TX_PARSE_BD_TCP_FLAGS_PRINTFB,
16410 p_bd->ip_hlen, p_bd->total_hlen, p_bd->tcp_pseudo_csum,
16411 p_bd->lso_mss, p_bd->ip_id, p_bd->tcp_send_seq);
16416 * Prints out information about a tx_bd.
16422 void bxe_dump_txbd(struct bxe_fastpath *fp, int idx,
16423 union eth_tx_bd_types *tx_bd)
16425 struct bxe_softc *sc;
16429 if (idx > MAX_TX_BD){
16430 /* Index out of range. */
16431 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] XX: Invalid tx_bd index!\n",
16433 } else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) {
16434 /* TX next page BD. */
16435 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] NP: haddr=0x%08X:%08X\n",
16436 fp->index, idx, tx_bd->next_bd.addr_hi,
16437 tx_bd->next_bd.addr_lo);
16438 } else if ((tx_bd->start_bd.bd_flags.as_bitfield &
16439 ETH_TX_BD_FLAGS_START_BD) != 0) {
16441 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] ST: haddr=0x%08X:%08X, "
16442 "nbd=%02d, nbytes=%05d, vlan/idx=0x%04X, flags=0x%b, "
16443 "gendata=0x%02X\n",
16444 fp->index, idx, tx_bd->start_bd.addr_hi,
16445 tx_bd->start_bd.addr_lo, tx_bd->start_bd.nbd,
16446 tx_bd->start_bd.nbytes, tx_bd->start_bd.vlan,
16447 tx_bd->start_bd.bd_flags.as_bitfield,
16448 BXE_ETH_TX_BD_FLAGS_PRINTFB,
16449 tx_bd->start_bd.general_data);
16451 /* Regular TX BD. */
16452 BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] TX: haddr=0x%08X:%08X, "
16453 "total_pkt_bytes=%05d, nbytes=%05d\n", fp->index, idx,
16454 tx_bd->reg_bd.addr_hi, tx_bd->reg_bd.addr_lo,
16455 tx_bd->reg_bd.total_pkt_bytes, tx_bd->reg_bd.nbytes);
16461 * Prints out the transmit chain.
16467 void bxe_dump_tx_chain(struct bxe_fastpath * fp, int tx_bd_prod, int count)
16469 struct bxe_softc *sc;
16470 union eth_tx_bd_types *tx_bd;
16471 uint32_t val_hi, val_lo;
16472 int i, parsing_bd = 0;
16476 /* First some info about the tx_bd chain structure. */
16478 "----------------------------"
16480 "----------------------------\n");
16482 val_hi = U64_HI(fp->tx_dma.paddr);
16483 val_lo = U64_LO(fp->tx_dma.paddr);
16485 "0x%08X:%08X - (fp[%02d]->tx_dma.paddr) TX Chain physical address\n",
16486 val_hi, val_lo, fp->index);
16488 "page size = 0x%08X, tx chain pages = 0x%08X\n",
16489 (uint32_t)BCM_PAGE_SIZE, (uint32_t)NUM_TX_PAGES);
16491 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
16492 (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE);
16494 "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD);
16497 "-----------------------------"
16499 "-----------------------------\n");
16501 /* Now print out the tx_bd's themselves. */
16502 for (i = 0; i < count; i++) {
16503 tx_bd = &fp->tx_chain[tx_bd_prod];
16505 struct eth_tx_parse_bd *p_bd;
16506 p_bd = (struct eth_tx_parse_bd *)
16507 &fp->tx_chain[tx_bd_prod].parse_bd;
16508 bxe_dump_tx_parsing_bd(fp, tx_bd_prod, p_bd);
16511 bxe_dump_txbd(fp, tx_bd_prod, tx_bd);
16512 if ((tx_bd->start_bd.bd_flags.as_bitfield &
16513 ETH_TX_BD_FLAGS_START_BD) != 0)
16515 * There is always a parsing BD following the
16516 * tx_bd with the start bit set.
16520 /* Don't skip next page pointers. */
16521 tx_bd_prod = ((tx_bd_prod + 1) & MAX_TX_BD);
16525 "-----------------------------"
16527 "-----------------------------\n");
16531 * Prints out the receive completion queue chain.
16537 void bxe_dump_rx_cq_chain(struct bxe_fastpath *fp, int rx_cq_prod, int count)
16539 struct bxe_softc *sc;
16540 union eth_rx_cqe *cqe;
16545 /* First some info about the tx_bd chain structure. */
16547 "----------------------------"
16549 "----------------------------\n");
16551 BXE_PRINTF("fp[%02d]->rcq_dma.paddr = 0x%jX\n",
16552 fp->index, (uintmax_t) fp->rcq_dma.paddr);
16554 BXE_PRINTF("page size = 0x%08X, cq chain pages "
16556 (uint32_t)BCM_PAGE_SIZE, (uint32_t) NUM_RCQ_PAGES);
16558 BXE_PRINTF("cqe_bd per page = 0x%08X, usable cqe_bd per "
16560 (uint32_t) TOTAL_RCQ_ENTRIES_PER_PAGE,
16561 (uint32_t) USABLE_RCQ_ENTRIES_PER_PAGE);
16563 BXE_PRINTF("total cqe_bd = 0x%08X\n",(uint32_t) TOTAL_RCQ_ENTRIES);
16565 /* Now the CQE entries themselves. */
16567 "----------------------------"
16569 "----------------------------\n");
16571 for (i = 0; i < count; i++) {
16572 cqe = (union eth_rx_cqe *)&fp->rcq_chain[rx_cq_prod];
16574 bxe_dump_cqe(fp, rx_cq_prod, cqe);
16576 /* Don't skip next page pointers. */
16577 rx_cq_prod = ((rx_cq_prod + 1) & MAX_RCQ_ENTRIES);
16581 "----------------------------"
16583 "----------------------------\n");
16587 * Prints out the receive chain.
16593 void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int prod, int count)
16595 struct bxe_softc *sc;
16596 struct eth_rx_bd *rx_bd;
16602 /* First some info about the tx_bd chain structure. */
16604 "----------------------------"
16606 "----------------------------\n");
16609 "----- RX_BD Chain -----\n");
16611 BXE_PRINTF("fp[%02d]->rx_dma.paddr = 0x%jX\n",
16612 fp->index, (uintmax_t) fp->rx_dma.paddr);
16615 "page size = 0x%08X, rx chain pages = 0x%08X\n",
16616 (uint32_t)BCM_PAGE_SIZE, (uint32_t)NUM_RX_PAGES);
16619 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
16620 (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE);
16623 "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD);
16625 /* Now the rx_bd entries themselves. */
16627 "----------------------------"
16629 "----------------------------\n");
16631 /* Now print out the rx_bd's themselves. */
16632 for (i = 0; i < count; i++) {
16633 rx_bd = (struct eth_rx_bd *) (&fp->rx_chain[prod]);
16634 m = sc->fp->rx_mbuf_ptr[prod];
16636 bxe_dump_rxbd(fp, prod, rx_bd);
16637 bxe_dump_mbuf(sc, m);
16639 /* Don't skip next page pointers. */
16640 prod = ((prod + 1) & MAX_RX_BD);
16644 "----------------------------"
16646 "----------------------------\n");
16650 * Prints out a register dump.
16656 void bxe_dump_hw_state(struct bxe_softc *sc)
16661 "----------------------------"
16663 "----------------------------\n");
16665 for (i = 0x2000; i < 0x10000; i += 0x10)
16666 BXE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
16667 REG_RD(sc, 0 + i), REG_RD(sc, 0 + i + 0x4),
16668 REG_RD(sc, 0 + i + 0x8), REG_RD(sc, 0 + i + 0xC));
16671 "----------------------------"
16673 "----------------------------\n");
16677 * Prints out the RX mbuf chain.
16683 void bxe_dump_rx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
16689 "----------------------------"
16691 "----------------------------\n");
16693 for (i = 0; i < count; i++) {
16694 m = sc->fp->rx_mbuf_ptr[chain_prod];
16695 BXE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
16696 bxe_dump_mbuf(sc, m);
16697 chain_prod = RX_BD(NEXT_RX_BD(chain_prod));
16701 "----------------------------"
16703 "----------------------------\n");
16707 * Prints out the mbufs in the TX mbuf chain.
16713 void bxe_dump_tx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
16719 "----------------------------"
16721 "----------------------------\n");
16723 for (i = 0; i < count; i++) {
16724 m = sc->fp->tx_mbuf_ptr[chain_prod];
16725 BXE_PRINTF("txmbuf[%d]\n", chain_prod);
16726 bxe_dump_mbuf(sc, m);
16727 chain_prod = TX_BD(NEXT_TX_BD(chain_prod));
16731 "----------------------------"
16733 "----------------------------\n");
16737 * Prints out the status block from host memory.
16743 void bxe_dump_status_block(struct bxe_softc *sc)
16745 struct bxe_fastpath *fp;
16746 struct host_def_status_block *def_sb;
16747 struct host_status_block *fpsb;
16750 def_sb = sc->def_sb;
16752 "----------------------------"
16754 "----------------------------\n");
16756 for (i = 0; i < sc->num_queues; i++) {
16758 fpsb = fp->status_block;
16760 "----------------------------"
16762 "----------------------------\n", fp->index);
16764 /* Print the USTORM fields (HC_USTORM_SB_NUM_INDICES). */
16766 "0x%08X - USTORM Flags (F/W RESERVED)\n",
16767 fpsb->u_status_block.__flags);
16769 " 0x%02X - USTORM PCIe Function\n",
16770 fpsb->u_status_block.func);
16772 " 0x%02X - USTORM Status Block ID\n",
16773 fpsb->u_status_block.status_block_id);
16775 " 0x%04X - USTORM Status Block Index (Tag)\n",
16776 fpsb->u_status_block.status_block_index);
16778 " 0x%04X - USTORM [TOE_RX_CQ_CONS]\n",
16779 fpsb->u_status_block.index_values[HC_INDEX_U_TOE_RX_CQ_CONS]);
16781 " 0x%04X - USTORM [ETH_RX_CQ_CONS]\n",
16782 fpsb->u_status_block.index_values[HC_INDEX_U_ETH_RX_CQ_CONS]);
16784 " 0x%04X - USTORM [ETH_RX_BD_CONS]\n",
16785 fpsb->u_status_block.index_values[HC_INDEX_U_ETH_RX_BD_CONS]);
16787 " 0x%04X - USTORM [RESERVED]\n",
16788 fpsb->u_status_block.index_values[3]);
16790 /* Print the CSTORM fields (HC_CSTORM_SB_NUM_INDICES). */
16792 "0x%08X - CSTORM Flags (F/W RESERVED)\n",
16793 fpsb->c_status_block.__flags);
16795 " 0x%02X - CSTORM PCIe Function\n",
16796 fpsb->c_status_block.func);
16798 " 0x%02X - CSTORM Status Block ID\n",
16799 fpsb->c_status_block.status_block_id);
16801 " 0x%04X - CSTORM Status Block Index (Tag)\n",
16802 fpsb->c_status_block.status_block_index);
16804 " 0x%04X - CSTORM [TOE_TX_CQ_CONS]\n",
16805 fpsb->c_status_block.index_values[HC_INDEX_C_TOE_TX_CQ_CONS]);
16807 " 0x%04X - CSTORM [ETH_TX_CQ_CONS]\n",
16808 fpsb->c_status_block.index_values[HC_INDEX_C_ETH_TX_CQ_CONS]);
16810 " 0x%04X - CSTORM [ISCSI_EQ_CONS]\n",
16811 fpsb->c_status_block.index_values[HC_INDEX_C_ISCSI_EQ_CONS]);
16813 " 0x%04X - CSTORM [RESERVED]\n",
16814 fpsb->c_status_block.index_values[3]);
16818 "--------------------------"
16819 " Def Status Block "
16820 "--------------------------\n");
16822 /* Print attention information. */
16824 " 0x%02X - Status Block ID\n",
16825 def_sb->atten_status_block.status_block_id);
16827 "0x%08X - Attn Bits\n",
16828 def_sb->atten_status_block.attn_bits);
16830 "0x%08X - Attn Bits Ack\n",
16831 def_sb->atten_status_block.attn_bits_ack);
16833 " 0x%04X - Attn Block Index\n",
16834 le16toh(def_sb->atten_status_block.attn_bits_index));
16836 /* Print the USTORM fields (HC_USTORM_DEF_SB_NUM_INDICES). */
16838 " 0x%02X - USTORM Status Block ID\n",
16839 def_sb->u_def_status_block.status_block_id);
16841 " 0x%04X - USTORM Status Block Index\n",
16842 le16toh(def_sb->u_def_status_block.status_block_index));
16844 " 0x%04X - USTORM [ETH_RDMA_RX_CQ_CONS]\n",
16845 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS]));
16847 " 0x%04X - USTORM [ETH_ISCSI_RX_CQ_CONS]\n",
16848 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]));
16850 " 0x%04X - USTORM [ETH_RDMA_RX_BD_CONS]\n",
16851 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS]));
16853 " 0x%04X - USTORM [ETH_ISCSI_RX_BD_CONS]\n",
16854 le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS]));
16856 /* Print the CSTORM fields (HC_CSTORM_DEF_SB_NUM_INDICES). */
16858 " 0x%02X - CSTORM Status Block ID\n",
16859 def_sb->c_def_status_block.status_block_id);
16861 " 0x%04X - CSTORM Status Block Index\n",
16862 le16toh(def_sb->c_def_status_block.status_block_index));
16864 " 0x%04X - CSTORM [RDMA_EQ_CONS]\n",
16865 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_EQ_CONS]));
16867 " 0x%04X - CSTORM [RDMA_NAL_PROD]\n",
16868 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_NAL_PROD]));
16870 " 0x%04X - CSTORM [ETH_FW_TX_CQ_CONS]\n",
16871 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS]));
16873 " 0x%04X - CSTORM [ETH_SLOW_PATH]\n",
16874 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_SLOW_PATH]));
16876 " 0x%04X - CSTORM [ETH_RDMA_CQ_CONS]\n",
16877 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS]));
16879 " 0x%04X - CSTORM [ETH_ISCSI_CQ_CONS]\n",
16880 le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]));
16882 " 0x%04X - CSTORM [UNUSED]\n",
16883 le16toh(def_sb->c_def_status_block.index_values[6]));
16885 " 0x%04X - CSTORM [UNUSED]\n",
16886 le16toh(def_sb->c_def_status_block.index_values[7]));
16888 /* Print the TSTORM fields (HC_TSTORM_DEF_SB_NUM_INDICES). */
16890 " 0x%02X - TSTORM Status Block ID\n",
16891 def_sb->t_def_status_block.status_block_id);
16893 " 0x%04X - TSTORM Status Block Index\n",
16894 le16toh(def_sb->t_def_status_block.status_block_index));
16895 for (i = 0; i < HC_TSTORM_DEF_SB_NUM_INDICES; i++)
16897 " 0x%04X - TSTORM [UNUSED]\n",
16898 le16toh(def_sb->t_def_status_block.index_values[i]));
16900 /* Print the XSTORM fields (HC_XSTORM_DEF_SB_NUM_INDICES). */
16902 " 0x%02X - XSTORM Status Block ID\n",
16903 def_sb->x_def_status_block.status_block_id);
16905 " 0x%04X - XSTORM Status Block Index\n",
16906 le16toh(def_sb->x_def_status_block.status_block_index));
16907 for (i = 0; i < HC_XSTORM_DEF_SB_NUM_INDICES; i++)
16909 " 0x%04X - XSTORM [UNUSED]\n",
16910 le16toh(def_sb->x_def_status_block.index_values[i]));
16913 "----------------------------"
16915 "----------------------------\n");
16920 * Prints out the statistics block from host memory.
16926 void bxe_dump_stats_block(struct bxe_softc *sc)
16932 * Prints out a summary of the fastpath state.
16938 void bxe_dump_fp_state(struct bxe_fastpath *fp)
16940 struct bxe_softc *sc;
16941 uint32_t val_hi, val_lo;
16946 "----------------------------"
16948 "----------------------------\n");
16950 val_hi = U64_HI(fp);
16951 val_lo = U64_LO(fp);
16953 "0x%08X:%08X - (fp[%02d]) fastpath virtual address\n",
16954 val_hi, val_lo, fp->index);
16956 " %3d - (fp[%02d]->sb_id)\n",
16957 fp->sb_id, fp->index);
16959 " %3d - (fp[%02d]->cl_id)\n",
16960 fp->cl_id, fp->index);
16962 " 0x%08X - (fp[%02d]->state)\n",
16963 (uint32_t)fp->state, fp->index);
16965 /* Receive state. */
16967 " 0x%04X - (fp[%02d]->rx_bd_prod)\n",
16968 fp->rx_bd_prod, fp->index);
16970 " 0x%04X - (fp[%02d]->rx_bd_cons)\n",
16971 fp->rx_bd_cons, fp->index);
16973 " 0x%04X - (fp[%02d]->rx_cq_prod)\n",
16974 fp->rx_cq_prod, fp->index);
16976 " 0x%04X - (fp[%02d]->rx_cq_cons)\n",
16977 fp->rx_cq_cons, fp->index);
16979 " %16lu - (fp[%02d]->rx_pkts)\n",
16980 fp->rx_pkts, fp->index);
16982 " 0x%08X - (fp[%02d]->rx_mbuf_alloc)\n",
16983 fp->rx_mbuf_alloc, fp->index);
16985 " %16lu - (fp[%02d]->ipackets)\n",
16986 fp->ipackets, fp->index);
16988 " %16lu - (fp[%02d]->rx_soft_errors)\n",
16989 fp->rx_soft_errors, fp->index);
16991 /* Transmit state. */
16993 " 0x%04X - (fp[%02d]->tx_bd_used)\n",
16994 fp->tx_bd_used, fp->index);
16996 " 0x%04X - (fp[%02d]->tx_bd_prod)\n",
16997 fp->tx_bd_prod, fp->index);
16999 " 0x%04X - (fp[%02d]->tx_bd_cons)\n",
17000 fp->tx_bd_cons, fp->index);
17002 " 0x%04X - (fp[%02d]->tx_pkt_prod)\n",
17003 fp->tx_pkt_prod, fp->index);
17005 " 0x%04X - (fp[%02d]->tx_pkt_cons)\n",
17006 fp->tx_pkt_cons, fp->index);
17008 " %16lu - (fp[%02d]->tx_pkts)\n",
17009 fp->tx_pkts, fp->index);
17011 " 0x%08X - (fp[%02d]->tx_mbuf_alloc)\n",
17012 fp->tx_mbuf_alloc, fp->index);
17014 " %16lu - (fp[%02d]->opackets)\n",
17015 fp->opackets, fp->index);
17017 " %16lu - (fp[%02d]->tx_soft_errors)\n",
17018 fp->tx_soft_errors, fp->index);
17021 if (TPA_ENABLED(sc)) {
17023 " %16lu - (fp[%02d]->rx_tpa_pkts)\n",
17024 fp->rx_tpa_pkts, fp->index);
17026 " 0x%08X - (fp[%02d]->tpa_mbuf_alloc)\n",
17027 fp->tpa_mbuf_alloc, fp->index);
17029 " 0x%08X - (fp[%02d]->sge_mbuf_alloc)\n",
17030 fp->sge_mbuf_alloc, fp->index);
17032 if (CHIP_IS_E1(sc)) {
17033 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1; i++)
17035 " 0x%08X - (fp[%02d]->tpa_state[%02d])\n",
17036 (uint32_t)fp->tpa_state[i], fp->index, i);
17038 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1; i++)
17040 " 0x%08X - (fp[%02d]->tpa_state[%02d])\n",
17041 (uint32_t)fp->tpa_state[i], fp->index, i);
17046 "----------------------------"
17048 "----------------------------\n");
17056 void bxe_dump_port_state_locked(struct bxe_softc *sc)
17060 "------------------------------"
17062 "------------------------------\n");
17065 " %2d - (port) pmf\n", sc->port.pmf);
17067 "0x%08X - (port) link_config\n", sc->port.link_config);
17069 "0x%08X - (port) supported\n", sc->port.supported);
17071 "0x%08X - (port) advertising\n", sc->port.advertising);
17073 "0x%08X - (port) port_stx\n", sc->port.port_stx);
17076 "----------------------------"
17078 "----------------------------\n");
17086 void bxe_dump_link_vars_state_locked(struct bxe_softc *sc)
17089 "---------------------------"
17090 " Link Vars State "
17091 "----------------------------\n");
17093 switch (sc->link_vars.mac_type) {
17094 case MAC_TYPE_NONE:
17095 BXE_PRINTF(" NONE");
17097 case MAC_TYPE_EMAC:
17098 BXE_PRINTF(" EMAC");
17100 case MAC_TYPE_BMAC:
17101 BXE_PRINTF(" BMAC");
17104 BXE_PRINTF(" UNKN");
17106 printf(" - (link_vars->mac_type)\n");
17109 " %2d - (link_vars->phy_link_up)\n",
17110 sc->link_vars.phy_link_up);
17112 " %2d - (link_vars->link_up)\n",
17113 sc->link_vars.link_up);
17115 " %2d - (link_vars->duplex)\n",
17116 sc->link_vars.duplex);
17118 " 0x%04X - (link_vars->flow_ctrl)\n",
17119 sc->link_vars.flow_ctrl);
17121 " 0x%04X - (link_vars->line_speed)\n",
17122 sc->link_vars.line_speed);
17124 "0x%08X - (link_vars->ieee_fc)\n",
17125 sc->link_vars.ieee_fc);
17127 "0x%08X - (link_vars->autoneg)\n",
17128 sc->link_vars.autoneg);
17130 "0x%08X - (link_vars->phy_flags)\n",
17131 sc->link_vars.phy_flags);
17133 "0x%08X - (link_vars->link_status)\n",
17134 sc->link_vars.link_status);
17137 "----------------------------"
17139 "----------------------------\n");
17149 void bxe_dump_link_params_state_locked(struct bxe_softc *sc)
17152 "--------------------------"
17153 " Link Params State "
17154 "---------------------------\n");
17157 " %2d - (link_params->port)\n",
17158 sc->link_params.port);
17160 " %2d - (link_params->loopback_mode)\n",
17161 sc->link_params.loopback_mode);
17163 " %3d - (link_params->phy_addr)\n",
17164 sc->link_params.phy_addr);
17166 " 0x%04X - (link_params->req_duplex)\n",
17167 sc->link_params.req_duplex);
17169 " 0x%04X - (link_params->req_flow_ctrl)\n",
17170 sc->link_params.req_flow_ctrl);
17172 " 0x%04X - (link_params->req_line_speed)\n",
17173 sc->link_params.req_line_speed);
17175 " %5d - (link_params->ether_mtu)\n",
17176 sc->port.ether_mtu);
17178 "0x%08X - (link_params->shmem_base) shared memory base address\n",
17179 sc->link_params.shmem_base);
17181 "0x%08X - (link_params->speed_cap_mask)\n",
17182 sc->link_params.speed_cap_mask);
17184 "0x%08X - (link_params->ext_phy_config)\n",
17185 sc->link_params.ext_phy_config);
17187 "0x%08X - (link_params->switch_cfg)\n",
17188 sc->link_params.switch_cfg);
17191 "----------------------------"
17193 "----------------------------\n");
17197 * Prints out a summary of the driver state.
17203 void bxe_dump_driver_state(struct bxe_softc *sc)
17205 uint32_t val_hi, val_lo;
17208 "-----------------------------"
17210 "-----------------------------\n");
17212 val_hi = U64_HI(sc);
17213 val_lo = U64_LO(sc);
17215 "0x%08X:%08X - (sc) driver softc structure virtual address\n",
17218 val_hi = U64_HI(sc->bxe_vhandle);
17219 val_lo = U64_LO(sc->bxe_vhandle);
17221 "0x%08X:%08X - (sc->bxe_vhandle) PCI BAR0 virtual address\n",
17224 val_hi = U64_HI(sc->bxe_db_vhandle);
17225 val_lo = U64_LO(sc->bxe_db_vhandle);
17227 "0x%08X:%08X - (sc->bxe_db_vhandle) PCI BAR2 virtual address\n",
17230 BXE_PRINTF(" 0x%08X - (sc->num_queues) Fastpath queues\n",
17232 BXE_PRINTF(" 0x%08X - (sc->rx_lane_swap) RX XAUI lane swap\n",
17234 BXE_PRINTF(" 0x%08X - (sc->tx_lane_swap) TX XAUI lane swap\n",
17236 BXE_PRINTF(" %16lu - (sc->debug_sim_mbuf_alloc_failed)\n",
17237 sc->debug_sim_mbuf_alloc_failed);
17238 BXE_PRINTF(" %16lu - (sc->debug_sim_mbuf_map_failed)\n",
17239 sc->debug_sim_mbuf_map_failed);
17242 "----------------------------"
17244 "----------------------------\n");
17246 bxe_dump_port_state_locked(sc);
17247 bxe_dump_link_params_state_locked(sc);
17248 bxe_dump_link_vars_state_locked(sc);
17252 * Dump bootcode (MCP) debug buffer to the console.
17258 void bxe_dump_fw(struct bxe_softc *sc)
17260 uint32_t addr, mark, data[9], offset;
17263 addr = sc->common.shmem_base - 0x0800 + 4;
17264 mark = REG_RD(sc, addr);
17265 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
17268 "---------------------------"
17269 " MCP Debug Buffer "
17270 "---------------------------\n");
17272 /* Read from "mark" to the end of the buffer. */
17273 for (offset = mark; offset <= sc->common.shmem_base;
17274 offset += (0x8 * 4)) {
17275 for (word = 0; word < 8; word++)
17276 data[word] = htonl(REG_RD(sc, offset + 4 * word));
17278 printf("%s", (char *) data);
17281 /* Read from the start of the buffer to "mark". */
17282 for (offset = addr + 4; offset <= mark; offset += (0x8 * 4)) {
17283 for (word = 0; word < 8; word++)
17284 data[word] = htonl(REG_RD(sc, offset + 4 * word));
17286 printf("%s", (char *) data);
17290 "----------------------------"
17292 "----------------------------\n");
17296 * Decode firmware messages.
17302 bxe_decode_mb_msgs(struct bxe_softc *sc, uint32_t drv_mb_header,
17303 uint32_t fw_mb_header)
17306 if (drv_mb_header) {
17307 BXE_PRINTF("Driver message is ");
17308 switch (drv_mb_header & DRV_MSG_CODE_MASK) {
17309 case DRV_MSG_CODE_LOAD_REQ:
17311 "LOAD_REQ (0x%08X)",
17312 (uint32_t)DRV_MSG_CODE_LOAD_REQ);
17314 case DRV_MSG_CODE_LOAD_DONE:
17316 "LOAD_DONE (0x%08X)",
17317 (uint32_t)DRV_MSG_CODE_LOAD_DONE);
17319 case DRV_MSG_CODE_UNLOAD_REQ_WOL_EN:
17321 "UNLOAD_REQ_WOL_EN (0x%08X)",
17322 (uint32_t)DRV_MSG_CODE_UNLOAD_REQ_WOL_EN);
17324 case DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS:
17326 "UNLOAD_REQ_WOL_DIS (0x%08X)",
17327 (uint32_t)DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
17329 case DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP:
17331 "UNLOADREQ_WOL_MCP (0x%08X)",
17332 (uint32_t)DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
17334 case DRV_MSG_CODE_UNLOAD_DONE:
17336 "UNLOAD_DONE (0x%08X)",
17337 (uint32_t)DRV_MSG_CODE_UNLOAD_DONE);
17339 case DRV_MSG_CODE_DIAG_ENTER_REQ:
17341 "DIAG_ENTER_REQ (0x%08X)",
17342 (uint32_t)DRV_MSG_CODE_DIAG_ENTER_REQ);
17344 case DRV_MSG_CODE_DIAG_EXIT_REQ:
17346 "DIAG_EXIT_REQ (0x%08X)",
17347 (uint32_t)DRV_MSG_CODE_DIAG_EXIT_REQ);
17349 case DRV_MSG_CODE_VALIDATE_KEY:
17351 "CODE_VALIDITY_KEY (0x%08X)",
17352 (uint32_t)DRV_MSG_CODE_VALIDATE_KEY);
17354 case DRV_MSG_CODE_GET_CURR_KEY:
17356 "GET_CURR_KEY (0x%08X)",
17357 (uint32_t) DRV_MSG_CODE_GET_CURR_KEY);
17359 case DRV_MSG_CODE_GET_UPGRADE_KEY:
17361 "GET_UPGRADE_KEY (0x%08X)",
17362 (uint32_t)DRV_MSG_CODE_GET_UPGRADE_KEY);
17364 case DRV_MSG_CODE_GET_MANUF_KEY:
17366 "GET_MANUF_KEY (0x%08X)",
17367 (uint32_t)DRV_MSG_CODE_GET_MANUF_KEY);
17369 case DRV_MSG_CODE_LOAD_L2B_PRAM:
17371 "LOAD_L2B_PRAM (0x%08X)",
17372 (uint32_t)DRV_MSG_CODE_LOAD_L2B_PRAM);
17374 case BIOS_MSG_CODE_LIC_CHALLENGE:
17376 "LIC_CHALLENGE (0x%08X)",
17377 (uint32_t)BIOS_MSG_CODE_LIC_CHALLENGE);
17379 case BIOS_MSG_CODE_LIC_RESPONSE:
17381 "LIC_RESPONSE (0x%08X)",
17382 (uint32_t)BIOS_MSG_CODE_LIC_RESPONSE);
17384 case BIOS_MSG_CODE_VIRT_MAC_PRIM:
17386 "VIRT_MAC_PRIM (0x%08X)",
17387 (uint32_t)BIOS_MSG_CODE_VIRT_MAC_PRIM);
17389 case BIOS_MSG_CODE_VIRT_MAC_ISCSI:
17391 "VIRT_MAC_ISCSI (0x%08X)",
17392 (uint32_t)BIOS_MSG_CODE_VIRT_MAC_ISCSI);
17396 "Unknown command (0x%08X)!",
17397 (drv_mb_header & DRV_MSG_CODE_MASK));
17400 printf(" (seq = 0x%04X)\n", (drv_mb_header &
17401 DRV_MSG_SEQ_NUMBER_MASK));
17404 if (fw_mb_header) {
17405 BXE_PRINTF("Firmware response is ");
17406 switch (fw_mb_header & FW_MSG_CODE_MASK) {
17407 case FW_MSG_CODE_DRV_LOAD_COMMON:
17409 "DRV_LOAD_COMMON (0x%08X)",
17410 (uint32_t)FW_MSG_CODE_DRV_LOAD_COMMON);
17412 case FW_MSG_CODE_DRV_LOAD_PORT:
17414 "DRV_LOAD_PORT (0x%08X)",
17415 (uint32_t)FW_MSG_CODE_DRV_LOAD_PORT);
17417 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
17419 "DRV_LOAD_FUNCTION (0x%08X)",
17420 (uint32_t)FW_MSG_CODE_DRV_LOAD_FUNCTION);
17422 case FW_MSG_CODE_DRV_LOAD_REFUSED:
17424 "DRV_LOAD_REFUSED (0x%08X)",
17425 (uint32_t)FW_MSG_CODE_DRV_LOAD_REFUSED);
17427 case FW_MSG_CODE_DRV_LOAD_DONE:
17429 "DRV_LOAD_DONE (0x%08X)",
17430 (uint32_t)FW_MSG_CODE_DRV_LOAD_DONE);
17432 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
17434 "DRV_UNLOAD_COMMON (0x%08X)",
17435 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_COMMON);
17437 case FW_MSG_CODE_DRV_UNLOAD_PORT:
17439 "DRV_UNLOAD_PORT (0x%08X)",
17440 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_PORT);
17442 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
17444 "DRV_UNLOAD_FUNCTION (0x%08X)",
17445 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
17447 case FW_MSG_CODE_DRV_UNLOAD_DONE:
17449 "DRV_UNLOAD_DONE (0x%08X)",
17450 (uint32_t)FW_MSG_CODE_DRV_UNLOAD_DONE);
17452 case FW_MSG_CODE_DIAG_ENTER_DONE:
17454 "DIAG_ENTER_DONE (0x%08X)",
17455 (uint32_t)FW_MSG_CODE_DIAG_ENTER_DONE);
17457 case FW_MSG_CODE_DIAG_REFUSE:
17459 "DIAG_REFUSE (0x%08X)",
17460 (uint32_t)FW_MSG_CODE_DIAG_REFUSE);
17462 case FW_MSG_CODE_DIAG_EXIT_DONE:
17464 "DIAG_EXIT_DONE (0x%08X)",
17465 (uint32_t)FW_MSG_CODE_DIAG_EXIT_DONE);
17467 case FW_MSG_CODE_VALIDATE_KEY_SUCCESS:
17469 "VALIDATE_KEY_SUCCESS (0x%08X)",
17470 (uint32_t)FW_MSG_CODE_VALIDATE_KEY_SUCCESS);
17472 case FW_MSG_CODE_VALIDATE_KEY_FAILURE:
17474 "VALIDATE_KEY_FAILURE (0x%08X)",
17475 (uint32_t)FW_MSG_CODE_VALIDATE_KEY_FAILURE);
17477 case FW_MSG_CODE_GET_KEY_DONE:
17479 "GET_KEY_DONE (0x%08X)",
17480 (uint32_t)FW_MSG_CODE_GET_KEY_DONE);
17482 case FW_MSG_CODE_NO_KEY:
17485 (uint32_t)FW_MSG_CODE_NO_KEY);
17489 "unknown value (0x%08X)!",
17490 (fw_mb_header & FW_MSG_CODE_MASK));
17493 printf(" (seq = 0x%04X)\n", (fw_mb_header &
17494 FW_MSG_SEQ_NUMBER_MASK));
17499 * Prints a text string for the ramrod command.
17505 bxe_decode_ramrod_cmd(struct bxe_softc *sc, int command)
17507 BXE_PRINTF("Ramrod command = ");
17510 case RAMROD_CMD_ID_ETH_PORT_SETUP:
17511 printf("ETH_PORT_SETUP\n");
17513 case RAMROD_CMD_ID_ETH_CLIENT_SETUP:
17514 printf("ETH_CLIENT_SETUP\n");
17516 case RAMROD_CMD_ID_ETH_STAT_QUERY:
17517 printf("ETH_STAT_QUERY\n");
17519 case RAMROD_CMD_ID_ETH_UPDATE:
17520 printf("ETH_UPDATE\n");
17522 case RAMROD_CMD_ID_ETH_HALT:
17523 printf("ETH_HALT\n");
17525 case RAMROD_CMD_ID_ETH_SET_MAC:
17526 printf("ETH_SET_MAC\n");
17528 case RAMROD_CMD_ID_ETH_CFC_DEL:
17529 printf("ETH_CFC_DEL\n");
17531 case RAMROD_CMD_ID_ETH_PORT_DEL:
17532 printf("ETH_PORT_DEL\n");
17534 case RAMROD_CMD_ID_ETH_FORWARD_SETUP:
17535 printf("ETH_FORWARD_SETUP\n");
17538 printf("Unknown ramrod command!\n");
17544 * Prints out driver information and forces a kernel breakpoint.
17550 bxe_breakpoint(struct bxe_softc *sc)
17552 struct bxe_fastpath *fp;
17556 /* Unreachable code to silence the compiler about unused functions. */
17558 bxe_reg_read16(sc, PCICFG_OFFSET);
17559 bxe_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
17560 bxe_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
17561 bxe_dump_tx_chain(fp, 0, USABLE_TX_BD);
17562 bxe_dump_rx_cq_chain(fp, 0, USABLE_RCQ_ENTRIES);
17563 bxe_dump_rx_bd_chain(fp, 0, USABLE_RX_BD);
17564 bxe_dump_status_block(sc);
17565 bxe_dump_stats_block(sc);
17566 bxe_dump_fp_state(fp);
17567 bxe_dump_driver_state(sc);
17568 bxe_dump_hw_state(sc);
17573 * Do some device sanity checking. Run it twice in case
17574 * the hardware is still running so we can identify any
17575 * transient conditions.
17577 bxe_idle_chk(sc); bxe_idle_chk(sc);
17579 bxe_dump_driver_state(sc);
17581 for (i = 0; i < sc->num_queues; i++)
17582 bxe_dump_fp_state(&sc->fp[i]);
17584 bxe_dump_status_block(sc);
17587 /* Call the OS debugger. */