/*
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.91"

#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"
/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002
/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
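
/*
 * Illustrative sketch only (bxe_flr_poll_example is a hypothetical name,
 * not a function of this driver): the FLR cleanup path polls hardware
 * registers with this pattern, reading every FLR_WAIT_INTERVAL usecs for
 * at most FLR_POLL_CNT iterations (FLR_WAIT_USEC total) before giving up.
 */
#if 0
static uint32_t
bxe_flr_poll_example(struct bxe_softc *sc, uint32_t reg, uint32_t expected)
{
    uint32_t val = REG_RD(sc, reg);
    uint32_t cnt;

    for (cnt = 0; (val != expected) && (cnt < FLR_POLL_CNT); cnt++) {
        DELAY(FLR_WAIT_INTERVAL); /* 50 usecs between reads */
        val = REG_RD(sc, reg);
    }

    return (val); /* caller checks val against expected */
}
#endif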
struct pbf_pN_buf_regs {

struct pbf_pN_cmd_regs {
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
    },
    {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
    },
};
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);

/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);

struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t aer; /* XXX automatic error recovery */

static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
                8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
                8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
                8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
                8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
                8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
                8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
                8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
                8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
                8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
                8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
                8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
                8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
                8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
                8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
                8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
                8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
                8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(eee_tx_lpi),
                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
                4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
                4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
                4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
    { STATS_OFFSET32(tx_chain_lost_mbuf),
                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
    { STATS_OFFSET32(tx_queue_full_return),
                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
    { STATS_OFFSET32(tx_request_link_down_failures),
                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
    { STATS_OFFSET32(bd_avail_too_less_failures),
                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
    { STATS_OFFSET32(tx_mq_not_empty),
                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
    { STATS_OFFSET32(nsegs_path1_errors),
                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
    { STATS_OFFSET32(nsegs_path2_errors),
                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
};
static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
                8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
                8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
                8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
                8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
                8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
                8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
                8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
                8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
                8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
                8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
                8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
                4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
                4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
                4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
                4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
                4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
                4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
                4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
                4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
                4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
                4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
                4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
                4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
                4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
                4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
                4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
                4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
                4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
                4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
                4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
                4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
                4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
                4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
                4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
                4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
                4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
                4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
                4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
                4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
                4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
                4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
                4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
                4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
                4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
                4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
                4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
                4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
                4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
                4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
                4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
                4, "mbuf_alloc_tpa"},
    { Q_STATS_OFFSET32(tx_queue_full_return),
                4, "tx_queue_full_return"},
    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
                4, "bxe_tx_mq_sc_state_failures"},
    { Q_STATS_OFFSET32(tx_request_link_down_failures),
                4, "tx_request_link_down_failures"},
    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
                4, "bd_avail_too_less_failures"},
    { Q_STATS_OFFSET32(tx_mq_not_empty),
                4, "tx_mq_not_empty"},
    { Q_STATS_OFFSET32(nsegs_path1_errors),
                4, "nsegs_path1_errors"},
    { Q_STATS_OFFSET32(nsegs_path2_errors),
                4, "nsegs_path2_errors"}
};
#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)

static void bxe_cmng_fns_init(struct bxe_softc *sc,
static int bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
static void bxe_int_disable(struct bxe_softc *sc);
static int bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
static int bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int bxe_init_locked(struct bxe_softc *sc);
static int bxe_stop_locked(struct bxe_softc *sc);
static void bxe_sp_err_timeout_task(void *arg, int pending);
void bxe_parity_recover(struct bxe_softc *sc);
void bxe_handle_error(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);

static int bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
int bxe_grc_dump(struct bxe_softc *sc);
static int bxe_alloc_buf_rings(struct bxe_softc *sc);
static void bxe_free_buf_rings(struct bxe_softc *sc);
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
static uint32_t
calc_crc32(uint8_t  *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t  complement)
{
    uint32_t byte = 0;
    uint32_t bit  = 0;
    uint8_t  msb  = 0;
    uint32_t temp = 0;
    uint32_t shft = 0;
    uint8_t  current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0)) {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1) {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1) {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit))) {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /*
     * The last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1) {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* complement */
    if (complement) {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}
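
/*
 * Usage sketch (illustrative only; the buffer and seed are made up):
 * the length must be a multiple of 8 or the seed is returned unchanged,
 * and the final argument selects whether the complement step is applied.
 *
 *     uint8_t frame[64];
 *     uint32_t crc = calc_crc32(frame, sizeof(frame), 0xffffffff, 1);
 */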
static inline uint8_t
bxe_test_bit(int                    nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

static inline void
bxe_set_bit(unsigned int           nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

static inline void
bxe_clear_bit(int                    nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

static inline unsigned long
bxe_test_and_set_bit(int                    nr,
                     volatile unsigned long *addr)
{
    unsigned long x;

    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

static inline unsigned long
bxe_test_and_clear_bit(int                    nr,
                       volatile unsigned long *addr)
{
    unsigned long x;

    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

static inline int
bxe_cmpxchg(volatile int *addr,
            int          old,
            int          new)
{
    int x;

    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);

    return (x);
}
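
/*
 * Example (illustrative only; SOME_FLAG_BIT and 'flags' are made-up
 * names): the test-and-set helper returns the previous state of the bit,
 * so a nonzero result means another context already owned the flag:
 *
 *     if (bxe_test_and_set_bit(SOME_FLAG_BIT, &flags) != 0)
 *         return;    // bit was already set; someone else got there first
 */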
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
 * failures back to the caller.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->nseg = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
    }
}
/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed; release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
static int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}
static void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
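
/*
 * Usage sketch (illustrative only; the "status_block" tag and size are
 * made up): callers keep the returned struct bxe_dma and hand it back to
 * bxe_dma_free() at teardown. The vaddr/paddr pair is valid only when
 * bxe_dma_alloc() returned 0.
 *
 *     struct bxe_dma dma;
 *     if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "status_block") != 0)
 *         return (ENOMEM);
 *     // ... use dma.vaddr (CPU view) and dma.paddr (device view) ...
 *     bxe_dma_free(sc, &dma);
 */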
/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
 */
static void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t         addr,
               uint32_t         val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

static uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t         addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
              " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
          resource, resource_bit);
    return (-1);
}
static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
              " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

static void bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}
/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
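
/*
 * In code terms the safe ordering described above is (a sketch, not a
 * verbatim call sequence):
 *
 *     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);  // per-pf misc lock
 *     // request/poll MCP_REG_MCPR_NVM_SW_ARB            // per-port mcp lock
 *     // ... nvram access ...
 *     // release MCP_REG_MCPR_NVM_SW_ARB
 *     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
 *
 * which is exactly what bxe_acquire_nvram_lock()/bxe_release_nvram_lock()
 * below implement.
 */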
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }
        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
              "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    return (0);
}
static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }
        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
              "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
              port, val);
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}
static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *ret_val,
                     uint32_t         cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired "
              "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}
static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t         offset,
               uint8_t          *ret_buf,
               int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        ret_buf += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         val,
                      uint32_t         cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired "
              "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
              offset, cmd_flags, val);
    }

    return (rc);
}
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
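
/*
 * Worked example: BYTE_OFFSET() converts a byte address into the bit
 * position of that byte within its enclosing dword, e.g.
 * BYTE_OFFSET(0x5) = 8 * (0x5 & 3) = 8, so byte 5 occupies bits 8..15
 * of the dword at offset 0x4.
 */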
static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t         offset,
                 uint8_t          *data_buf,
                 int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t         offset,
                uint8_t          *data_buf,
                int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        data_buf += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
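
/*
 * Usage sketch (illustrative only; the offset and length are made up):
 * both entry points take a byte offset and length that must be dword
 * aligned (bxe_nvram_write() special-cases a single-byte write via
 * bxe_nvram_write1()).
 *
 *     uint8_t buf[16];
 *     if (bxe_nvram_read(sc, 0x100, buf, sizeof(buf)) == 0) {
 *         // ... modify buf ...
 *         rc = bxe_nvram_write(sc, 0x100, buf, sizeof(buf));
 *     }
 */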
/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_cmd  *dmae,
              int              idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t  comp_type)
{
    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
                      DMAE_CMD_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_CMD_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t          src_type,
                uint8_t          dst_type,
                uint8_t          with_comp,
                uint8_t          comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
               (dst_type << DMAE_CMD_DST_SHIFT));

    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_cmd  *dmae,
                        uint8_t          src_type,
                        uint8_t          dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_cmd));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}
/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_cmd  *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
                  *wb_comp, sc->recovery_state);
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
              *wb_comp, sc->recovery_state);
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t         src_addr,
              uint32_t         len32)
{
    struct dmae_cmd dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}
void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t       dma_addr,
               uint32_t         dst_addr,
               uint32_t         len32)
{
    struct dmae_cmd dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}
void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t       phys_addr,
                        uint32_t         addr,
                        uint32_t         len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}
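
/*
 * Worked example: if DMAE_LEN32_WR_MAX(sc) were 0x400 dwords (a value
 * assumed purely for illustration) and len were 0x900 dwords, the loop
 * above would issue two 0x400-dword writes and the tail write would
 * cover the remaining 0x100 dwords. Note that 'len' counts dwords while
 * 'offset' advances in bytes, hence the '* 4'.
 */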
void
bxe_set_ctx_validation(struct bxe_softc   *sc,
                       struct eth_context *cxt,
                       uint32_t           cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t          port,
                            uint8_t          fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}

static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t          port,
                            uint16_t         fw_sb_id,
                            uint8_t          sb_index,
                            uint8_t          disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}
static void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t          fw_sb_id,
                             uint8_t          sb_index,
                             uint8_t          disable,
                             uint16_t         usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}
void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t         usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t         reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t         reg_addr,
                   uint32_t         val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         *wb_write,
                      uint16_t         len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *wb_write,
                     uint16_t         len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}

void
elink_cb_event_log(struct bxe_softc     *sc,
                   const elink_log_id_t elink_log_id,
                   ...)
{
    /* XXX */
    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
}
static int
bxe_set_spio(struct bxe_softc *sc,
             int              spio,
             uint32_t         mode)
{
    uint32_t spio_reg;

    /* Only 2 SPIOs are configurable */
    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    /* read SPIO and mask except the float bits */
    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

    switch (mode) {
    case MISC_SPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
        /* clear FLOAT and set CLR */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_CLR_POS);
        break;

    case MISC_SPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
        /* clear FLOAT and set SET */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_SET_POS);
        break;

    case MISC_SPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
        /* set FLOAT */
        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_SPIO, spio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    return (0);
}
static int
bxe_gpio_read(struct bxe_softc *sc,
              int              gpio_num,
              uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
              " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
              gpio_mask);
        return (-1);
    }

    /* read GPIO value */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);

    /* get the requested pin value */
    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
}
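
/*
 * Worked example of the swap logic above: with the port-swap strap set
 * and the override active, port 1 maps to gpio_port 0 (1 ^ 1). GPIO 2 on
 * gpio_port 1 yields gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT and
 * gpio_mask = (1 << gpio_shift), the bit tested in MISC_REG_GPIO.
 */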
static int
bxe_gpio_write(struct bxe_softc *sc,
               int              gpio_num,
               uint32_t         mode,
               uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
              " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
        break;

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> input\n",
              gpio_num, gpio_shift);
        /* set FLOAT */
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
static int
bxe_gpio_mult_write(struct bxe_softc *sc,
                    uint8_t          pins,
                    uint32_t         mode)
{
    uint32_t gpio_reg;

    /* any port swapping should be handled by caller */

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

    switch (mode) {
    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
        /* set CLR */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
        /* set SET */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
        break;

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
        /* set FLOAT */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
        break;

    default:
        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
              " gpio_reg 0x%x\n", pins, mode, gpio_reg);
        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
        return (-1);
    }

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
static int
bxe_gpio_int_write(struct bxe_softc *sc,
                   int              gpio_num,
                   uint32_t         mode,
                   uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
              " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO int */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
        BLOGD(sc, DBG_PHY,
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
        BLOGD(sc, DBG_PHY,
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
uint32_t
elink_cb_gpio_read(struct bxe_softc *sc,
                   uint16_t         gpio_num,
                   uint8_t          port)
{
    return (bxe_gpio_read(sc, gpio_num, port));
}

uint8_t
elink_cb_gpio_write(struct bxe_softc *sc,
                    uint16_t         gpio_num,
                    uint8_t          mode, /* 0=low 1=high */
                    uint8_t          port)
{
    return (bxe_gpio_write(sc, gpio_num, mode, port));
}

uint8_t
elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t          pins,
                         uint8_t          mode) /* 0=low 1=high */
{
    return (bxe_gpio_mult_write(sc, pins, mode));
}

uint8_t
elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint16_t         gpio_num,
                        uint8_t          mode, /* 0=low 1=high */
                        uint8_t          port)
{
    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
}

void
elink_cb_notify_link_changed(struct bxe_softc *sc)
{
    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}
/* send the MCP a request, block until there is a reply */
uint32_t
elink_cb_fw_command(struct bxe_softc *sc,
                    uint32_t         command,
                    uint32_t         param)
{
    int mb_idx = SC_FW_MB_IDX(sc);
    uint32_t seq;
    uint32_t rc = 0;
    uint32_t cnt = 1;
    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    BXE_FWMB_LOCK(sc);

    seq = ++sc->fw_seq;
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

    BLOGD(sc, DBG_PHY,
          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

    BLOGD(sc, DBG_PHY,
          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          cnt*delay, rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;
    } else {
        /* the FW is stuck... */
        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);
        rc = 0;
    }

    BXE_FWMB_UNLOCK(sc);
    return (rc);
}

uint32_t
bxe_fw_command(struct bxe_softc *sc,
               uint32_t         command,
               uint32_t         param)
{
    return (elink_cb_fw_command(sc, command, param));
}
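
/*
 * Usage sketch (illustrative only; DRV_MSG_CODE_* values come from the
 * shared HSI headers and are assumed here): callers compare the masked
 * reply code against the expected FW_MSG_CODE_* value, and a return of 0
 * means the MCP never responded:
 *
 *     uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 0);
 *     if (rc == 0)
 *         return (ENXIO);    // MCP did not respond
 */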
static void
__storm_memset_dma_mapping(struct bxe_softc *sc,
                           uint32_t         addr,
                           bus_addr_t       mapping)
{
    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));
}

static void
storm_memset_spq_addr(struct bxe_softc *sc,
                      bus_addr_t       mapping,
                      uint16_t         abs_fid)
{
    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
    __storm_memset_dma_mapping(sc, addr, mapping);
}

static void
storm_memset_vf_to_pf(struct bxe_softc *sc,
                      uint16_t         abs_fid,
                      uint16_t         pf_id)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
}

static void
storm_memset_func_en(struct bxe_softc *sc,
                     uint16_t         abs_fid,
                     uint8_t          enable)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
}

static void
storm_memset_eq_data(struct bxe_softc       *sc,
                     struct event_ring_data *eq_data,
                     uint16_t               pfid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
}

static void
storm_memset_eq_prod(struct bxe_softc *sc,
                     uint16_t         eq_prod,
                     uint16_t         pfid)
{
    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
    REG_WR16(sc, addr, eq_prod);
}
2272 * Post a slowpath command.
2274  * A slowpath command is used to propagate a configuration change through
2275 * the controller in a controlled manner, allowing each STORM processor and
2276 * other H/W blocks to phase in the change. The commands sent on the
2277 * slowpath are referred to as ramrods. Depending on the ramrod used the
2278 * completion of the ramrod will occur in different ways. Here's a
2279 * breakdown of ramrods and how they complete:
2281 * RAMROD_CMD_ID_ETH_PORT_SETUP
2282 * Used to setup the leading connection on a port. Completes on the
2283 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
2285 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2286 * Used to setup an additional connection on a port. Completes on the
2287 * RCQ of the multi-queue/RSS connection being initialized.
2289 * RAMROD_CMD_ID_ETH_STAT_QUERY
2290 * Used to force the storm processors to update the statistics database
2291  *   in host memory. This ramrod is sent on the leading connection CID and
2292 * completes as an index increment of the CSTORM on the default status
2295 * RAMROD_CMD_ID_ETH_UPDATE
2296  *   Used to update the state of the leading connection, usually to update
2297 * the RSS indirection table. Completes on the RCQ of the leading
2298 * connection. (Not currently used under FreeBSD until OS support becomes
2301 * RAMROD_CMD_ID_ETH_HALT
2302 * Used when tearing down a connection prior to driver unload. Completes
2303 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2304 * use this on the leading connection.
2306 * RAMROD_CMD_ID_ETH_SET_MAC
2307 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
2308 * the RCQ of the leading connection.
2310 * RAMROD_CMD_ID_ETH_CFC_DEL
2311  *   Used when tearing down a connection prior to driver unload. Completes
2312 * on the RCQ of the leading connection (since the current connection
2313 * has been completely removed from controller memory).
2315 * RAMROD_CMD_ID_ETH_PORT_DEL
2316 * Used to tear down the leading connection prior to driver unload,
2317 * typically fp[0]. Completes as an index increment of the CSTORM on the
2318 * default status block.
2320 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2321 * Used for connection offload. Completes on the RCQ of the multi-queue
2322 * RSS connection that is being offloaded. (Not currently used under
2325 * There can only be one command pending per function.
2328 * 0 = Success, !0 = Failure.
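/*
 * Illustrative sketch only (not compiled): posting a ramrod through
 * bxe_sp_post(), defined below. The 'cid' and 'data_mapping' values are
 * hypothetical; the command/type pair mirrors how ETH ramrods are posted
 * elsewhere in the driver.
 */
#if 0
    rc = bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, cid,
                     U64_HI(data_mapping), U64_LO(data_mapping),
                     ETH_CONNECTION_TYPE);
    if (rc != 0) {
        BLOGE(sc, "ramrod post failed (%d)\n", rc);
    }
#endif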
2331 /* must be called under the spq lock */
2333 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2335 struct eth_spe *next_spe = sc->spq_prod_bd;
2337 if (sc->spq_prod_bd == sc->spq_last_bd) {
2338 /* wrap back to the first eth_spq */
2339 sc->spq_prod_bd = sc->spq;
2340 sc->spq_prod_idx = 0;
2349 /* must be called under the spq lock */
2351 void bxe_sp_prod_update(struct bxe_softc *sc)
2353 int func = SC_FUNC(sc);
2356 * Make sure that BD data is updated before writing the producer.
2357 * BD data is written to the memory, the producer is read from the
2358 * memory, thus we need a full memory barrier to ensure the ordering.
2362 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2365 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2366 BUS_SPACE_BARRIER_WRITE);
2370 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2372 * @cmd: command to check
2373 * @cmd_type: command type
2376 int bxe_is_contextless_ramrod(int cmd,
2379 if ((cmd_type == NONE_CONNECTION_TYPE) ||
2380 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2381 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2382 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2383 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2384 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2385 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2393 * bxe_sp_post - place a single command on an SP ring
2395 * @sc: driver handle
2396 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2397 * @cid: SW CID the command is related to
2398 * @data_hi: command private data address (high 32 bits)
2399 * @data_lo: command private data address (low 32 bits)
2400 * @cmd_type: command type (e.g. NONE, ETH)
2402 * SP data is handled as if it's always an address pair, thus data fields are
2403 * not swapped to little endian in upper functions. Instead this function swaps
2404 * data as if it's two uint32 fields.
2407 bxe_sp_post(struct bxe_softc *sc,
2414 struct eth_spe *spe;
2418 common = bxe_is_contextless_ramrod(command, cmd_type);
2423 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2424 BLOGE(sc, "EQ ring is full!\n");
2429 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2430 BLOGE(sc, "SPQ ring is full!\n");
2436 spe = bxe_sp_get_next(sc);
2438     /* CID needs the port number to be encoded in it */
2439 spe->hdr.conn_and_cmd_data =
2440 htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2442 type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2444 /* TBD: Check if it works for VFs */
2445 type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2446 SPE_HDR_T_FUNCTION_ID);
2448 spe->hdr.type = htole16(type);
2450 spe->data.update_data_addr.hi = htole32(data_hi);
2451 spe->data.update_data_addr.lo = htole32(data_lo);
2454 * It's ok if the actual decrement is issued towards the memory
2455      * somewhere between the lock and unlock. Thus no more explicit
2456 * memory barrier is needed.
2459 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2461 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2464 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2465 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2466 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2468 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2470 (uint32_t)U64_HI(sc->spq_dma.paddr),
2471 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2478 atomic_load_acq_long(&sc->cq_spq_left),
2479 atomic_load_acq_long(&sc->eq_spq_left));
2481 bxe_sp_prod_update(sc);
2488 * bxe_debug_print_ind_table - prints the indirection table configuration.
2490  * @sc: driver handle
2491 * @p: pointer to rss configuration
2495 * FreeBSD Device probe function.
2497 * Compares the device found to the driver's list of supported devices and
2498  * reports back to the BSD loader whether this is the right driver for the device.
2499 * This is the driver entry function called from the "kldload" command.
2502 * BUS_PROBE_DEFAULT on success, positive value on failure.
2505 bxe_probe(device_t dev)
2507 struct bxe_softc *sc;
2508 struct bxe_device_type *t;
2510 uint16_t did, sdid, svid, vid;
2512 /* Find our device structure */
2513 sc = device_get_softc(dev);
2517 /* Get the data for the device to be probed. */
2518 vid = pci_get_vendor(dev);
2519 did = pci_get_device(dev);
2520 svid = pci_get_subvendor(dev);
2521 sdid = pci_get_subdevice(dev);
2524 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2525 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2527 /* Look through the list of known devices for a match. */
2528 while (t->bxe_name != NULL) {
2529 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2530 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2531 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2532 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2533 if (descbuf == NULL)
2536 /* Print out the device identity. */
2537 snprintf(descbuf, BXE_DEVDESC_MAX,
2538 "%s (%c%d) BXE v:%s\n", t->bxe_name,
2539 (((pci_read_config(dev, PCIR_REVID, 4) &
2541 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2542 BXE_DRIVER_VERSION);
2544 device_set_desc_copy(dev, descbuf);
2545 free(descbuf, M_TEMP);
2546 return (BUS_PROBE_DEFAULT);
2555 bxe_init_mutexes(struct bxe_softc *sc)
2557 #ifdef BXE_CORE_LOCK_SX
2558 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2559 "bxe%d_core_lock", sc->unit);
2560 sx_init(&sc->core_sx, sc->core_sx_name);
2562 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2563 "bxe%d_core_lock", sc->unit);
2564 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2567 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2568 "bxe%d_sp_lock", sc->unit);
2569 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2571 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2572 "bxe%d_dmae_lock", sc->unit);
2573 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2575 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2576 "bxe%d_phy_lock", sc->unit);
2577 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2579 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2580 "bxe%d_fwmb_lock", sc->unit);
2581 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2583 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2584 "bxe%d_print_lock", sc->unit);
2585 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2587 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2588 "bxe%d_stats_lock", sc->unit);
2589 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2591 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2592 "bxe%d_mcast_lock", sc->unit);
2593 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2597 bxe_release_mutexes(struct bxe_softc *sc)
2599 #ifdef BXE_CORE_LOCK_SX
2600 sx_destroy(&sc->core_sx);
2602 if (mtx_initialized(&sc->core_mtx)) {
2603 mtx_destroy(&sc->core_mtx);
2607 if (mtx_initialized(&sc->sp_mtx)) {
2608 mtx_destroy(&sc->sp_mtx);
2611 if (mtx_initialized(&sc->dmae_mtx)) {
2612 mtx_destroy(&sc->dmae_mtx);
2615 if (mtx_initialized(&sc->port.phy_mtx)) {
2616 mtx_destroy(&sc->port.phy_mtx);
2619 if (mtx_initialized(&sc->fwmb_mtx)) {
2620 mtx_destroy(&sc->fwmb_mtx);
2623 if (mtx_initialized(&sc->print_mtx)) {
2624 mtx_destroy(&sc->print_mtx);
2627 if (mtx_initialized(&sc->stats_mtx)) {
2628 mtx_destroy(&sc->stats_mtx);
2631 if (mtx_initialized(&sc->mcast_mtx)) {
2632 mtx_destroy(&sc->mcast_mtx);
2637 bxe_tx_disable(struct bxe_softc* sc)
2639 struct ifnet *ifp = sc->ifnet;
2641 /* tell the stack the driver is stopped and TX queue is full */
2643 ifp->if_drv_flags = 0;
2648 bxe_drv_pulse(struct bxe_softc *sc)
2650 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2651 sc->fw_drv_pulse_wr_seq);
2654 static inline uint16_t
2655 bxe_tx_avail(struct bxe_softc *sc,
2656 struct bxe_fastpath *fp)
2662 prod = fp->tx_bd_prod;
2663 cons = fp->tx_bd_cons;
2665 used = SUB_S16(prod, cons);
2667 return (int16_t)(sc->tx_ring_size) - used;
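/*
 * Worked example (hypothetical values): SUB_S16() keeps the arithmetic
 * correct across 16-bit wrap. With prod = 10 (after wrapping) and
 * cons = 65530, SUB_S16(10, 65530) == 16 BDs in use, so a 4096-entry
 * ring reports 4096 - 16 = 4080 BDs available.
 */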
2671 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2675 mb(); /* status block fields can change */
2676 hw_cons = le16toh(*fp->tx_cons_sb);
2677 return (hw_cons != fp->tx_pkt_cons);
2680 static inline uint8_t
2681 bxe_has_tx_work(struct bxe_fastpath *fp)
2683 /* expand this for multi-cos if ever supported */
2684 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2688 bxe_has_rx_work(struct bxe_fastpath *fp)
2690 uint16_t rx_cq_cons_sb;
2692 mb(); /* status block fields can change */
2693 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2694 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2696 return (fp->rx_cq_cons != rx_cq_cons_sb);
2700 bxe_sp_event(struct bxe_softc *sc,
2701 struct bxe_fastpath *fp,
2702 union eth_rx_cqe *rr_cqe)
2704 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2705 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2706 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2707 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2709 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2710 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2713 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2714 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2715 drv_cmd = ECORE_Q_CMD_UPDATE;
2718 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2719 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2720 drv_cmd = ECORE_Q_CMD_SETUP;
2723 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2724 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2725 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2728 case (RAMROD_CMD_ID_ETH_HALT):
2729 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2730 drv_cmd = ECORE_Q_CMD_HALT;
2733 case (RAMROD_CMD_ID_ETH_TERMINATE):
2734         BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2735 drv_cmd = ECORE_Q_CMD_TERMINATE;
2738 case (RAMROD_CMD_ID_ETH_EMPTY):
2739 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2740 drv_cmd = ECORE_Q_CMD_EMPTY;
2744 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2745 command, fp->index);
2749 if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2750 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2752 * q_obj->complete_cmd() failure means that this was
2753 * an unexpected completion.
2755 * In this case we don't want to increase the sc->spq_left
2756 * because apparently we haven't sent this command the first
2759 // bxe_panic(sc, ("Unexpected SP completion\n"));
2763 atomic_add_acq_long(&sc->cq_spq_left, 1);
2765 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2766 atomic_load_acq_long(&sc->cq_spq_left));
2770 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2771 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2772 * the current aggregation queue as in-progress.
2775 bxe_tpa_start(struct bxe_softc *sc,
2776 struct bxe_fastpath *fp,
2780 struct eth_fast_path_rx_cqe *cqe)
2782 struct bxe_sw_rx_bd tmp_bd;
2783 struct bxe_sw_rx_bd *rx_buf;
2784 struct eth_rx_bd *rx_bd;
2786 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2789 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2790 "cons=%d prod=%d\n",
2791 fp->index, queue, cons, prod);
2793 max_agg_queues = MAX_AGG_QS(sc);
2795 KASSERT((queue < max_agg_queues),
2796 ("fp[%02d] invalid aggr queue (%d >= %d)!",
2797 fp->index, queue, max_agg_queues));
2799 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2800 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2803 /* copy the existing mbuf and mapping from the TPA pool */
2804 tmp_bd = tpa_info->bd;
2806 if (tmp_bd.m == NULL) {
2809 tmp = (uint32_t *)cqe;
2811         BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2812 fp->index, queue, cons, prod);
2813 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2814 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2816 /* XXX Error handling? */
2820 /* change the TPA queue to the start state */
2821 tpa_info->state = BXE_TPA_STATE_START;
2822 tpa_info->placement_offset = cqe->placement_offset;
2823 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2824 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2825 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2827 fp->rx_tpa_queue_used |= (1 << queue);
2830 * If all the buffer descriptors are filled with mbufs then fill in
2831 * the current consumer index with a new BD. Else if a maximum Rx
2832 * buffer limit is imposed then fill in the next producer index.
2834 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2837 /* move the received mbuf and mapping to TPA pool */
2838 tpa_info->bd = fp->rx_mbuf_chain[cons];
2840 /* release any existing RX BD mbuf mappings */
2841 if (cons != index) {
2842 rx_buf = &fp->rx_mbuf_chain[cons];
2844 if (rx_buf->m_map != NULL) {
2845 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2846 BUS_DMASYNC_POSTREAD);
2847 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2851 * We get here when the maximum number of rx buffers is less than
2852 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2853 * it out here without concern of a memory leak.
2855 fp->rx_mbuf_chain[cons].m = NULL;
2858 /* update the Rx SW BD with the mbuf info from the TPA pool */
2859 fp->rx_mbuf_chain[index] = tmp_bd;
2861 /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2862 rx_bd = &fp->rx_chain[index];
2863 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2864 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
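/*
 * Note on the index choice above: with the default limit
 * (max_rx_bufs == RX_BD_USABLE) the mbuf taken from the TPA pool refills
 * the freed consumer slot in place; with a reduced mbuf budget it is
 * placed at the producer slot instead, which is why the consumer slot
 * must be unloaded and NULLed out in that case.
 */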
2868 * When a TPA aggregation is completed, loop through the individual mbufs
2869 * of the aggregation, combining them into a single mbuf which will be sent
2870 * up the stack. Refill all freed SGEs with mbufs as we go along.
2873 bxe_fill_frag_mbuf(struct bxe_softc *sc,
2874 struct bxe_fastpath *fp,
2875 struct bxe_sw_tpa_info *tpa_info,
2879 struct eth_end_agg_rx_cqe *cqe,
2882 struct mbuf *m_frag;
2883 uint32_t frag_len, frag_size, i;
2888 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2891 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2892 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2894 /* make sure the aggregated frame is not too big to handle */
2895 if (pages > 8 * PAGES_PER_SGE) {
2897 uint32_t *tmp = (uint32_t *)cqe;
2899 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2900 "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2901 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2902 tpa_info->len_on_bd, frag_size);
2904 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2905 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2907 bxe_panic(sc, ("sge page count error\n"));
2912 * Scan through the scatter gather list pulling individual mbufs into a
2913 * single mbuf for the host stack.
2915 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2916 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2919 * Firmware gives the indices of the SGE as if the ring is an array
2920 * (meaning that the "next" element will consume 2 indices).
2922 frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2924 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2925 "sge_idx=%d frag_size=%d frag_len=%d\n",
2926 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2928 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2930 /* allocate a new mbuf for the SGE */
2931 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2933 /* Leave all remaining SGEs in the ring! */
2937 /* update the fragment length */
2938 m_frag->m_len = frag_len;
2940 /* concatenate the fragment to the head mbuf */
2942 fp->eth_q_stats.mbuf_alloc_sge--;
2944 /* update the TPA mbuf size and remaining fragment size */
2945 m->m_pkthdr.len += frag_len;
2946 frag_size -= frag_len;
2950 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2951 fp->index, queue, frag_size);
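/*
 * Worked example (hypothetical sizes, assuming 4KB SGE pages with one
 * page per SGE): a 9000-byte aggregation with len_on_bd = 1500 leaves
 * frag_size = 7500 spread across the SGL. The loop above then pulls two
 * fragments, frag_len = 4096 on the first pass and 3404 on the second,
 * after which frag_size reaches zero.
 */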
2957 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2961 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2962 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2964 for (j = 0; j < 2; j++) {
2965 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2972 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2974 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2975 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2978 * Clear the two last indices in the page to 1. These are the indices that
2979 * correspond to the "next" element, hence will never be indicated and
2980 * should be removed from the calculations.
2982 bxe_clear_sge_mask_next_elems(fp);
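/*
 * Illustration (assuming 4KB pages, i.e. 512 SGE entries per page): the
 * last two entries of each page, indices 510 and 511, hold the "next
 * page" pointer rather than real buffers, so their mask bits are
 * pre-cleared here and re-cleared after every producer update.
 */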
2986 bxe_update_last_max_sge(struct bxe_fastpath *fp,
2989 uint16_t last_max = fp->last_max_sge;
2991 if (SUB_S16(idx, last_max) > 0) {
2992 fp->last_max_sge = idx;
2997 bxe_update_sge_prod(struct bxe_softc *sc,
2998 struct bxe_fastpath *fp,
3000 union eth_sgl_or_raw_data *cqe)
3002 uint16_t last_max, last_elem, first_elem;
3010 /* first mark all used pages */
3011 for (i = 0; i < sge_len; i++) {
3012 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
3013 RX_SGE(le16toh(cqe->sgl[i])));
3017 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3018 fp->index, sge_len - 1,
3019 le16toh(cqe->sgl[sge_len - 1]));
3021 /* assume that the last SGE index is the biggest */
3022 bxe_update_last_max_sge(fp,
3023 le16toh(cqe->sgl[sge_len - 1]));
3025 last_max = RX_SGE(fp->last_max_sge);
3026 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3027 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3029 /* if ring is not full */
3030 if (last_elem + 1 != first_elem) {
3034 /* now update the prod */
3035 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3036 if (__predict_true(fp->sge_mask[i])) {
3040 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3041 delta += BIT_VEC64_ELEM_SZ;
3045 fp->rx_sge_prod += delta;
3046 /* clear page-end entries */
3047 bxe_clear_sge_mask_next_elems(fp);
3051 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3052 fp->index, fp->last_max_sge, fp->rx_sge_prod);
3056 * The aggregation on the current TPA queue has completed. Pull the individual
3057 * mbuf fragments together into a single mbuf, perform all necessary checksum
3058  * calculations, and send the resulting mbuf to the stack.
3061 bxe_tpa_stop(struct bxe_softc *sc,
3062 struct bxe_fastpath *fp,
3063 struct bxe_sw_tpa_info *tpa_info,
3066 struct eth_end_agg_rx_cqe *cqe,
3069 struct ifnet *ifp = sc->ifnet;
3074 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3075 fp->index, queue, tpa_info->placement_offset,
3076 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3080 /* allocate a replacement before modifying existing mbuf */
3081 rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3083 /* drop the frame and log an error */
3084 fp->eth_q_stats.rx_soft_errors++;
3085 goto bxe_tpa_stop_exit;
3088 /* we have a replacement, fixup the current mbuf */
3089 m_adj(m, tpa_info->placement_offset);
3090 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3092 /* mark the checksums valid (taken care of by the firmware) */
3093 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3094 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3095 m->m_pkthdr.csum_data = 0xffff;
3096 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3101 /* aggregate all of the SGEs into a single mbuf */
3102 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3104 /* drop the packet and log an error */
3105 fp->eth_q_stats.rx_soft_errors++;
3108 if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3109 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3110 m->m_flags |= M_VLANTAG;
3113     /* assign the packet to this interface */
3114 m->m_pkthdr.rcvif = ifp;
3116 #if __FreeBSD_version >= 800000
3117 /* specify what RSS queue was used for this flow */
3118 m->m_pkthdr.flowid = fp->index;
3123 fp->eth_q_stats.rx_tpa_pkts++;
3125 /* pass the frame to the stack */
3126 (*ifp->if_input)(ifp, m);
3129 /* we passed an mbuf up the stack or dropped the frame */
3130 fp->eth_q_stats.mbuf_alloc_tpa--;
3134 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3135 fp->rx_tpa_queue_used &= ~(1 << queue);
3140 struct bxe_fastpath *fp,
3144 struct eth_fast_path_rx_cqe *cqe_fp)
3146 struct mbuf *m_frag;
3147 uint16_t frags, frag_len;
3148 uint16_t sge_idx = 0;
3153 /* adjust the mbuf */
3156 frag_size = len - lenonbd;
3157 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3159 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3160 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3162 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3163 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3164 m_frag->m_len = frag_len;
3166 /* allocate a new mbuf for the SGE */
3167 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3169 /* Leave all remaining SGEs in the ring! */
3172 fp->eth_q_stats.mbuf_alloc_sge--;
3174 /* concatenate the fragment to the head mbuf */
3177 frag_size -= frag_len;
3180 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3186 bxe_rxeof(struct bxe_softc *sc,
3187 struct bxe_fastpath *fp)
3189 struct ifnet *ifp = sc->ifnet;
3190 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3191 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3197 /* CQ "next element" is of the size of the regular element */
3198 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3199 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3203 bd_cons = fp->rx_bd_cons;
3204 bd_prod = fp->rx_bd_prod;
3205 bd_prod_fw = bd_prod;
3206 sw_cq_cons = fp->rx_cq_cons;
3207 sw_cq_prod = fp->rx_cq_prod;
3210 * Memory barrier necessary as speculative reads of the rx
3211 * buffer can be ahead of the index in the status block
3216 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3217 fp->index, hw_cq_cons, sw_cq_cons);
3219 while (sw_cq_cons != hw_cq_cons) {
3220 struct bxe_sw_rx_bd *rx_buf = NULL;
3221 union eth_rx_cqe *cqe;
3222 struct eth_fast_path_rx_cqe *cqe_fp;
3223 uint8_t cqe_fp_flags;
3224 enum eth_rx_cqe_type cqe_fp_type;
3225 uint16_t len, lenonbd, pad;
3226 struct mbuf *m = NULL;
3228 comp_ring_cons = RCQ(sw_cq_cons);
3229 bd_prod = RX_BD(bd_prod);
3230 bd_cons = RX_BD(bd_cons);
3232 cqe = &fp->rcq_chain[comp_ring_cons];
3233 cqe_fp = &cqe->fast_path_cqe;
3234 cqe_fp_flags = cqe_fp->type_error_flags;
3235 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3238 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3239 "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3240 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3246 CQE_TYPE(cqe_fp_flags),
3248 cqe_fp->status_flags,
3249 le32toh(cqe_fp->rss_hash_result),
3250 le16toh(cqe_fp->vlan_tag),
3251 le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3252 le16toh(cqe_fp->len_on_bd));
3254 /* is this a slowpath msg? */
3255 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3256 bxe_sp_event(sc, fp, cqe);
3260 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3262 if (!CQE_TYPE_FAST(cqe_fp_type)) {
3263 struct bxe_sw_tpa_info *tpa_info;
3264 uint16_t frag_size, pages;
3267 if (CQE_TYPE_START(cqe_fp_type)) {
3268 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3269 bd_cons, bd_prod, cqe_fp);
3270 m = NULL; /* packet not ready yet */
3274 KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3275 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3277 queue = cqe->end_agg_cqe.queue_index;
3278 tpa_info = &fp->rx_tpa_info[queue];
3280 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3283 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3284 tpa_info->len_on_bd);
3285 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3287 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3288 &cqe->end_agg_cqe, comp_ring_cons);
3290 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3297 /* is this an error packet? */
3298 if (__predict_false(cqe_fp_flags &
3299 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3300 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3301 fp->eth_q_stats.rx_soft_errors++;
3305 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3306 lenonbd = le16toh(cqe_fp->len_on_bd);
3307 pad = cqe_fp->placement_offset;
3311 if (__predict_false(m == NULL)) {
3312 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3313 bd_cons, fp->index);
3317 /* XXX double copy if packet length under a threshold */
3320 * If all the buffer descriptors are filled with mbufs then fill in
3321 * the current consumer index with a new BD. Else if a maximum Rx
3322 * buffer limit is imposed then fill in the next producer index.
3324 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3325 (sc->max_rx_bufs != RX_BD_USABLE) ?
3329 /* we simply reuse the received mbuf and don't post it to the stack */
3332 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3334 fp->eth_q_stats.rx_soft_errors++;
3336 if (sc->max_rx_bufs != RX_BD_USABLE) {
3337 /* copy this consumer index to the producer index */
3338 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3339 sizeof(struct bxe_sw_rx_bd));
3340 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3346 /* current mbuf was detached from the bd */
3347 fp->eth_q_stats.mbuf_alloc_rx--;
3349 /* we allocated a replacement mbuf, fixup the current one */
3351 m->m_pkthdr.len = m->m_len = len;
3353 if ((len > 60) && (len > lenonbd)) {
3354 fp->eth_q_stats.rx_bxe_service_rxsgl++;
3355 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3358 fp->eth_q_stats.rx_jumbo_sge_pkts++;
3359 } else if (lenonbd < len) {
3360 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3363         /* assign the packet to this interface */
3364 m->m_pkthdr.rcvif = ifp;
3366         /* assume no hardware checksum has completed */
3367 m->m_pkthdr.csum_flags = 0;
3369 /* validate checksum if offload enabled */
3370 if (ifp->if_capenable & IFCAP_RXCSUM) {
3371 /* check for a valid IP frame */
3372 if (!(cqe->fast_path_cqe.status_flags &
3373 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3374 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3375 if (__predict_false(cqe_fp_flags &
3376 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3377 fp->eth_q_stats.rx_hw_csum_errors++;
3379 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3380 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3384 /* check for a valid TCP/UDP frame */
3385 if (!(cqe->fast_path_cqe.status_flags &
3386 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3387 if (__predict_false(cqe_fp_flags &
3388 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3389 fp->eth_q_stats.rx_hw_csum_errors++;
3391 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3392 m->m_pkthdr.csum_data = 0xFFFF;
3393 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3399 /* if there is a VLAN tag then flag that info */
3400 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3401 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3402 m->m_flags |= M_VLANTAG;
3405 #if __FreeBSD_version >= 800000
3406 /* specify what RSS queue was used for this flow */
3407 m->m_pkthdr.flowid = fp->index;
3413 bd_cons = RX_BD_NEXT(bd_cons);
3414 bd_prod = RX_BD_NEXT(bd_prod);
3415 bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3417 /* pass the frame to the stack */
3418 if (__predict_true(m != NULL)) {
3421 (*ifp->if_input)(ifp, m);
3426 sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3427 sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3429 /* limit spinning on the queue */
3433 if (rx_pkts == sc->rx_budget) {
3434 fp->eth_q_stats.rx_budget_reached++;
3437 } /* while work to do */
3439 fp->rx_bd_cons = bd_cons;
3440 fp->rx_bd_prod = bd_prod_fw;
3441 fp->rx_cq_cons = sw_cq_cons;
3442 fp->rx_cq_prod = sw_cq_prod;
3444 /* Update producers */
3445 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3447 fp->eth_q_stats.rx_pkts += rx_pkts;
3448 fp->eth_q_stats.rx_calls++;
3450 BXE_FP_RX_UNLOCK(fp);
3452 return (sw_cq_cons != hw_cq_cons);
3456 bxe_free_tx_pkt(struct bxe_softc *sc,
3457 struct bxe_fastpath *fp,
3460 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3461 struct eth_tx_start_bd *tx_start_bd;
3462 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3466 /* unmap the mbuf from non-paged memory */
3467 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3469 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3470 nbd = le16toh(tx_start_bd->nbd) - 1;
3472 new_cons = (tx_buf->first_bd + nbd);
3475 if (__predict_true(tx_buf->m != NULL)) {
3477 fp->eth_q_stats.mbuf_alloc_tx--;
3479 fp->eth_q_stats.tx_chain_lost_mbuf++;
3483 tx_buf->first_bd = 0;
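/*
 * Worked example (hypothetical values): if first_bd = 100 and the start
 * BD reports nbd = 4, then nbd becomes 3 after the decrement above and
 * new_cons = 103, advancing the consumer to the first BD of the next
 * packet.
 */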
3488 /* transmit timeout watchdog */
3490 bxe_watchdog(struct bxe_softc *sc,
3491 struct bxe_fastpath *fp)
3495 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3496 BXE_FP_TX_UNLOCK(fp);
3500 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3502 BXE_FP_TX_UNLOCK(fp);
3503 BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3504 taskqueue_enqueue_timeout(taskqueue_thread,
3505 &sc->sp_err_timeout_task, hz/10);
3510 /* processes transmit completions */
3512 bxe_txeof(struct bxe_softc *sc,
3513 struct bxe_fastpath *fp)
3515 struct ifnet *ifp = sc->ifnet;
3516 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3517 uint16_t tx_bd_avail;
3519 BXE_FP_TX_LOCK_ASSERT(fp);
3521 bd_cons = fp->tx_bd_cons;
3522 hw_cons = le16toh(*fp->tx_cons_sb);
3523 sw_cons = fp->tx_pkt_cons;
3525 while (sw_cons != hw_cons) {
3526 pkt_cons = TX_BD(sw_cons);
3529 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3530 fp->index, hw_cons, sw_cons, pkt_cons);
3532 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3537 fp->tx_pkt_cons = sw_cons;
3538 fp->tx_bd_cons = bd_cons;
3541 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3542 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3546 tx_bd_avail = bxe_tx_avail(sc, fp);
3548 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3549 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3551 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3554 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3555 /* reset the watchdog timer if there are pending transmits */
3556 fp->watchdog_timer = BXE_TX_TIMEOUT;
3559 /* clear watchdog when there are no pending transmits */
3560 fp->watchdog_timer = 0;
3566 bxe_drain_tx_queues(struct bxe_softc *sc)
3568 struct bxe_fastpath *fp;
3571 /* wait until all TX fastpath tasks have completed */
3572 for (i = 0; i < sc->num_queues; i++) {
3577 while (bxe_has_tx_work(fp)) {
3581 BXE_FP_TX_UNLOCK(fp);
3584 BLOGE(sc, "Timeout waiting for fp[%d] "
3585 "transmits to complete!\n", i);
3586 bxe_panic(sc, ("tx drain failure\n"));
3600 bxe_del_all_macs(struct bxe_softc *sc,
3601 struct ecore_vlan_mac_obj *mac_obj,
3603 uint8_t wait_for_comp)
3605 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3608 /* wait for completion of requested */
3609 if (wait_for_comp) {
3610 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3613 /* Set the mac type of addresses we want to clear */
3614 bxe_set_bit(mac_type, &vlan_mac_flags);
3616 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3618 BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3619 rc, mac_type, wait_for_comp);
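/*
 * Illustrative sketch only (not compiled): removing every configured
 * unicast-list MAC and blocking until the ramrod completes, similar to
 * what bxe_chip_cleanup() does during unload.
 */
#if 0
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj,
                          ECORE_UC_LIST_MAC, TRUE);
    if (rc < 0) {
        BLOGE(sc, "UC MAC cleanup failed (%d)\n", rc);
    }
#endif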
3626 bxe_fill_accept_flags(struct bxe_softc *sc,
3628 unsigned long *rx_accept_flags,
3629 unsigned long *tx_accept_flags)
3631 /* Clear the flags first */
3632 *rx_accept_flags = 0;
3633 *tx_accept_flags = 0;
3636 case BXE_RX_MODE_NONE:
3638 * 'drop all' supersedes any accept flags that may have been
3639 * passed to the function.
3643 case BXE_RX_MODE_NORMAL:
3644 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3645 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3646 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3648 /* internal switching mode */
3649 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3650 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3651 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3655 case BXE_RX_MODE_ALLMULTI:
3656 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3657 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3658 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3660 /* internal switching mode */
3661 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3662 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3663 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3667 case BXE_RX_MODE_PROMISC:
3669          * According to the definition of SI mode, an iface in promisc mode
3670 * should receive matched and unmatched (in resolution of port)
3673 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3674 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3675 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3676 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3678 /* internal switching mode */
3679 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3680 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3683 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3685 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3691 BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3695 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3696 if (rx_mode != BXE_RX_MODE_NONE) {
3697 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3698 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3705 bxe_set_q_rx_mode(struct bxe_softc *sc,
3707 unsigned long rx_mode_flags,
3708 unsigned long rx_accept_flags,
3709 unsigned long tx_accept_flags,
3710 unsigned long ramrod_flags)
3712 struct ecore_rx_mode_ramrod_params ramrod_param;
3715 memset(&ramrod_param, 0, sizeof(ramrod_param));
3717 /* Prepare ramrod parameters */
3718 ramrod_param.cid = 0;
3719 ramrod_param.cl_id = cl_id;
3720 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3721 ramrod_param.func_id = SC_FUNC(sc);
3723 ramrod_param.pstate = &sc->sp_state;
3724 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3726 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3727 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3729 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3731 ramrod_param.ramrod_flags = ramrod_flags;
3732 ramrod_param.rx_mode_flags = rx_mode_flags;
3734 ramrod_param.rx_accept_flags = rx_accept_flags;
3735 ramrod_param.tx_accept_flags = tx_accept_flags;
3737 rc = ecore_config_rx_mode(sc, &ramrod_param);
3739 BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3740 "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3741 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3742 (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3743 (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3751 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3753 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3754 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3757 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3763 bxe_set_bit(RAMROD_RX, &ramrod_flags);
3764 bxe_set_bit(RAMROD_TX, &ramrod_flags);
3766 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3767 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3768 rx_accept_flags, tx_accept_flags,
3772 /* returns the "mcp load_code" according to global load_count array */
3774 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3776 int path = SC_PATH(sc);
3777 int port = SC_PORT(sc);
3779 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3780 path, load_count[path][0], load_count[path][1],
3781 load_count[path][2]);
3782 load_count[path][0]++;
3783 load_count[path][1 + port]++;
3784 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3785 path, load_count[path][0], load_count[path][1],
3786 load_count[path][2]);
3787 if (load_count[path][0] == 1) {
3788 return (FW_MSG_CODE_DRV_LOAD_COMMON);
3789 } else if (load_count[path][1 + port] == 1) {
3790 return (FW_MSG_CODE_DRV_LOAD_PORT);
3792 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3796 /* returns the "mcp load_code" according to global load_count array */
3798 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3800 int port = SC_PORT(sc);
3801 int path = SC_PATH(sc);
3803 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3804 path, load_count[path][0], load_count[path][1],
3805 load_count[path][2]);
3806 load_count[path][0]--;
3807 load_count[path][1 + port]--;
3808 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3809 path, load_count[path][0], load_count[path][1],
3810 load_count[path][2]);
3811 if (load_count[path][0] == 0) {
3812 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3813 } else if (load_count[path][1 + port] == 0) {
3814 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3816 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
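/*
 * Worked example (no-MCP mode): starting from load counts {0, 0, 0} on a
 * path, the first function to load is told FW_MSG_CODE_DRV_LOAD_COMMON,
 * the first function on each remaining port is told
 * FW_MSG_CODE_DRV_LOAD_PORT, and every other function is told
 * FW_MSG_CODE_DRV_LOAD_FUNCTION. Unload mirrors this: the last function
 * off a port reports UNLOAD_PORT and the last one off the path reports
 * UNLOAD_COMMON.
 */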
3820 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3822 bxe_send_unload_req(struct bxe_softc *sc,
3825 uint32_t reset_code = 0;
3827 /* Select the UNLOAD request mode */
3828 if (unload_mode == UNLOAD_NORMAL) {
3829 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3831 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3834 /* Send the request to the MCP */
3835 if (!BXE_NOMCP(sc)) {
3836 reset_code = bxe_fw_command(sc, reset_code, 0);
3838 reset_code = bxe_nic_unload_no_mcp(sc);
3841 return (reset_code);
3844 /* send UNLOAD_DONE command to the MCP */
3846 bxe_send_unload_done(struct bxe_softc *sc,
3849 uint32_t reset_param =
3850 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3852 /* Report UNLOAD_DONE to MCP */
3853 if (!BXE_NOMCP(sc)) {
3854 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3859 bxe_func_wait_started(struct bxe_softc *sc)
3863 if (!sc->port.pmf) {
3868 * (assumption: No Attention from MCP at this stage)
3869 * PMF probably in the middle of TX disable/enable transaction
3870      * 1. Sync ISR for default SB
3871 * 2. Sync SP queue - this guarantees us that attention handling started
3872      * 3. Wait until the TX disable/enable transaction completes
3874 * 1+2 guarantee that if DCBX attention was scheduled it already changed
3875      * the pending bit of the transaction from STARTED-->TX_STOPPED; if we have
3876      * already received the completion for the transaction, the state is TX_STOPPED.
3877 * State will return to STARTED after completion of TX_STOPPED-->STARTED
3881 /* XXX make sure default SB ISR is done */
3882 /* need a way to synchronize an irq (intr_mtx?) */
3884 /* XXX flush any work queues */
3886 while (ecore_func_get_state(sc, &sc->func_obj) !=
3887 ECORE_F_STATE_STARTED && tout--) {
3891 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3893 * Failed to complete the transaction in a "good way"
3894 * Force both transactions with CLR bit.
3896 struct ecore_func_state_params func_params = { NULL };
3898 BLOGE(sc, "Unexpected function state! "
3899 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3901 func_params.f_obj = &sc->func_obj;
3902 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3904 /* STARTED-->TX_STOPPED */
3905 func_params.cmd = ECORE_F_CMD_TX_STOP;
3906 ecore_func_state_change(sc, &func_params);
3908 /* TX_STOPPED-->STARTED */
3909 func_params.cmd = ECORE_F_CMD_TX_START;
3910 return (ecore_func_state_change(sc, &func_params));
3917 bxe_stop_queue(struct bxe_softc *sc,
3920 struct bxe_fastpath *fp = &sc->fp[index];
3921 struct ecore_queue_state_params q_params = { NULL };
3924 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3926 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3927 /* We want to wait for completion in this context */
3928 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3930 /* Stop the primary connection: */
3932 /* ...halt the connection */
3933 q_params.cmd = ECORE_Q_CMD_HALT;
3934 rc = ecore_queue_state_change(sc, &q_params);
3939 /* ...terminate the connection */
3940 q_params.cmd = ECORE_Q_CMD_TERMINATE;
3941 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3942 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3943 rc = ecore_queue_state_change(sc, &q_params);
3948 /* ...delete cfc entry */
3949 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3950 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3951 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3952 return (ecore_queue_state_change(sc, &q_params));
3955 /* wait for the outstanding SP commands */
3956 static inline uint8_t
3957 bxe_wait_sp_comp(struct bxe_softc *sc,
3961 int tout = 5000; /* wait for 5 secs tops */
3965 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3974 tmp = atomic_load_acq_long(&sc->sp_state);
3976 BLOGE(sc, "Filtering completion timed out: "
3977 "sp_state 0x%lx, mask 0x%lx\n",
3986 bxe_func_stop(struct bxe_softc *sc)
3988 struct ecore_func_state_params func_params = { NULL };
3991 /* prepare parameters for function state transitions */
3992 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3993 func_params.f_obj = &sc->func_obj;
3994 func_params.cmd = ECORE_F_CMD_STOP;
3997 * Try to stop the function the 'good way'. If it fails (in case
3998 * of a parity error during bxe_chip_cleanup()) and we are
3999 * not in a debug mode, perform a state transaction in order to
4000 * enable further HW_RESET transaction.
4002 rc = ecore_func_state_change(sc, &func_params);
4004 BLOGE(sc, "FUNC_STOP ramrod failed. "
4005 "Running a dry transaction (%d)\n", rc);
4006 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4007 return (ecore_func_state_change(sc, &func_params));
4014 bxe_reset_hw(struct bxe_softc *sc,
4017 struct ecore_func_state_params func_params = { NULL };
4019 /* Prepare parameters for function state transitions */
4020 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4022 func_params.f_obj = &sc->func_obj;
4023 func_params.cmd = ECORE_F_CMD_HW_RESET;
4025 func_params.params.hw_init.load_phase = load_code;
4027 return (ecore_func_state_change(sc, &func_params));
4031 bxe_int_disable_sync(struct bxe_softc *sc,
4035 /* prevent the HW from sending interrupts */
4036 bxe_int_disable(sc);
4039 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4040 /* make sure all ISRs are done */
4042 /* XXX make sure sp_task is not running */
4043 /* cancel and flush work queues */
4047 bxe_chip_cleanup(struct bxe_softc *sc,
4048 uint32_t unload_mode,
4051 int port = SC_PORT(sc);
4052 struct ecore_mcast_ramrod_params rparam = { NULL };
4053 uint32_t reset_code;
4056 bxe_drain_tx_queues(sc);
4058 /* give HW time to discard old tx messages */
4061 /* Clean all ETH MACs */
4062 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4064 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4067 /* Clean up UC list */
4068 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4070 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4074 if (!CHIP_IS_E1(sc)) {
4075 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4078 /* Set "drop all" to stop Rx */
4081 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4082 * a race between the completion code and this code.
4086 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4087 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4089 bxe_set_storm_rx_mode(sc);
4092 /* Clean up multicast configuration */
4093 rparam.mcast_obj = &sc->mcast_obj;
4094 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4096 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4099 BXE_MCAST_UNLOCK(sc);
4101 // XXX bxe_iov_chip_cleanup(sc);
4104 * Send the UNLOAD_REQUEST to the MCP. This will return if
4105 * this function should perform FUNCTION, PORT, or COMMON HW
4108 reset_code = bxe_send_unload_req(sc, unload_mode);
4111 * (assumption: No Attention from MCP at this stage)
4112 * PMF probably in the middle of TX disable/enable transaction
4114 rc = bxe_func_wait_started(sc);
4116 BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4120 * Close multi and leading connections
4121 * Completions for ramrods are collected in a synchronous way
4123 for (i = 0; i < sc->num_queues; i++) {
4124 if (bxe_stop_queue(sc, i)) {
4130      * If the SP settings did not complete by now, something has
4131      * gone very wrong.
4133 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4134 BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4139 rc = bxe_func_stop(sc);
4141 BLOGE(sc, "Function stop failed!(%d)\n", rc);
4144 /* disable HW interrupts */
4145 bxe_int_disable_sync(sc, TRUE);
4147 /* detach interrupts */
4148 bxe_interrupt_detach(sc);
4150 /* Reset the chip */
4151 rc = bxe_reset_hw(sc, reset_code);
4153 BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4156 /* Report UNLOAD_DONE to MCP */
4157 bxe_send_unload_done(sc, keep_link);
4161 bxe_disable_close_the_gate(struct bxe_softc *sc)
4164 int port = SC_PORT(sc);
4167 "Disabling 'close the gates'\n");
4169 if (CHIP_IS_E1(sc)) {
4170 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4171 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4172 val = REG_RD(sc, addr);
4174 REG_WR(sc, addr, val);
4176 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4177 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4178 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4179 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4184  * Cleans the objects that have internal lists without sending
4185  * ramrods. Should be run when interrupts are disabled.
4188 bxe_squeeze_objects(struct bxe_softc *sc)
4190 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4191 struct ecore_mcast_ramrod_params rparam = { NULL };
4192 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4195 /* Cleanup MACs' object first... */
4197 /* Wait for completion of requested */
4198 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4199 /* Perform a dry cleanup */
4200 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4202 /* Clean ETH primary MAC */
4203 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4204 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4207 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4210 /* Cleanup UC list */
4212 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4213 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4216 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4219 /* Now clean mcast object... */
4221 rparam.mcast_obj = &sc->mcast_obj;
4222 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4224 /* Add a DEL command... */
4225 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4227 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4230 /* now wait until all pending commands are cleared */
4232 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4235 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4239 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4243 /* stop the controller */
4244 static __noinline int
4245 bxe_nic_unload(struct bxe_softc *sc,
4246 uint32_t unload_mode,
4249 uint8_t global = FALSE;
4253 BXE_CORE_LOCK_ASSERT(sc);
4255 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
4257 for (i = 0; i < sc->num_queues; i++) {
4258 struct bxe_fastpath *fp;
4261 fp->watchdog_timer = 0;
4263 BXE_FP_TX_UNLOCK(fp);
4266 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4268 /* mark driver as unloaded in shmem2 */
4269 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4270 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4271 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4272 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4275 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4276 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4278 if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4280 * We can get here if the driver has been unloaded
4281 * during parity error recovery and is either waiting for a
4282 * leader to complete or for other functions to unload and
4283 * then ifconfig down has been issued. In this case we want to
4284 * unload and let other functions to complete a recovery
4287 sc->recovery_state = BXE_RECOVERY_DONE;
4289 bxe_release_leader_lock(sc);
4291 BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4293             BLOGE(sc, "Can't unload in closed or error state, recovery_state 0x%x"
4294 " state = 0x%x\n", sc->recovery_state, sc->state);
4299      * Nothing to do during unload if the previous bxe_nic_load()
4300      * did not complete successfully - all resources are released.
4302 if ((sc->state == BXE_STATE_CLOSED) ||
4303 (sc->state == BXE_STATE_ERROR)) {
4307 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4313 sc->rx_mode = BXE_RX_MODE_NONE;
4314 /* XXX set rx mode ??? */
4316 if (IS_PF(sc) && !sc->grcdump_done) {
4317 /* set ALWAYS_ALIVE bit in shmem */
4318 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4322 bxe_stats_handle(sc, STATS_EVENT_STOP);
4323 bxe_save_statistics(sc);
4326 /* wait till consumers catch up with producers in all queues */
4327 bxe_drain_tx_queues(sc);
4329 /* if VF indicate to PF this function is going down (PF will delete sp
4330 * elements and clear initializations
4333 ; /* bxe_vfpf_close_vf(sc); */
4334 } else if (unload_mode != UNLOAD_RECOVERY) {
4335 /* if this is a normal/close unload need to clean up chip */
4336 if (!sc->grcdump_done)
4337 bxe_chip_cleanup(sc, unload_mode, keep_link);
4339 /* Send the UNLOAD_REQUEST to the MCP */
4340 bxe_send_unload_req(sc, unload_mode);
4343 * Prevent transactions to host from the functions on the
4344 * engine that doesn't reset global blocks in case of global
4345          * attention once global blocks are reset and gates are opened
4346 * (the engine which leader will perform the recovery
4349 if (!CHIP_IS_E1x(sc)) {
4353 /* disable HW interrupts */
4354 bxe_int_disable_sync(sc, TRUE);
4356 /* detach interrupts */
4357 bxe_interrupt_detach(sc);
4359 /* Report UNLOAD_DONE to MCP */
4360 bxe_send_unload_done(sc, FALSE);
4364 * At this stage no more interrupts will arrive so we may safely clean
4365      * the queueable objects here in case they failed to get cleaned so far.
4368 bxe_squeeze_objects(sc);
4371 /* There should be no more pending SP commands at this stage */
4376 bxe_free_fp_buffers(sc);
4382 bxe_free_fw_stats_mem(sc);
4384 sc->state = BXE_STATE_CLOSED;
4387 * Check if there are pending parity attentions. If there are - set
4388 * RECOVERY_IN_PROGRESS.
4390 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4391 bxe_set_reset_in_progress(sc);
4393 /* Set RESET_IS_GLOBAL if needed */
4395 bxe_set_reset_global(sc);
4400      * The last driver to unload must disable the "close the gate"
4401      * functionality if there is no parity attention or "process kill" pending.
4403 if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4404 bxe_reset_is_done(sc, SC_PATH(sc))) {
4405 bxe_disable_close_the_gate(sc);
4408 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4410 bxe_link_report(sc);
4416 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4417 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4420 bxe_ifmedia_update(struct ifnet *ifp)
4422 struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc;
4423 struct ifmedia *ifm;
4427 /* We only support Ethernet media type. */
4428 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4432 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4438 case IFM_10G_TWINAX:
4440 /* We don't support changing the media type. */
4441 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4442 IFM_SUBTYPE(ifm->ifm_media));
4450 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4453 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4455 struct bxe_softc *sc = ifp->if_softc;
4457 /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4458 line if the IFM_AVALID flag is *NOT* set. So we need to set this
4459        flag unconditionally (irrespective of the administrative
4460        'up/down' state of the interface) to ensure that the line is always
4463 ifmr->ifm_status = IFM_AVALID;
4465 /* Setup the default interface info. */
4466 ifmr->ifm_active = IFM_ETHER;
4468 /* Report link down if the driver isn't running. */
4469 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4470 ifmr->ifm_active |= IFM_NONE;
4471 BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4472 BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4473 __func__, sc->link_vars.link_up);
4478 if (sc->link_vars.link_up) {
4479 ifmr->ifm_status |= IFM_ACTIVE;
4480 ifmr->ifm_active |= IFM_FDX;
4482 ifmr->ifm_active |= IFM_NONE;
4483 BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4488 ifmr->ifm_active |= sc->media;
4493 bxe_handle_chip_tq(void *context,
4496 struct bxe_softc *sc = (struct bxe_softc *)context;
4497 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4501 case CHIP_TQ_REINIT:
4502 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
4503 /* restart the interface */
4504 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4505 bxe_periodic_stop(sc);
4507 bxe_stop_locked(sc);
4508 bxe_init_locked(sc);
4509 BXE_CORE_UNLOCK(sc);
4519 * Handles any IOCTL calls from the operating system.
4522 * 0 = Success, >0 Failure
4525 bxe_ioctl(struct ifnet *ifp,
4529 struct bxe_softc *sc = ifp->if_softc;
4530 struct ifreq *ifr = (struct ifreq *)data;
4535 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4536 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4541 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4544 if (sc->mtu == ifr->ifr_mtu) {
4545 /* nothing to change */
4549 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4550 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4551 ifr->ifr_mtu, mtu_min, mtu_max);
4556 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4557 (unsigned long)ifr->ifr_mtu);
4558 atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu,
4559 (unsigned long)ifr->ifr_mtu);
4565 /* toggle the interface state up or down */
4566 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4569 /* check if the interface is up */
4570 if (ifp->if_flags & IFF_UP) {
4571 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4572 /* set the receive mode flags */
4573 bxe_set_rx_mode(sc);
4574 } else if(sc->state != BXE_STATE_DISABLED) {
4575 bxe_init_locked(sc);
4578 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4579 bxe_periodic_stop(sc);
4580 bxe_stop_locked(sc);
4583 BXE_CORE_UNLOCK(sc);
4589 /* add/delete multicast addresses */
4590 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4592 /* check if the interface is up */
4593 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4594 /* set the receive mode flags */
4596 bxe_set_rx_mode(sc);
4597 BXE_CORE_UNLOCK(sc);
4603 /* find out which capabilities have changed */
4604 mask = (ifr->ifr_reqcap ^ ifp->if_capenable);
4606 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4609 /* toggle the LRO capabilities enabled flag */
4610 if (mask & IFCAP_LRO) {
4611 ifp->if_capenable ^= IFCAP_LRO;
4612 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4613 (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF");
4617 /* toggle the TXCSUM checksum capabilities enabled flag */
4618 if (mask & IFCAP_TXCSUM) {
4619 ifp->if_capenable ^= IFCAP_TXCSUM;
4620 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4621 (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF");
4622 if (ifp->if_capenable & IFCAP_TXCSUM) {
4623 ifp->if_hwassist = (CSUM_IP |
4630 ifp->if_hwassist = 0;
4634 /* toggle the RXCSUM checksum capabilities enabled flag */
4635 if (mask & IFCAP_RXCSUM) {
4636 ifp->if_capenable ^= IFCAP_RXCSUM;
4637 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4638 (ifp->if_capenable & IFCAP_RXCSUM) ? "ON" : "OFF");
4639 if (ifp->if_capenable & IFCAP_RXCSUM) {
4640 ifp->if_hwassist = (CSUM_IP |
4647 ifp->if_hwassist = 0;
4651 /* toggle TSO4 capabilities enabled flag */
4652 if (mask & IFCAP_TSO4) {
4653 ifp->if_capenable ^= IFCAP_TSO4;
4654 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4655 (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF");
4658 /* toggle TSO6 capabilities enabled flag */
4659 if (mask & IFCAP_TSO6) {
4660 ifp->if_capenable ^= IFCAP_TSO6;
4661 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4662 (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF");
4665 /* toggle VLAN_HWTSO capabilities enabled flag */
4666 if (mask & IFCAP_VLAN_HWTSO) {
4667 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4668 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4669 (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4672 /* toggle VLAN_HWCSUM capabilities enabled flag */
4673 if (mask & IFCAP_VLAN_HWCSUM) {
4674 /* XXX investigate this... */
4675 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4679 /* toggle VLAN_MTU capabilities enabled flag */
4680 if (mask & IFCAP_VLAN_MTU) {
4681 /* XXX investigate this... */
4682 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4686 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4687 if (mask & IFCAP_VLAN_HWTAGGING) {
4688 /* XXX investigate this... */
4689 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4693 /* toggle VLAN_HWFILTER capabilities enabled flag */
4694 if (mask & IFCAP_VLAN_HWFILTER) {
4695 /* XXX investigate this... */
4696 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4708 /* set/get interface media */
4709 BLOGD(sc, DBG_IOCTL,
4710 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4712 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4716 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4718 error = ether_ioctl(ifp, command, data);
4722 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4723 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4724 "Re-initializing hardware from IOCTL change\n");
4725 bxe_periodic_stop(sc);
4727 bxe_stop_locked(sc);
4728 bxe_init_locked(sc);
4729 BXE_CORE_UNLOCK(sc);
4735 static __noinline void
4736 bxe_dump_mbuf(struct bxe_softc *sc,
4743 if (!(sc->debug & DBG_MBUF)) {
4748 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4754 #if __FreeBSD_version >= 1000000
4756 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4757 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4759 if (m->m_flags & M_PKTHDR) {
4761 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4762 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4763 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4767 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4768 i, m, m->m_len, m->m_flags,
4769 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4771 if (m->m_flags & M_PKTHDR) {
4773 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4774 i, m->m_pkthdr.len, m->m_flags,
4775 "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4776 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4777 "\22M_PROMISC\23M_NOFREE",
4778 (int)m->m_pkthdr.csum_flags,
4779 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4780 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4781 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4782 "\14CSUM_PSEUDO_HDR");
4784 #endif /* #if __FreeBSD_version >= 1000000 */
4786 if (m->m_flags & M_EXT) {
4787 switch (m->m_ext.ext_type) {
4788 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
4789 case EXT_SFBUF: type = "EXT_SFBUF"; break;
4790 case EXT_JUMBOP: type = "EXT_JUMBOP"; break;
4791 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
4792 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
4793 case EXT_PACKET: type = "EXT_PACKET"; break;
4794 case EXT_MBUF: type = "EXT_MBUF"; break;
4795 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
4796 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
4797 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4798 case EXT_EXTREF: type = "EXT_EXTREF"; break;
4799 default: type = "UNKNOWN"; break;
4803 "%02d: - m_ext: %p ext_size=%d type=%s\n",
4804 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4808 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4817 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4818 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4819 * The window: 3 bds are reserved, 1 for the header BD plus 2 for the parse BD and the last BD.
4820 * The header comes in a separate BD in FreeBSD, so 13 - 3 = 10.
4821 * Returns: 0 if OK to send, 1 if the packet needs further defragmentation.
4824 bxe_chktso_window(struct bxe_softc *sc,
4826 bus_dma_segment_t *segs,
4829 uint32_t num_wnds, wnd_size, wnd_sum;
4830 int32_t frag_idx, wnd_idx;
4831 unsigned short lso_mss;
4837 num_wnds = nsegs - wnd_size;
4838 lso_mss = htole16(m->m_pkthdr.tso_segsz);
4841 * The total Eth+IP+TCP headers live in the first FreeBSD mbuf, so calculate
4842 * the first window's data sum while skipping the first segment, assuming it
4843 * holds those headers.
4845 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4846 wnd_sum += htole16(segs[frag_idx].ds_len);
4849 /* check the first 10 bd window size */
4850 if (wnd_sum < lso_mss) {
4854 /* run through the windows */
4855 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4856 /* subtract the length of the segment leaving the window (excluding the header) */
4857 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4858 /* add the next mbuf len to the len of our new window */
4859 wnd_sum += htole16(segs[frag_idx].ds_len);
4860 if (wnd_sum < lso_mss) {
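/*
 * Illustrative sketch (not part of the driver): the sliding-window test
 * described above, over a plain array of segment lengths and using the
 * driver's return convention (0 = OK to send, 1 = defrag needed). The
 * helper name and the uint32_t lengths are hypothetical; the real routine
 * works on bus_dma_segment_t entries and byte-swapped values.
 */
#if 0
static int
tso_window_check(const uint32_t *seg_len, int nsegs, uint32_t mss,
                 int wnd_size)
{
    uint32_t wnd_sum = 0;
    int i;

    /* sum the first window, skipping segment 0 (the headers) */
    for (i = 1; i <= wnd_size; i++)
        wnd_sum += seg_len[i];
    if (wnd_sum < mss)
        return (1); /* first window cannot cover one MSS */

    /* slide the window forward one segment at a time */
    for (i = wnd_size + 1; i < nsegs; i++) {
        wnd_sum -= seg_len[i - wnd_size]; /* segment leaving the window */
        wnd_sum += seg_len[i];            /* segment entering the window */
        if (wnd_sum < mss)
            return (1);
    }

    return (0); /* every window covers at least one MSS */
}
#endif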
4869 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4871 uint32_t *parsing_data)
4873 struct ether_vlan_header *eh = NULL;
4874 struct ip *ip4 = NULL;
4875 struct ip6_hdr *ip6 = NULL;
4877 struct tcphdr *th = NULL;
4878 int e_hlen, ip_hlen, l4_off;
4881 if (m->m_pkthdr.csum_flags == CSUM_IP) {
4882 /* no L4 checksum offload needed */
4886 /* get the Ethernet header */
4887 eh = mtod(m, struct ether_vlan_header *);
4889 /* handle VLAN encapsulation if present */
4890 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4891 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4892 proto = ntohs(eh->evl_proto);
4894 e_hlen = ETHER_HDR_LEN;
4895 proto = ntohs(eh->evl_encap_proto);
4900 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4901 ip4 = (m->m_len < sizeof(struct ip)) ?
4902 (struct ip *)m->m_next->m_data :
4903 (struct ip *)(m->m_data + e_hlen);
4904 /* ip_hl is number of 32-bit words */
4905 ip_hlen = (ip4->ip_hl << 2);
4908 case ETHERTYPE_IPV6:
4909 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4910 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4911 (struct ip6_hdr *)m->m_next->m_data :
4912 (struct ip6_hdr *)(m->m_data + e_hlen);
4913 /* XXX cannot support offload with IPv6 extensions */
4914 ip_hlen = sizeof(struct ip6_hdr);
4918 /* We can't offload in this case... */
4919 /* XXX error stat ??? */
4923 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4924 l4_off = (e_hlen + ip_hlen);
4927 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4928 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4930 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4933 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4934 th = (struct tcphdr *)(ip + ip_hlen);
4935 /* th_off is number of 32-bit words */
4936 *parsing_data |= ((th->th_off <<
4937 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4938 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4939 return (l4_off + (th->th_off << 2)); /* entire header length */
4940 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4942 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4943 return (l4_off + sizeof(struct udphdr)); /* entire header length */
4945 /* XXX error stat ??? */
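/*
 * Worked example (standard untagged IPv4/TCP, no options): e_hlen = 14 and
 * ip_hlen = 20, so l4_off = 34 bytes = 17 16-bit words, which is the value
 * packed into the L4_HDR_START_OFFSET_W field above. th_off = 5 (32-bit
 * words, i.e. a 20-byte TCP header), so the TCP branch returns
 * 34 + 20 = 54, the total Eth+IP+TCP header length in bytes.
 */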
4951 bxe_set_pbd_csum(struct bxe_fastpath *fp,
4953 struct eth_tx_parse_bd_e1x *pbd)
4955 struct ether_vlan_header *eh = NULL;
4956 struct ip *ip4 = NULL;
4957 struct ip6_hdr *ip6 = NULL;
4959 struct tcphdr *th = NULL;
4960 struct udphdr *uh = NULL;
4961 int e_hlen, ip_hlen;
4967 /* get the Ethernet header */
4968 eh = mtod(m, struct ether_vlan_header *);
4970 /* handle VLAN encapsulation if present */
4971 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4972 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4973 proto = ntohs(eh->evl_proto);
4975 e_hlen = ETHER_HDR_LEN;
4976 proto = ntohs(eh->evl_encap_proto);
4981 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4982 ip4 = (m->m_len < sizeof(struct ip)) ?
4983 (struct ip *)m->m_next->m_data :
4984 (struct ip *)(m->m_data + e_hlen);
4985 /* ip_hl is number of 32-bit words */
4986 ip_hlen = (ip4->ip_hl << 1);
4989 case ETHERTYPE_IPV6:
4990 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4991 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4992 (struct ip6_hdr *)m->m_next->m_data :
4993 (struct ip6_hdr *)(m->m_data + e_hlen);
4994 /* XXX cannot support offload with IPv6 extensions */
4995 ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4999 /* We can't offload in this case... */
5000 /* XXX error stat ??? */
5004 hlen = (e_hlen >> 1);
5006 /* note that rest of global_data is indirectly zeroed here */
5007 if (m->m_flags & M_VLANTAG) {
5009 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5011 pbd->global_data = htole16(hlen);
5014 pbd->ip_hlen_w = ip_hlen;
5016 hlen += pbd->ip_hlen_w;
5018 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5020 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5023 th = (struct tcphdr *)(ip + (ip_hlen << 1));
5024 /* th_off is number of 32-bit words */
5025 hlen += (uint16_t)(th->th_off << 1);
5026 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5028 uh = (struct udphdr *)(ip + (ip_hlen << 1));
5029 hlen += (sizeof(struct udphdr) / 2);
5031 /* valid case as only CSUM_IP was set */
5035 pbd->total_hlen_w = htole16(hlen);
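/*
 * Worked example (untagged IPv4/TCP, no options): e_hlen = 14 bytes gives
 * hlen = 7 words; ip_hlen_w = 10 words (20-byte IPv4 header) brings hlen
 * to 17; a 20-byte TCP header (th_off = 5) adds 10 more words, so
 * total_hlen_w = 27 and the function returns 27 * 2 = 54 bytes.
 */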
5037 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5040 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5041 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5042 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5044 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5047 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5048 * checksums and does not know anything about the UDP header and where
5049 * the checksum field is located. It only knows about TCP. Therefore
5050 * we "lie" to the hardware for outgoing UDP packets w/ checksum
5051 * offload. Since the checksum field offset for TCP is 16 bytes and
5052 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5053 * bytes less than the start of the UDP header. This allows the
5054 * hardware to write the checksum in the correct spot. But the
5055 * hardware will compute a checksum which includes the last 10 bytes
5056 * of the IP header. To correct this we tweak the stack computed
5057 * pseudo checksum by folding in the calculation of the inverse
5058 * checksum for those final 10 bytes of the IP header. This allows
5059 * the correct checksum to be computed by the hardware.
5062 /* set pointer 10 bytes before UDP header */
5063 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5065 /* calculate a pseudo header checksum over the first 10 bytes */
5066 tmp_csum = in_pseudo(*tmp_uh,
5068 *(uint16_t *)(tmp_uh + 2));
5070 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
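/*
 * Concretely, for a standard 20-byte IPv4 header: uh begins at IP header
 * offset 20, so tmp_uh points at offset 10, and the three in_pseudo()
 * arguments above cover bytes 10-19 of the IP header (the header checksum
 * plus the source and destination addresses). Folding in the one's
 * complement of that partial sum pre-cancels the extra contribution the
 * hardware will add by starting its checksum 10 bytes early.
 */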
5073 return (hlen * 2); /* entire header length, number of bytes */
5077 bxe_set_pbd_lso_e2(struct mbuf *m,
5078 uint32_t *parsing_data)
5080 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5081 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5082 ETH_TX_PARSE_BD_E2_LSO_MSS);
5084 /* XXX test for IPv6 with extension header... */
5088 bxe_set_pbd_lso(struct mbuf *m,
5089 struct eth_tx_parse_bd_e1x *pbd)
5091 struct ether_vlan_header *eh = NULL;
5092 struct ip *ip = NULL;
5093 struct tcphdr *th = NULL;
5096 /* get the Ethernet header */
5097 eh = mtod(m, struct ether_vlan_header *);
5099 /* handle VLAN encapsulation if present */
5100 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5101 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5103 /* get the IP and TCP header, with LSO entire header in first mbuf */
5104 /* XXX assuming IPv4 */
5105 ip = (struct ip *)(m->m_data + e_hlen);
5106 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5108 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5109 pbd->tcp_send_seq = ntohl(th->th_seq);
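/*
 * ((uint32_t *)th)[3] below is the fourth 32-bit word of the TCP header
 * (byte offset 12): data offset/reserved in the top byte, the flags in
 * the next byte, then the window. After ntohl(), shifting right by 16
 * and masking with 0xff isolates the flags byte.
 */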
5110 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5114 pbd->ip_id = ntohs(ip->ip_id);
5115 pbd->tcp_pseudo_csum =
5116 ntohs(in_pseudo(ip->ip_src.s_addr,
5118 htons(IPPROTO_TCP)));
5121 pbd->tcp_pseudo_csum =
5122 ntohs(in_pseudo(&ip6->ip6_src,
5124 htons(IPPROTO_TCP)));
5128 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5132 * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5133 * visible to the controller.
5135 * If an mbuf is submitted to this routine and cannot be given to the
5136 * controller (e.g. it has too many fragments) then the function may free
5137 * the mbuf and return to the caller.
5140 * 0 = Success, !0 = Failure
5141 * Note the side effect that an mbuf may be freed if it causes a problem.
5144 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5146 bus_dma_segment_t segs[32];
5148 struct bxe_sw_tx_bd *tx_buf;
5149 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5150 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5151 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5152 struct eth_tx_bd *tx_data_bd;
5153 struct eth_tx_bd *tx_total_pkt_size_bd;
5154 struct eth_tx_start_bd *tx_start_bd;
5155 uint16_t bd_prod, pkt_prod, total_pkt_size;
5157 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5158 struct bxe_softc *sc;
5159 uint16_t tx_bd_avail;
5160 struct ether_vlan_header *eh;
5161 uint32_t pbd_e2_parsing_data = 0;
5168 #if __FreeBSD_version >= 800000
5169 M_ASSERTPKTHDR(*m_head);
5170 #endif /* #if __FreeBSD_version >= 800000 */
5173 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5176 tx_total_pkt_size_bd = NULL;
5178 /* get the H/W pointer for packets and BDs */
5179 pkt_prod = fp->tx_pkt_prod;
5180 bd_prod = fp->tx_bd_prod;
5182 mac_type = UNICAST_ADDRESS;
5184 /* map the mbuf into the next open DMAable memory */
5185 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5186 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5188 segs, &nsegs, BUS_DMA_NOWAIT);
5190 /* mapping errors */
5191 if(__predict_false(error != 0)) {
5192 fp->eth_q_stats.tx_dma_mapping_failure++;
5193 if (error == ENOMEM) {
5194 /* resource issue, try again later */
5196 } else if (error == EFBIG) {
5197 /* possibly recoverable with defragmentation */
5198 fp->eth_q_stats.mbuf_defrag_attempts++;
5199 m0 = m_defrag(*m_head, M_DONTWAIT);
5201 fp->eth_q_stats.mbuf_defrag_failures++;
5204 /* defrag successful, try mapping again */
5206 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5208 segs, &nsegs, BUS_DMA_NOWAIT);
5210 fp->eth_q_stats.tx_dma_mapping_failure++;
5215 /* unknown, unrecoverable mapping error */
5216 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5217 bxe_dump_mbuf(sc, m0, FALSE);
5221 goto bxe_tx_encap_continue;
5224 tx_bd_avail = bxe_tx_avail(sc, fp);
5226 /* make sure there is enough room in the send queue */
5227 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5228 /* Recoverable, try again later. */
5229 fp->eth_q_stats.tx_hw_queue_full++;
5230 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5232 goto bxe_tx_encap_continue;
5235 /* capture the current H/W TX chain high watermark */
5236 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5237 (TX_BD_USABLE - tx_bd_avail))) {
5238 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5241 /* make sure it fits in the packet window */
5242 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5244 * The mbuf may be too big for the controller to handle. If the frame
5245 * is a TSO frame we'll need to do an additional check.
5247 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5248 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5249 goto bxe_tx_encap_continue; /* OK to send */
5251 fp->eth_q_stats.tx_window_violation_tso++;
5254 fp->eth_q_stats.tx_window_violation_std++;
5257 /* lets try to defragment this mbuf and remap it */
5258 fp->eth_q_stats.mbuf_defrag_attempts++;
5259 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5261 m0 = m_defrag(*m_head, M_DONTWAIT);
5263 fp->eth_q_stats.mbuf_defrag_failures++;
5264 /* Ugh, just drop the frame... :( */
5267 /* defrag successful, try mapping again */
5269 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5271 segs, &nsegs, BUS_DMA_NOWAIT);
5273 fp->eth_q_stats.tx_dma_mapping_failure++;
5274 /* No sense in trying to defrag/copy chain, drop it. :( */
5277 /* if the chain is still too long then drop it */
5278 if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5280 * in case TSO is enabled nsegs should be checked against
5281 * BXE_TSO_MAX_SEGMENTS
5283 if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5284 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5285 fp->eth_q_stats.nsegs_path1_errors++;
5289 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5290 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5291 fp->eth_q_stats.nsegs_path2_errors++;
5299 bxe_tx_encap_continue:
5301 /* Check for errors */
5304 /* recoverable try again later */
5306 fp->eth_q_stats.tx_soft_errors++;
5307 fp->eth_q_stats.mbuf_alloc_tx--;
5315 /* set flag according to packet type (UNICAST_ADDRESS is default) */
5316 if (m0->m_flags & M_BCAST) {
5317 mac_type = BROADCAST_ADDRESS;
5318 } else if (m0->m_flags & M_MCAST) {
5319 mac_type = MULTICAST_ADDRESS;
5322 /* store the mbuf into the mbuf ring */
5324 tx_buf->first_bd = fp->tx_bd_prod;
5327 /* prepare the first transmit (start) BD for the mbuf */
5328 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5331 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5332 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5334 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5335 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5336 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5337 total_pkt_size += tx_start_bd->nbytes;
5338 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5340 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5342 /* all frames have at least Start BD + Parsing BD */
5344 tx_start_bd->nbd = htole16(nbds);
5346 if (m0->m_flags & M_VLANTAG) {
5347 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5348 tx_start_bd->bd_flags.as_bitfield |=
5349 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5351 /* vf tx, start bd must hold the ethertype for fw to enforce it */
5353 /* map ethernet header to find type and header length */
5354 eh = mtod(m0, struct ether_vlan_header *);
5355 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5357 /* used by FW for packet accounting */
5358 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5363 * add a parsing BD from the chain. The parsing BD is always added,
5364 * though it is only used for TSO and checksum offloads.
5366 bd_prod = TX_BD_NEXT(bd_prod);
5368 if (m0->m_pkthdr.csum_flags) {
5369 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5370 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5371 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5374 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5375 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5376 ETH_TX_BD_FLAGS_L4_CSUM);
5377 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5378 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5379 ETH_TX_BD_FLAGS_IS_UDP |
5380 ETH_TX_BD_FLAGS_L4_CSUM);
5381 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5382 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5383 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5384 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5385 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5386 ETH_TX_BD_FLAGS_IS_UDP);
5390 if (!CHIP_IS_E1x(sc)) {
5391 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5392 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5394 if (m0->m_pkthdr.csum_flags) {
5395 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5398 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5401 uint16_t global_data = 0;
5403 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5404 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5406 if (m0->m_pkthdr.csum_flags) {
5407 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5410 SET_FLAG(global_data,
5411 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5412 pbd_e1x->global_data |= htole16(global_data);
5415 /* setup the parsing BD with TSO specific info */
5416 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5417 fp->eth_q_stats.tx_ofld_frames_lso++;
5418 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5420 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5421 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5423 /* split the first BD into header/data making the fw job easy */
5425 tx_start_bd->nbd = htole16(nbds);
5426 tx_start_bd->nbytes = htole16(hlen);
5428 bd_prod = TX_BD_NEXT(bd_prod);
5430 /* new transmit BD after the tx_parse_bd */
5431 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5432 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5433 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5434 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5435 if (tx_total_pkt_size_bd == NULL) {
5436 tx_total_pkt_size_bd = tx_data_bd;
5440 "TSO split header size is %d (%x:%x) nbds %d\n",
5441 le16toh(tx_start_bd->nbytes),
5442 le32toh(tx_start_bd->addr_hi),
5443 le32toh(tx_start_bd->addr_lo),
5447 if (!CHIP_IS_E1x(sc)) {
5448 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5450 bxe_set_pbd_lso(m0, pbd_e1x);
5454 if (pbd_e2_parsing_data) {
5455 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5458 /* prepare remaining BDs, start tx bd contains first seg/frag */
5459 for (i = 1; i < nsegs ; i++) {
5460 bd_prod = TX_BD_NEXT(bd_prod);
5461 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5462 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5463 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5464 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5465 if (tx_total_pkt_size_bd == NULL) {
5466 tx_total_pkt_size_bd = tx_data_bd;
5468 total_pkt_size += tx_data_bd->nbytes;
5471 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5473 if (tx_total_pkt_size_bd != NULL) {
5474 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5477 if (__predict_false(sc->debug & DBG_TX)) {
5478 tmp_bd = tx_buf->first_bd;
5479 for (i = 0; i < nbds; i++)
5483 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5484 "bd_flags=0x%x hdr_nbds=%d\n",
5487 le16toh(tx_start_bd->nbd),
5488 le16toh(tx_start_bd->vlan_or_ethertype),
5489 tx_start_bd->bd_flags.as_bitfield,
5490 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5491 } else if (i == 1) {
5494 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5495 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5496 "tcp_seq=%u total_hlen_w=%u\n",
5499 pbd_e1x->global_data,
5504 pbd_e1x->tcp_pseudo_csum,
5505 pbd_e1x->tcp_send_seq,
5506 le16toh(pbd_e1x->total_hlen_w));
5507 } else { /* if (pbd_e2) */
5509 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5510 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5513 pbd_e2->data.mac_addr.dst_hi,
5514 pbd_e2->data.mac_addr.dst_mid,
5515 pbd_e2->data.mac_addr.dst_lo,
5516 pbd_e2->data.mac_addr.src_hi,
5517 pbd_e2->data.mac_addr.src_mid,
5518 pbd_e2->data.mac_addr.src_lo,
5519 pbd_e2->parsing_data);
5523 if (i != 1) { /* skip parse bd as it doesn't hold data */
5524 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5526 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5529 le16toh(tx_data_bd->nbytes),
5530 le32toh(tx_data_bd->addr_hi),
5531 le32toh(tx_data_bd->addr_lo));
5534 tmp_bd = TX_BD_NEXT(tmp_bd);
5538 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5540 /* update TX BD producer index value for next TX */
5541 bd_prod = TX_BD_NEXT(bd_prod);
5544 * If the chain of tx_bd's describing this frame is adjacent to or spans
5545 * an eth_tx_next_bd element then we need to increment the nbds value.
5547 if (TX_BD_IDX(bd_prod) < nbds) {
5551 /* don't allow reordering of writes for nbd and packets */
5554 fp->tx_db.data.prod += nbds;
5556 /* producer points to the next free tx_bd at this point */
5558 fp->tx_bd_prod = bd_prod;
5560 DOORBELL(sc, fp->index, fp->tx_db.raw);
5562 fp->eth_q_stats.tx_pkts++;
5564 /* Prevent speculative reads from getting ahead of the status block. */
5565 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5566 0, 0, BUS_SPACE_BARRIER_READ);
5568 /* Prevent speculative reads from getting ahead of the doorbell. */
5569 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5570 0, 0, BUS_SPACE_BARRIER_READ);
5576 bxe_tx_start_locked(struct bxe_softc *sc,
5578 struct bxe_fastpath *fp)
5580 struct mbuf *m = NULL;
5582 uint16_t tx_bd_avail;
5584 BXE_FP_TX_LOCK_ASSERT(fp);
5586 /* keep adding entries while there are frames to send */
5587 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5590 * check for any frames to send
5591 * dequeue can still be NULL even if queue is not empty
5593 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
5594 if (__predict_false(m == NULL)) {
5598 /* the mbuf now belongs to us */
5599 fp->eth_q_stats.mbuf_alloc_tx++;
5602 * Put the frame into the transmit ring. If we don't have room,
5603 * place the mbuf back at the head of the TX queue, set the
5604 * OACTIVE flag, and wait for the NIC to drain the chain.
5606 if (__predict_false(bxe_tx_encap(fp, &m))) {
5607 fp->eth_q_stats.tx_encap_failures++;
5609 /* mark the TX queue as full and return the frame */
5610 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5611 IFQ_DRV_PREPEND(&ifp->if_snd, m);
5612 fp->eth_q_stats.mbuf_alloc_tx--;
5613 fp->eth_q_stats.tx_queue_xoff++;
5616 /* stop looking for more work */
5620 /* the frame was enqueued successfully */
5623 /* send a copy of the frame to any BPF listeners. */
5626 tx_bd_avail = bxe_tx_avail(sc, fp);
5628 /* handle any completions if we're running low */
5629 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5630 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5632 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5638 /* all TX packets were dequeued and/or the tx ring is full */
5640 /* reset the TX watchdog timeout timer */
5641 fp->watchdog_timer = BXE_TX_TIMEOUT;
5645 /* Legacy (non-RSS) dispatch routine */
5647 bxe_tx_start(struct ifnet *ifp)
5649 struct bxe_softc *sc;
5650 struct bxe_fastpath *fp;
5654 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5655 BLOGW(sc, "Interface not running, ignoring transmit request\n");
5659 if (!sc->link_vars.link_up) {
5660 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5666 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5667 fp->eth_q_stats.tx_queue_full_return++;
5672 bxe_tx_start_locked(sc, ifp, fp);
5673 BXE_FP_TX_UNLOCK(fp);
5676 #if __FreeBSD_version >= 901504
5679 bxe_tx_mq_start_locked(struct bxe_softc *sc,
5681 struct bxe_fastpath *fp,
5684 struct buf_ring *tx_br = fp->tx_br;
5686 int depth, rc, tx_count;
5687 uint16_t tx_bd_avail;
5691 BXE_FP_TX_LOCK_ASSERT(fp);
5693 if (sc->state != BXE_STATE_OPEN) {
5694 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5699 BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5704 rc = drbr_enqueue(ifp, tx_br, m);
5706 fp->eth_q_stats.tx_soft_errors++;
5707 goto bxe_tx_mq_start_locked_exit;
5711 if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5712 fp->eth_q_stats.tx_request_link_down_failures++;
5713 goto bxe_tx_mq_start_locked_exit;
5716 /* fetch the depth of the driver queue */
5717 depth = drbr_inuse(ifp, tx_br);
5718 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5719 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5722 /* keep adding entries while there are frames to send */
5723 while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5724 /* handle any completions if we're running low */
5725 tx_bd_avail = bxe_tx_avail(sc, fp);
5726 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5727 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5729 tx_bd_avail = bxe_tx_avail(sc, fp);
5730 if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5731 fp->eth_q_stats.bd_avail_too_less_failures++;
5733 drbr_advance(ifp, tx_br);
5739 /* the mbuf now belongs to us */
5740 fp->eth_q_stats.mbuf_alloc_tx++;
5743 * Put the frame into the transmit ring. If we don't have room,
5744 * place the mbuf back at the head of the TX queue, set the
5745 * OACTIVE flag, and wait for the NIC to drain the chain.
5747 rc = bxe_tx_encap(fp, &next);
5748 if (__predict_false(rc != 0)) {
5749 fp->eth_q_stats.tx_encap_failures++;
5751 /* mark the TX queue as full and save the frame */
5752 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5753 drbr_putback(ifp, tx_br, next);
5754 fp->eth_q_stats.mbuf_alloc_tx--;
5755 fp->eth_q_stats.tx_frames_deferred++;
5757 drbr_advance(ifp, tx_br);
5759 /* stop looking for more work */
5763 /* the transmit frame was enqueued successfully */
5766 /* send a copy of the frame to any BPF listeners */
5767 BPF_MTAP(ifp, next);
5769 drbr_advance(ifp, tx_br);
5772 /* all TX packets were dequeued and/or the tx ring is full */
5774 /* reset the TX watchdog timeout timer */
5775 fp->watchdog_timer = BXE_TX_TIMEOUT;
5778 bxe_tx_mq_start_locked_exit:
5779 /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5780 if (!drbr_empty(ifp, tx_br)) {
5781 fp->eth_q_stats.tx_mq_not_empty++;
5782 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5789 bxe_tx_mq_start_deferred(void *arg,
5792 struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5793 struct bxe_softc *sc = fp->sc;
5794 struct ifnet *ifp = sc->ifnet;
5797 bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5798 BXE_FP_TX_UNLOCK(fp);
5801 /* Multiqueue (TSS) dispatch routine. */
5803 bxe_tx_mq_start(struct ifnet *ifp,
5806 struct bxe_softc *sc = ifp->if_softc;
5807 struct bxe_fastpath *fp;
5810 fp_index = 0; /* default is the first queue */
5812 /* check if flowid is set */
5814 if (BXE_VALID_FLOWID(m))
5815 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
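/*
 * Hashing on the flowid keeps every packet of a given flow on the same
 * queue, preserving per-flow ordering. For example, with 4 queues a
 * flowid of 0x1236 selects queue 0x1236 % 4 = 2.
 */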
5817 fp = &sc->fp[fp_index];
5819 if (sc->state != BXE_STATE_OPEN) {
5820 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5824 if (BXE_FP_TX_TRYLOCK(fp)) {
5825 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5826 BXE_FP_TX_UNLOCK(fp);
5828 rc = drbr_enqueue(ifp, fp->tx_br, m);
5829 taskqueue_enqueue(fp->tq, &fp->tx_task);
5836 bxe_mq_flush(struct ifnet *ifp)
5838 struct bxe_softc *sc = ifp->if_softc;
5839 struct bxe_fastpath *fp;
5843 for (i = 0; i < sc->num_queues; i++) {
5846 if (fp->state != BXE_FP_STATE_IRQ) {
5847 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5848 fp->index, fp->state);
5852 if (fp->tx_br != NULL) {
5853 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5855 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5858 BXE_FP_TX_UNLOCK(fp);
5865 #endif /* FreeBSD_version >= 901504 */
5868 bxe_cid_ilt_lines(struct bxe_softc *sc)
5871 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5873 return (L2_ILT_LINES(sc));
5877 bxe_ilt_set_info(struct bxe_softc *sc)
5879 struct ilt_client_info *ilt_client;
5880 struct ecore_ilt *ilt = sc->ilt;
5883 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5884 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5887 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5888 ilt_client->client_num = ILT_CLIENT_CDU;
5889 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5890 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5891 ilt_client->start = line;
5892 line += bxe_cid_ilt_lines(sc);
5894 if (CNIC_SUPPORT(sc)) {
5895 line += CNIC_ILT_LINES;
5898 ilt_client->end = (line - 1);
5901 "ilt client[CDU]: start %d, end %d, "
5902 "psz 0x%x, flags 0x%x, hw psz %d\n",
5903 ilt_client->start, ilt_client->end,
5904 ilt_client->page_size,
5906 ilog2(ilt_client->page_size >> 12));
5909 if (QM_INIT(sc->qm_cid_count)) {
5910 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5911 ilt_client->client_num = ILT_CLIENT_QM;
5912 ilt_client->page_size = QM_ILT_PAGE_SZ;
5913 ilt_client->flags = 0;
5914 ilt_client->start = line;
5916 /* 4 bytes for each cid */
5917 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5920 ilt_client->end = (line - 1);
5923 "ilt client[QM]: start %d, end %d, "
5924 "psz 0x%x, flags 0x%x, hw psz %d\n",
5925 ilt_client->start, ilt_client->end,
5926 ilt_client->page_size, ilt_client->flags,
5927 ilog2(ilt_client->page_size >> 12));
5930 if (CNIC_SUPPORT(sc)) {
5932 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5933 ilt_client->client_num = ILT_CLIENT_SRC;
5934 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5935 ilt_client->flags = 0;
5936 ilt_client->start = line;
5937 line += SRC_ILT_LINES;
5938 ilt_client->end = (line - 1);
5941 "ilt client[SRC]: start %d, end %d, "
5942 "psz 0x%x, flags 0x%x, hw psz %d\n",
5943 ilt_client->start, ilt_client->end,
5944 ilt_client->page_size, ilt_client->flags,
5945 ilog2(ilt_client->page_size >> 12));
5948 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5949 ilt_client->client_num = ILT_CLIENT_TM;
5950 ilt_client->page_size = TM_ILT_PAGE_SZ;
5951 ilt_client->flags = 0;
5952 ilt_client->start = line;
5953 line += TM_ILT_LINES;
5954 ilt_client->end = (line - 1);
5957 "ilt client[TM]: start %d, end %d, "
5958 "psz 0x%x, flags 0x%x, hw psz %d\n",
5959 ilt_client->start, ilt_client->end,
5960 ilt_client->page_size, ilt_client->flags,
5961 ilog2(ilt_client->page_size >> 12));
5964 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5968 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5971 uint32_t rx_buf_size;
5973 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5975 for (i = 0; i < sc->num_queues; i++) {
5976 if (rx_buf_size <= MCLBYTES) {
5977 sc->fp[i].rx_buf_size = rx_buf_size;
5978 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5979 } else if (rx_buf_size <= MJUMPAGESIZE) {
5980 sc->fp[i].rx_buf_size = rx_buf_size;
5981 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5982 } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5983 sc->fp[i].rx_buf_size = MCLBYTES;
5984 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5985 } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5986 sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5987 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5988 } else {
5989 sc->fp[i].rx_buf_size = MCLBYTES;
5990 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5996 bxe_alloc_ilt_mem(struct bxe_softc *sc)
6001 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
6003 (M_NOWAIT | M_ZERO))) == NULL) {
6011 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6015 if ((sc->ilt->lines =
6016 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6018 (M_NOWAIT | M_ZERO))) == NULL) {
6026 bxe_free_ilt_mem(struct bxe_softc *sc)
6028 if (sc->ilt != NULL) {
6029 free(sc->ilt, M_BXE_ILT);
6035 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6037 if (sc->ilt->lines != NULL) {
6038 free(sc->ilt->lines, M_BXE_ILT);
6039 sc->ilt->lines = NULL;
6044 bxe_free_mem(struct bxe_softc *sc)
6048 for (i = 0; i < L2_ILT_LINES(sc); i++) {
6049 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6050 sc->context[i].vcxt = NULL;
6051 sc->context[i].size = 0;
6054 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6056 bxe_free_ilt_lines_mem(sc);
6061 bxe_alloc_mem(struct bxe_softc *sc)
6069 * Allocate memory for CDU context:
6070 * This memory is allocated separately and not in the generic ILT
6071 * functions because CDU differs in few aspects:
6072 * 1. There can be multiple entities allocating memory for context -
6073 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6074 * its own ILT lines.
6075 * 2. Since CDU page-size is not a single 4KB page (which is the case
6076 * for the other ILT clients), to be efficient we want to support
6077 * allocation of sub-page-size in the last entry.
6078 * 3. Context pointers are used by the driver to pass to FW / update
6079 * the context (for the other ILT clients the pointers are used just to
6080 * free the memory during unload).
6082 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6083 for (i = 0, allocated = 0; allocated < context_size; i++) {
6084 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6085 (context_size - allocated));
6087 if (bxe_dma_alloc(sc, sc->context[i].size,
6088 &sc->context[i].vcxt_dma,
6089 "cdu context") != 0) {
6094 sc->context[i].vcxt =
6095 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6097 allocated += sc->context[i].size;
6100 bxe_alloc_ilt_lines_mem(sc);
6102 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6103 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6105 for (i = 0; i < 4; i++) {
6107 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6109 sc->ilt->clients[i].page_size,
6110 sc->ilt->clients[i].start,
6111 sc->ilt->clients[i].end,
6112 sc->ilt->clients[i].client_num,
6113 sc->ilt->clients[i].flags);
6116 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6117 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6126 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6128 struct bxe_softc *sc;
6133 if (fp->rx_mbuf_tag == NULL) {
6137 /* free all mbufs and unload all maps */
6138 for (i = 0; i < RX_BD_TOTAL; i++) {
6139 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6140 bus_dmamap_sync(fp->rx_mbuf_tag,
6141 fp->rx_mbuf_chain[i].m_map,
6142 BUS_DMASYNC_POSTREAD);
6143 bus_dmamap_unload(fp->rx_mbuf_tag,
6144 fp->rx_mbuf_chain[i].m_map);
6147 if (fp->rx_mbuf_chain[i].m != NULL) {
6148 m_freem(fp->rx_mbuf_chain[i].m);
6149 fp->rx_mbuf_chain[i].m = NULL;
6150 fp->eth_q_stats.mbuf_alloc_rx--;
6156 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6158 struct bxe_softc *sc;
6159 int i, max_agg_queues;
6163 if (fp->rx_mbuf_tag == NULL) {
6167 max_agg_queues = MAX_AGG_QS(sc);
6169 /* release all mbufs and unload all DMA maps in the TPA pool */
6170 for (i = 0; i < max_agg_queues; i++) {
6171 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6172 bus_dmamap_sync(fp->rx_mbuf_tag,
6173 fp->rx_tpa_info[i].bd.m_map,
6174 BUS_DMASYNC_POSTREAD);
6175 bus_dmamap_unload(fp->rx_mbuf_tag,
6176 fp->rx_tpa_info[i].bd.m_map);
6179 if (fp->rx_tpa_info[i].bd.m != NULL) {
6180 m_freem(fp->rx_tpa_info[i].bd.m);
6181 fp->rx_tpa_info[i].bd.m = NULL;
6182 fp->eth_q_stats.mbuf_alloc_tpa--;
6188 bxe_free_sge_chain(struct bxe_fastpath *fp)
6190 struct bxe_softc *sc;
6195 if (fp->rx_sge_mbuf_tag == NULL) {
6199 /* free all mbufs and unload all maps */
6200 for (i = 0; i < RX_SGE_TOTAL; i++) {
6201 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6202 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6203 fp->rx_sge_mbuf_chain[i].m_map,
6204 BUS_DMASYNC_POSTREAD);
6205 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6206 fp->rx_sge_mbuf_chain[i].m_map);
6209 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6210 m_freem(fp->rx_sge_mbuf_chain[i].m);
6211 fp->rx_sge_mbuf_chain[i].m = NULL;
6212 fp->eth_q_stats.mbuf_alloc_sge--;
6218 bxe_free_fp_buffers(struct bxe_softc *sc)
6220 struct bxe_fastpath *fp;
6223 for (i = 0; i < sc->num_queues; i++) {
6226 #if __FreeBSD_version >= 901504
6227 if (fp->tx_br != NULL) {
6228 /* just in case bxe_mq_flush() wasn't called */
6229 if (mtx_initialized(&fp->tx_mtx)) {
6233 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6235 BXE_FP_TX_UNLOCK(fp);
6240 /* free all RX buffers */
6241 bxe_free_rx_bd_chain(fp);
6242 bxe_free_tpa_pool(fp);
6243 bxe_free_sge_chain(fp);
6245 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6246 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6247 fp->eth_q_stats.mbuf_alloc_rx);
6250 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6251 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6252 fp->eth_q_stats.mbuf_alloc_sge);
6255 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6256 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6257 fp->eth_q_stats.mbuf_alloc_tpa);
6260 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6261 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6262 fp->eth_q_stats.mbuf_alloc_tx);
6265 /* XXX verify all mbufs were reclaimed */
6270 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6271 uint16_t prev_index,
6274 struct bxe_sw_rx_bd *rx_buf;
6275 struct eth_rx_bd *rx_bd;
6276 bus_dma_segment_t segs[1];
6283 /* allocate the new RX BD mbuf */
6284 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6285 if (__predict_false(m == NULL)) {
6286 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6290 fp->eth_q_stats.mbuf_alloc_rx++;
6292 /* initialize the mbuf buffer length */
6293 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6295 /* map the mbuf into non-paged pool */
6296 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6297 fp->rx_mbuf_spare_map,
6298 m, segs, &nsegs, BUS_DMA_NOWAIT);
6299 if (__predict_false(rc != 0)) {
6300 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6302 fp->eth_q_stats.mbuf_alloc_rx--;
6306 /* all mbufs must map to a single segment */
6307 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6309 /* release any existing RX BD mbuf mappings */
6311 if (prev_index != index) {
6312 rx_buf = &fp->rx_mbuf_chain[prev_index];
6314 if (rx_buf->m_map != NULL) {
6315 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6316 BUS_DMASYNC_POSTREAD);
6317 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6321 * We only get here from bxe_rxeof() when the maximum number
6322 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6323 * holds the mbuf in the prev_index so it's OK to NULL it out
6324 * here without concern of a memory leak.
6326 fp->rx_mbuf_chain[prev_index].m = NULL;
6329 rx_buf = &fp->rx_mbuf_chain[index];
6331 if (rx_buf->m_map != NULL) {
6332 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6333 BUS_DMASYNC_POSTREAD);
6334 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6337 /* save the mbuf and mapping info for a future packet */
6338 map = (prev_index != index) ?
6339 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6340 rx_buf->m_map = fp->rx_mbuf_spare_map;
6341 fp->rx_mbuf_spare_map = map;
6342 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6343 BUS_DMASYNC_PREREAD);
6346 rx_bd = &fp->rx_chain[index];
6347 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6348 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
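/*
 * Note the spare-map pattern used above: the new mbuf is always loaded
 * into fp->rx_mbuf_spare_map first, and the maps are swapped only after
 * the load succeeds. A DMA mapping failure therefore leaves the ring
 * entry and its existing mapping fully intact. The TPA and SGE refill
 * routines below use the same technique.
 */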
6354 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6357 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6358 bus_dma_segment_t segs[1];
6364 /* allocate the new TPA mbuf */
6365 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6366 if (__predict_false(m == NULL)) {
6367 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6371 fp->eth_q_stats.mbuf_alloc_tpa++;
6373 /* initialize the mbuf buffer length */
6374 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6376 /* map the mbuf into non-paged pool */
6377 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6378 fp->rx_tpa_info_mbuf_spare_map,
6379 m, segs, &nsegs, BUS_DMA_NOWAIT);
6380 if (__predict_false(rc != 0)) {
6381 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6383 fp->eth_q_stats.mbuf_alloc_tpa--;
6387 /* all mbufs must map to a single segment */
6388 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6390 /* release any existing TPA mbuf mapping */
6391 if (tpa_info->bd.m_map != NULL) {
6392 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6393 BUS_DMASYNC_POSTREAD);
6394 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6397 /* save the mbuf and mapping info for the TPA mbuf */
6398 map = tpa_info->bd.m_map;
6399 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6400 fp->rx_tpa_info_mbuf_spare_map = map;
6401 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6402 BUS_DMASYNC_PREREAD);
6404 tpa_info->seg = segs[0];
6410 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6411 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6415 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6418 struct bxe_sw_rx_bd *sge_buf;
6419 struct eth_rx_sge *sge;
6420 bus_dma_segment_t segs[1];
6426 /* allocate a new SGE mbuf */
6427 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6428 if (__predict_false(m == NULL)) {
6429 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6433 fp->eth_q_stats.mbuf_alloc_sge++;
6435 /* initialize the mbuf buffer length */
6436 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6438 /* map the SGE mbuf into non-paged pool */
6439 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6440 fp->rx_sge_mbuf_spare_map,
6441 m, segs, &nsegs, BUS_DMA_NOWAIT);
6442 if (__predict_false(rc != 0)) {
6443 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6445 fp->eth_q_stats.mbuf_alloc_sge--;
6449 /* all mbufs must map to a single segment */
6450 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6452 sge_buf = &fp->rx_sge_mbuf_chain[index];
6454 /* release any existing SGE mbuf mapping */
6455 if (sge_buf->m_map != NULL) {
6456 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6457 BUS_DMASYNC_POSTREAD);
6458 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6461 /* save the mbuf and mapping info for a future packet */
6462 map = sge_buf->m_map;
6463 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6464 fp->rx_sge_mbuf_spare_map = map;
6465 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6466 BUS_DMASYNC_PREREAD);
6469 sge = &fp->rx_sge_chain[index];
6470 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6471 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6476 static __noinline int
6477 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6479 struct bxe_fastpath *fp;
6481 int ring_prod, cqe_ring_prod;
6484 for (i = 0; i < sc->num_queues; i++) {
6487 ring_prod = cqe_ring_prod = 0;
6491 /* allocate buffers for the RX BDs in RX BD chain */
6492 for (j = 0; j < sc->max_rx_bufs; j++) {
6493 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6495 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6497 goto bxe_alloc_fp_buffers_error;
6500 ring_prod = RX_BD_NEXT(ring_prod);
6501 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6504 fp->rx_bd_prod = ring_prod;
6505 fp->rx_cq_prod = cqe_ring_prod;
6506 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6508 max_agg_queues = MAX_AGG_QS(sc);
6510 fp->tpa_enable = TRUE;
6512 /* fill the TPA pool */
6513 for (j = 0; j < max_agg_queues; j++) {
6514 rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6516 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6518 fp->tpa_enable = FALSE;
6519 goto bxe_alloc_fp_buffers_error;
6522 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6525 if (fp->tpa_enable) {
6526 /* fill the RX SGE chain */
6528 for (j = 0; j < RX_SGE_USABLE; j++) {
6529 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6531 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6533 fp->tpa_enable = FALSE;
6535 goto bxe_alloc_fp_buffers_error;
6538 ring_prod = RX_SGE_NEXT(ring_prod);
6541 fp->rx_sge_prod = ring_prod;
6547 bxe_alloc_fp_buffers_error:
6549 /* unwind what was already allocated */
6550 bxe_free_rx_bd_chain(fp);
6551 bxe_free_tpa_pool(fp);
6552 bxe_free_sge_chain(fp);
6558 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6560 bxe_dma_free(sc, &sc->fw_stats_dma);
6562 sc->fw_stats_num = 0;
6564 sc->fw_stats_req_size = 0;
6565 sc->fw_stats_req = NULL;
6566 sc->fw_stats_req_mapping = 0;
6568 sc->fw_stats_data_size = 0;
6569 sc->fw_stats_data = NULL;
6570 sc->fw_stats_data_mapping = 0;
6574 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6576 uint8_t num_queue_stats;
6579 /* number of queues for statistics is number of eth queues */
6580 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6583 * Total number of FW statistics requests =
6584 * 1 for port stats + 1 for PF stats + num of queues
6586 sc->fw_stats_num = (2 + num_queue_stats);
6589 * Request is built from stats_query_header and an array of
6590 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6591 * rules. The real number of requests is configured in the
6592 * stats_query_header.
6595 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6596 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
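/*
 * This is simply a ceiling division, i.e.
 * DIV_ROUND_UP(fw_stats_num, STATS_QUERY_CMD_COUNT). For example, 8
 * queues give fw_stats_num = 10; if STATS_QUERY_CMD_COUNT were 16,
 * that would fit in a single group.
 */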
6598 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6599 sc->fw_stats_num, num_groups);
6601 sc->fw_stats_req_size =
6602 (sizeof(struct stats_query_header) +
6603 (num_groups * sizeof(struct stats_query_cmd_group)));
6606 * Data for statistics requests + stats_counter.
6607 * stats_counter holds per-STORM counters that are incremented when
6608 * STORM has finished with the current request. Memory for FCoE
6609 * offloaded statistics is counted anyway, even if it will not be sent.
6610 * VF stats are not accounted for here as the data of VF stats is stored
6611 * in memory allocated by the VF, not here.
6613 sc->fw_stats_data_size =
6614 (sizeof(struct stats_counter) +
6615 sizeof(struct per_port_stats) +
6616 sizeof(struct per_pf_stats) +
6617 /* sizeof(struct fcoe_statistics_params) + */
6618 (sizeof(struct per_queue_stats) * num_queue_stats));
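/*
 * Resulting layout of the single DMA allocation made below:
 *
 *   fw_stats_dma.vaddr -> +----------------------------------+
 *                         | request (fw_stats_req_size)      |
 *   fw_stats_data      -> +----------------------------------+
 *                         | stats_counter                    |
 *                         | per_port_stats                   |
 *                         | per_pf_stats                     |
 *                         | per_queue_stats * num_queue_stats|
 *                         +----------------------------------+
 */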
6620 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6621 &sc->fw_stats_dma, "fw stats") != 0) {
6622 bxe_free_fw_stats_mem(sc);
6626 /* set up the shortcuts */
6629 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6630 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6633 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6634 sc->fw_stats_req_size);
6635 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6636 sc->fw_stats_req_size);
6638 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6639 (uintmax_t)sc->fw_stats_req_mapping);
6641 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6642 (uintmax_t)sc->fw_stats_data_mapping);
6649 * 0-7 - Engine0 load counter.
6650 * 8-15 - Engine1 load counter.
6651 * 16 - Engine0 RESET_IN_PROGRESS bit.
6652 * 17 - Engine1 RESET_IN_PROGRESS bit.
6653 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6654 * function on the engine
6655 * 19 - Engine1 ONE_IS_LOADED.
6656 * 20 - Chip reset flow bit. When set, a non-leader must wait for the
6657 * leader to complete on both engines (check both RESET_IN_PROGRESS
6658 * bits, not just the one belonging to its engine).
6660 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
6661 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
6662 #define BXE_PATH0_LOAD_CNT_SHIFT 0
6663 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
6664 #define BXE_PATH1_LOAD_CNT_SHIFT 8
6665 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6666 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6667 #define BXE_GLOBAL_RESET_BIT 0x00040000
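/*
 * For example, engine 1's load counter is recovered from the register
 * with:
 *
 *     val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
 *     cnt = (val & BXE_PATH1_LOAD_CNT_MASK) >> BXE_PATH1_LOAD_CNT_SHIFT;
 *
 * which is exactly what bxe_get_load_status() below does.
 */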
6669 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6671 bxe_set_reset_global(struct bxe_softc *sc)
6674 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6675 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6676 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6677 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6680 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6682 bxe_clear_reset_global(struct bxe_softc *sc)
6685 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6686 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6687 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6688 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6691 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6693 bxe_reset_is_global(struct bxe_softc *sc)
6695 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6696 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6697 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6700 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6702 bxe_set_reset_done(struct bxe_softc *sc)
6705 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6706 BXE_PATH0_RST_IN_PROG_BIT;
6708 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6710 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6713 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6715 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6718 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6720 bxe_set_reset_in_progress(struct bxe_softc *sc)
6723 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6724 BXE_PATH0_RST_IN_PROG_BIT;
6726 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6728 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6731 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6733 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6736 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6738 bxe_reset_is_done(struct bxe_softc *sc,
6741 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6742 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6743 BXE_PATH0_RST_IN_PROG_BIT;
6745 /* return false if bit is set */
6746 return (val & bit) ? FALSE : TRUE;
6749 /* get the load status for an engine, should be run under rtnl lock */
6751 bxe_get_load_status(struct bxe_softc *sc,
6754 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6755 BXE_PATH0_LOAD_CNT_MASK;
6756 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6757 BXE_PATH0_LOAD_CNT_SHIFT;
6758 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6760 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6762 val = ((val & mask) >> shift);
6764 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6769 /* set pf load mark */
6770 /* XXX needs to be under rtnl lock */
6772 bxe_set_pf_load(struct bxe_softc *sc)
6776 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6777 BXE_PATH0_LOAD_CNT_MASK;
6778 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6779 BXE_PATH0_LOAD_CNT_SHIFT;
6781 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6783 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6784 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6786 /* get the current counter value */
6787 val1 = ((val & mask) >> shift);
6789 /* set bit of this PF */
6790 val1 |= (1 << SC_ABS_FUNC(sc));
6792 /* clear the old value */
6795 /* set the new one */
6796 val |= ((val1 << shift) & mask);
6798 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6800 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6803 /* clear pf load mark */
6804 /* XXX needs to be under rtnl lock */
6806 bxe_clear_pf_load(struct bxe_softc *sc)
6809 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6810 BXE_PATH0_LOAD_CNT_MASK;
6811 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6812 BXE_PATH0_LOAD_CNT_SHIFT;
6814 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6815 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6816 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6818 /* get the current counter value */
6819 val1 = (val & mask) >> shift;
6821 /* clear bit of that PF */
6822 val1 &= ~(1 << SC_ABS_FUNC(sc));
6824 /* clear the old value */
6827 /* set the new one */
6828 val |= ((val1 << shift) & mask);
6830 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6831 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6835 /* send load request to the MCP and analyze the response */
6837 bxe_nic_load_request(struct bxe_softc *sc,
6838 uint32_t *load_code)
6842 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6843 DRV_MSG_SEQ_NUMBER_MASK);
6845 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6847 /* get the current FW pulse sequence */
6848 sc->fw_drv_pulse_wr_seq =
6849 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6850 DRV_PULSE_SEQ_MASK);
6852 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6853 sc->fw_drv_pulse_wr_seq);
6856 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6857 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6859 /* if the MCP fails to respond we must abort */
6860 if (!(*load_code)) {
6861 BLOGE(sc, "MCP response failure!\n");
6865 /* if MCP refused then must abort */
6866 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6867 BLOGE(sc, "MCP refused load request\n");
6875 * Check whether another PF has already loaded FW to chip. In virtualized
6876 * environments a PF from another VM may have already initialized the device
6877 * including loading FW.
6880 bxe_nic_load_analyze_req(struct bxe_softc *sc,
6883 uint32_t my_fw, loaded_fw;
6885 /* is another pf loaded on this engine? */
6886 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6887 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6888 /* build my FW version dword */
6889 my_fw = (BCM_5710_FW_MAJOR_VERSION +
6890 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6891 (BCM_5710_FW_REVISION_VERSION << 16) +
6892 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6894 /* read loaded FW from chip */
6895 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6896 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6899 /* abort nic load if version mismatch */
6900 if (my_fw != loaded_fw) {
6901 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6910 /* mark PMF if applicable */
6912 bxe_nic_load_pmf(struct bxe_softc *sc,
6915 uint32_t ncsi_oem_data_addr;
6917 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6918 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6919 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6921 * Barrier for ordering between the write to sc->port.pmf here
6922 * and the read from the periodic task.
6930 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
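/*
 * Sketch of the store/load pairing the barrier comment above refers
 * to (the pmf assignment itself is elided here); mb() is the same
 * full barrier used elsewhere in this file:
 *
 *   sc->port.pmf = 1;   // publish the PMF role ...
 *   mb();               // ... and order the store before any load of
 *                       //     sc->port.pmf in the periodic task
 */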
6933 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6934 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6935 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6936 if (ncsi_oem_data_addr) {
6938 (ncsi_oem_data_addr +
6939 offsetof(struct glob_ncsi_oem_data, driver_version)),
6947 bxe_read_mf_cfg(struct bxe_softc *sc)
6949 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6953 if (BXE_NOMCP(sc)) {
6954 return; /* what should the default value be in this case? */
6958 * The formula for computing the absolute function number is...
6959 * For 2 port configuration (4 functions per port):
6960 * abs_func = 2 * vn + SC_PORT + SC_PATH
6961 * For 4 port configuration (2 functions per port):
6962 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
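/*
 * Worked example: in 4-port mode (n = 2), a function with vn = 1,
 * SC_PORT = 1 and SC_PATH = 0 computes
 * abs_func = 2 * (2 * 1 + 1) + 0 = 6, matching 4*vn + 2*port + path.
 */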
6964 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6965 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6966 if (abs_func >= E1H_FUNC_MAX) {
6969 sc->devinfo.mf_info.mf_config[vn] =
6970 MFCFG_RD(sc, func_mf_config[abs_func].config);
6973 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6974 FUNC_MF_CFG_FUNC_DISABLED) {
6975 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6976 sc->flags |= BXE_MF_FUNC_DIS;
6978 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6979 sc->flags &= ~BXE_MF_FUNC_DIS;
6983 /* acquire split MCP access lock register */
6984 static int bxe_acquire_alr(struct bxe_softc *sc)
6988 for (j = 0; j < 1000; j++) {
6990 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6991 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6992 if (val & (1L << 31))
6998 if (!(val & (1L << 31))) {
6999 BLOGE(sc, "Cannot acquire MCP access lock register\n");
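/*
 * The ALR is a hardware test-and-set: write the lock bit (1 << 31),
 * read it back, and retry until the readback shows the bit set. A
 * generic sketch of that polling pattern, with a hypothetical register
 * name and backoff delay:
 *
 *   for (j = 0; j < 1000; j++) {
 *       REG_WR(sc, LOCK_REG, 1U << 31);        // request the lock
 *       if (REG_RD(sc, LOCK_REG) & (1U << 31))
 *           break;                             // we own it now
 *       DELAY(5);                              // brief backoff
 *   }
 */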
7006 /* release split MCP access lock register */
7007 static void bxe_release_alr(struct bxe_softc *sc)
7009 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
7013 bxe_fan_failure(struct bxe_softc *sc)
7015 int port = SC_PORT(sc);
7016 uint32_t ext_phy_config;
7018 /* mark the failure */
7020 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7022 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7023 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7024 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7027 /* log the failure */
7028 BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7029 "the card to prevent permanent damage. "
7030 "Please contact OEM Support for assistance\n");
7034 bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7037 * Schedule device reset (unload)
7038 * This is because some boards consume enough power while the driver
7039 * is up to overheat if the fan fails.
7041 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7042 schedule_delayed_work(&sc->sp_rtnl_task, 0);
7046 /* this function is called upon a link interrupt */
7048 bxe_link_attn(struct bxe_softc *sc)
7050 uint32_t pause_enabled = 0;
7051 struct host_port_stats *pstats;
7053 struct bxe_fastpath *fp;
7056 /* Make sure that we are synced with the current statistics */
7057 bxe_stats_handle(sc, STATS_EVENT_STOP);
7058 BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7059 elink_link_update(&sc->link_params, &sc->link_vars);
7061 if (sc->link_vars.link_up) {
7063 /* dropless flow control */
7064 if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7067 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7072 (BAR_USTRORM_INTMEM +
7073 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7077 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7078 pstats = BXE_SP(sc, port_stats);
7079 /* reset old mac stats */
7080 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7083 if (sc->state == BXE_STATE_OPEN) {
7084 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7085 /* Restart tx when the link comes back. */
7086 FOR_EACH_ETH_QUEUE(sc, i) {
7088 taskqueue_enqueue(fp->tq, &fp->tx_task);
7094 if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7095 cmng_fns = bxe_get_cmng_fns_mode(sc);
7097 if (cmng_fns != CMNG_FNS_NONE) {
7098 bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7099 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7101 /* rate shaping and fairness are disabled */
7102 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7106 bxe_link_report_locked(sc);
7109 ; // XXX bxe_link_sync_notify(sc);
7114 bxe_attn_int_asserted(struct bxe_softc *sc,
7117 int port = SC_PORT(sc);
7118 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7119 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7120 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7121 NIG_REG_MASK_INTERRUPT_PORT0;
7123 uint32_t nig_mask = 0;
7128 if (sc->attn_state & asserted) {
7129 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7132 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7134 aeu_mask = REG_RD(sc, aeu_addr);
7136 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7137 aeu_mask, asserted);
7139 aeu_mask &= ~(asserted & 0x3ff);
7141 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7143 REG_WR(sc, aeu_addr, aeu_mask);
7145 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7147 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7148 sc->attn_state |= asserted;
7149 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7151 if (asserted & ATTN_HARD_WIRED_MASK) {
7152 if (asserted & ATTN_NIG_FOR_FUNC) {
7154 bxe_acquire_phy_lock(sc);
7155 /* save nig interrupt mask */
7156 nig_mask = REG_RD(sc, nig_int_mask_addr);
7158 /* If nig_mask is not set, no need to call the update function */
7160 REG_WR(sc, nig_int_mask_addr, 0);
7165 /* handle unicore attn? */
7168 if (asserted & ATTN_SW_TIMER_4_FUNC) {
7169 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7172 if (asserted & GPIO_2_FUNC) {
7173 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7176 if (asserted & GPIO_3_FUNC) {
7177 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7180 if (asserted & GPIO_4_FUNC) {
7181 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7185 if (asserted & ATTN_GENERAL_ATTN_1) {
7186 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7187 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7189 if (asserted & ATTN_GENERAL_ATTN_2) {
7190 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7191 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7193 if (asserted & ATTN_GENERAL_ATTN_3) {
7194 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7195 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7198 if (asserted & ATTN_GENERAL_ATTN_4) {
7199 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7200 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7202 if (asserted & ATTN_GENERAL_ATTN_5) {
7203 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7204 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7206 if (asserted & ATTN_GENERAL_ATTN_6) {
7207 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7208 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7213 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7214 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7216 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7219 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7221 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7222 REG_WR(sc, reg_addr, asserted);
7224 /* now set back the mask */
7225 if (asserted & ATTN_NIG_FOR_FUNC) {
7227 * Verify that IGU ack through BAR was written before restoring
7228 * NIG mask. This loop should exit after 2-3 iterations max.
7230 if (sc->devinfo.int_block != INT_BLOCK_HC) {
7234 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7235 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7236 (++cnt < MAX_IGU_ATTN_ACK_TO));
7239 BLOGE(sc, "Failed to verify IGU ack on time\n");
7245 REG_WR(sc, nig_int_mask_addr, nig_mask);
7247 bxe_release_phy_lock(sc);
7252 bxe_print_next_block(struct bxe_softc *sc,
7256 BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7260 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7265 uint32_t cur_bit = 0;
7268 for (i = 0; sig; i++) {
7269 cur_bit = ((uint32_t)0x1 << i);
7270 if (sig & cur_bit) {
7272 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7274 bxe_print_next_block(sc, par_num++, "BRB");
7276 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7278 bxe_print_next_block(sc, par_num++, "PARSER");
7280 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7282 bxe_print_next_block(sc, par_num++, "TSDM");
7284 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7286 bxe_print_next_block(sc, par_num++, "SEARCHER");
7288 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7290 bxe_print_next_block(sc, par_num++, "TCM");
7292 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7294 bxe_print_next_block(sc, par_num++, "TSEMI");
7296 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7298 bxe_print_next_block(sc, par_num++, "XPB");
7311 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7318 uint32_t cur_bit = 0;
7319 for (i = 0; sig; i++) {
7320 cur_bit = ((uint32_t)0x1 << i);
7321 if (sig & cur_bit) {
7323 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7325 bxe_print_next_block(sc, par_num++, "PBF");
7327 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7329 bxe_print_next_block(sc, par_num++, "QM");
7331 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7333 bxe_print_next_block(sc, par_num++, "TM");
7335 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7337 bxe_print_next_block(sc, par_num++, "XSDM");
7339 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7341 bxe_print_next_block(sc, par_num++, "XCM");
7343 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7345 bxe_print_next_block(sc, par_num++, "XSEMI");
7347 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7349 bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7351 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7353 bxe_print_next_block(sc, par_num++, "NIG");
7355 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7357 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7360 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7362 bxe_print_next_block(sc, par_num++, "DEBUG");
7364 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7366 bxe_print_next_block(sc, par_num++, "USDM");
7368 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7370 bxe_print_next_block(sc, par_num++, "UCM");
7372 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7374 bxe_print_next_block(sc, par_num++, "USEMI");
7376 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7378 bxe_print_next_block(sc, par_num++, "UPB");
7380 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7382 bxe_print_next_block(sc, par_num++, "CSDM");
7384 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7386 bxe_print_next_block(sc, par_num++, "CCM");
7399 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7404 uint32_t cur_bit = 0;
7407 for (i = 0; sig; i++) {
7408 cur_bit = ((uint32_t)0x1 << i);
7409 if (sig & cur_bit) {
7411 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7413 bxe_print_next_block(sc, par_num++, "CSEMI");
7415 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7417 bxe_print_next_block(sc, par_num++, "PXP");
7419 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7421 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7423 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7425 bxe_print_next_block(sc, par_num++, "CFC");
7427 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7429 bxe_print_next_block(sc, par_num++, "CDU");
7431 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7433 bxe_print_next_block(sc, par_num++, "DMAE");
7435 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7437 bxe_print_next_block(sc, par_num++, "IGU");
7439 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7441 bxe_print_next_block(sc, par_num++, "MISC");
7454 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7460 uint32_t cur_bit = 0;
7463 for (i = 0; sig; i++) {
7464 cur_bit = ((uint32_t)0x1 << i);
7465 if (sig & cur_bit) {
7467 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7469 bxe_print_next_block(sc, par_num++, "MCP ROM");
7472 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7474 bxe_print_next_block(sc, par_num++,
7478 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7480 bxe_print_next_block(sc, par_num++,
7484 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7486 bxe_print_next_block(sc, par_num++,
7501 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7506 uint32_t cur_bit = 0;
7509 for (i = 0; sig; i++) {
7510 cur_bit = ((uint32_t)0x1 << i);
7511 if (sig & cur_bit) {
7513 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7515 bxe_print_next_block(sc, par_num++, "PGLUE_B");
7517 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7519 bxe_print_next_block(sc, par_num++, "ATC");
7532 bxe_parity_attn(struct bxe_softc *sc,
7539 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7540 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7541 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7542 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7543 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7544 BLOGE(sc, "Parity error: HW block parity attention:\n"
7545 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7546 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7547 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7548 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7549 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7550 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7553 BLOGI(sc, "Parity errors detected in blocks: ");
7556 bxe_check_blocks_with_parity0(sc, sig[0] &
7557 HW_PRTY_ASSERT_SET_0,
7560 bxe_check_blocks_with_parity1(sc, sig[1] &
7561 HW_PRTY_ASSERT_SET_1,
7562 par_num, global, print);
7564 bxe_check_blocks_with_parity2(sc, sig[2] &
7565 HW_PRTY_ASSERT_SET_2,
7568 bxe_check_blocks_with_parity3(sc, sig[3] &
7569 HW_PRTY_ASSERT_SET_3,
7570 par_num, global, print);
7572 bxe_check_blocks_with_parity4(sc, sig[4] &
7573 HW_PRTY_ASSERT_SET_4,
7579 if (*global == TRUE) {
7580 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
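/*
 * Each bxe_check_blocks_with_parityN() above walks the set bits of its
 * signature word; the (elided) tail of each handled case clears the bit
 * just processed, which is what lets "for (i = 0; sig; i++)" terminate.
 * A minimal sketch of that walk:
 *
 *   for (i = 0; sig != 0; i++) {
 *       cur_bit = ((uint32_t)0x1 << i);
 *       if (sig & cur_bit) {
 *           // ... print/handle the block for bit i ...
 *           sig &= ~cur_bit;   // consume the bit
 *       }
 *   }
 */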
7590 bxe_chk_parity_attn(struct bxe_softc *sc,
7594 struct attn_route attn = { {0} };
7595 int port = SC_PORT(sc);
7597 if(sc->state != BXE_STATE_OPEN)
7600 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7601 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7602 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7603 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7606 * Since MCP attentions can't be disabled inside the block, we need to
7607 * read AEU registers to see whether they're currently disabled
7609 attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7610 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7611 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7612 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7615 if (!CHIP_IS_E1x(sc))
7616 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7618 return (bxe_parity_attn(sc, global, print, attn.sig));
7622 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7626 boolean_t err_flg = FALSE;
7628 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7629 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7630 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7632 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7633 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7634 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7635 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7636 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7637 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7638 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7639 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7640 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7641 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7642 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7643 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7644 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7645 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7646 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7647 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7648 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7649 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7652 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7653 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7654 BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7656 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7657 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7658 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7659 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7660 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7661 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7662 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7663 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7664 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7665 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7666 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7667 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7670 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7671 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7672 BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7673 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7674 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7678 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7679 taskqueue_enqueue_timeout(taskqueue_thread,
7680 &sc->sp_err_timeout_task, hz/10);
7686 bxe_e1h_disable(struct bxe_softc *sc)
7688 int port = SC_PORT(sc);
7692 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7696 bxe_e1h_enable(struct bxe_softc *sc)
7698 int port = SC_PORT(sc);
7700 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7702 // XXX bxe_tx_enable(sc);
7706 * called due to MCP event (on pmf):
7707 * reread new bandwidth configuration
7709 * notify other functions about the change
7712 bxe_config_mf_bw(struct bxe_softc *sc)
7714 if (sc->link_vars.link_up) {
7715 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7716 // XXX bxe_link_sync_notify(sc);
7719 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7723 bxe_set_mf_bw(struct bxe_softc *sc)
7725 bxe_config_mf_bw(sc);
7726 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7730 bxe_handle_eee_event(struct bxe_softc *sc)
7732 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7733 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7736 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7739 bxe_drv_info_ether_stat(struct bxe_softc *sc)
7741 struct eth_stats_info *ether_stat =
7742 &sc->sp->drv_info_to_mcp.ether_stat;
7744 strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7745 ETH_STAT_INFO_VERSION_LEN);
7747 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7748 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7749 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7750 ether_stat->mac_local + MAC_PAD,
7753 ether_stat->mtu_size = sc->mtu;
7755 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7756 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
7757 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7760 // XXX ether_stat->feature_flags |= ???;
7762 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7764 ether_stat->txq_size = sc->tx_ring_size;
7765 ether_stat->rxq_size = sc->rx_ring_size;
7769 bxe_handle_drv_info_req(struct bxe_softc *sc)
7771 enum drv_info_opcode op_code;
7772 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7774 /* if drv_info version supported by MFW doesn't match - send NACK */
7775 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7776 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7780 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7781 DRV_INFO_CONTROL_OP_CODE_SHIFT);
7783 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7786 case ETH_STATS_OPCODE:
7787 bxe_drv_info_ether_stat(sc);
7789 case FCOE_STATS_OPCODE:
7790 case ISCSI_STATS_OPCODE:
7792 /* if op code isn't supported - send NACK */
7793 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7798 * If we got a drv_info attention from the MFW, these fields are defined in SHMEM2.
7801 SHMEM2_WR(sc, drv_info_host_addr_lo,
7802 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7803 SHMEM2_WR(sc, drv_info_host_addr_hi,
7804 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7806 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7810 bxe_dcc_event(struct bxe_softc *sc,
7813 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7815 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7817 * This is the only place besides the function initialization
7818 * where sc->flags can change, so it is done without any locks.
7821 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7822 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7823 sc->flags |= BXE_MF_FUNC_DIS;
7824 bxe_e1h_disable(sc);
7826 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7827 sc->flags &= ~BXE_MF_FUNC_DIS;
7830 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7833 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7834 bxe_config_mf_bw(sc);
7835 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7838 /* Report results to MCP */
7840 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7842 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7846 bxe_pmf_update(struct bxe_softc *sc)
7848 int port = SC_PORT(sc);
7852 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7855 * We need the mb() to ensure the ordering between the writing to
7856 * sc->port.pmf here and reading it from the bxe_periodic_task().
7860 /* queue a periodic task */
7861 // XXX schedule task...
7863 // XXX bxe_dcbx_pmf_update(sc);
7865 /* enable nig attention */
7866 val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7867 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7868 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7869 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7870 } else if (!CHIP_IS_E1x(sc)) {
7871 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7872 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
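/*
 * Bit-math example for the NIG attention mask above: for vn 2 the
 * expression yields 0xff0f | (1 << (2 + 4)) = 0xff4f, i.e. the common
 * 0xff0f edge mask plus this VN's own attention bit in bits 4..7.
 */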
7875 bxe_stats_handle(sc, STATS_EVENT_PMF);
7879 bxe_mc_assert(struct bxe_softc *sc)
7883 uint32_t row0, row1, row2, row3;
7886 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7888 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7890 /* print the asserts */
7891 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7893 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7894 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7895 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7896 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7898 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7899 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7900 i, row3, row2, row1, row0);
7908 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7910 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7913 /* print the asserts */
7914 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7916 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7917 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7918 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7919 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7921 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7922 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7923 i, row3, row2, row1, row0);
7931 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7933 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7936 /* print the asserts */
7937 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7939 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7940 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7941 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7942 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7944 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7945 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7946 i, row3, row2, row1, row0);
7954 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7956 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7959 /* print the asserts */
7960 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7962 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7963 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7964 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7965 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7967 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7968 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7969 i, row3, row2, row1, row0);
7980 bxe_attn_int_deasserted3(struct bxe_softc *sc,
7983 int func = SC_FUNC(sc);
7986 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7988 if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7990 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7991 bxe_read_mf_cfg(sc);
7992 sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7993 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7994 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7996 if (val & DRV_STATUS_DCC_EVENT_MASK)
7997 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7999 if (val & DRV_STATUS_SET_MF_BW)
8002 if (val & DRV_STATUS_DRV_INFO_REQ)
8003 bxe_handle_drv_info_req(sc);
8005 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
8008 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
8009 bxe_handle_eee_event(sc);
8011 if (sc->link_vars.periodic_flags &
8012 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
8013 /* sync with link */
8014 bxe_acquire_phy_lock(sc);
8015 sc->link_vars.periodic_flags &=
8016 ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
8017 bxe_release_phy_lock(sc);
8019 ; // XXX bxe_link_sync_notify(sc);
8020 bxe_link_report(sc);
8024 * Always call it here: bxe_link_report() will
8025 * prevent the link indication duplication.
8027 bxe_link_status_update(sc);
8029 } else if (attn & BXE_MC_ASSERT_BITS) {
8031 BLOGE(sc, "MC assert!\n");
8033 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8034 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8035 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8036 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8037 bxe_int_disable(sc);
8038 BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
8039 taskqueue_enqueue_timeout(taskqueue_thread,
8040 &sc->sp_err_timeout_task, hz/10);
8042 } else if (attn & BXE_MCP_ASSERT) {
8044 BLOGE(sc, "MCP assert!\n");
8045 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8046 BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
8047 taskqueue_enqueue_timeout(taskqueue_thread,
8048 &sc->sp_err_timeout_task, hz/10);
8049 bxe_int_disable(sc); /* avoid repetitive assert alerts */
8053 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8057 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8058 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8059 if (attn & BXE_GRC_TIMEOUT) {
8060 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8061 BLOGE(sc, "GRC time-out 0x%08x\n", val);
8063 if (attn & BXE_GRC_RSV) {
8064 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8065 BLOGE(sc, "GRC reserved 0x%08x\n", val);
8067 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8072 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8075 int port = SC_PORT(sc);
8077 uint32_t val0, mask0, val1, mask1;
8079 boolean_t err_flg = FALSE;
8081 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8082 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8083 BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8084 /* CFC error attention */
8086 BLOGE(sc, "FATAL error from CFC\n");
8091 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8092 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8093 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8094 /* RQ_USDMDP_FIFO_OVERFLOW */
8095 if (val & 0x18000) {
8096 BLOGE(sc, "FATAL error from PXP\n");
8100 if (!CHIP_IS_E1x(sc)) {
8101 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8102 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8107 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8108 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8110 if (attn & AEU_PXP2_HW_INT_BIT) {
8111 /* CQ47854 workaround: do not panic on
8112 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8114 if (!CHIP_IS_E1x(sc)) {
8115 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8116 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8117 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8118 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8120 * If only PXP2_EOP_ERROR_BIT is set in
8121 * STS0 and STS1, clear it.
8123 * We may lose additional attentions between
8124 * STS0 and STS_CLR0; in that case the user will
8125 * not be notified about them.
8127 if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8129 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8131 /* print the register, since no one can restore it */
8132 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8135 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8138 if (val0 & PXP2_EOP_ERROR_BIT) {
8139 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8143 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8144 * set then clear attention from PXP2 block without panic
8146 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8147 ((val1 & mask1) == 0))
8148 attn &= ~AEU_PXP2_HW_INT_BIT;
8153 if (attn & HW_INTERRUT_ASSERT_SET_2) {
8154 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8155 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8157 val = REG_RD(sc, reg_offset);
8158 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8159 REG_WR(sc, reg_offset, val);
8161 BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8162 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8164 bxe_panic(sc, ("HW block attention set2\n"));
8167 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
8168 taskqueue_enqueue_timeout(taskqueue_thread,
8169 &sc->sp_err_timeout_task, hz/10);
8175 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8178 int port = SC_PORT(sc);
8181 boolean_t err_flg = FALSE;
8183 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8184 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8185 BLOGE(sc, "DB hw attention 0x%08x\n", val);
8186 /* DORQ discard attention */
8188 BLOGE(sc, "FATAL error from DORQ\n");
8193 if (attn & HW_INTERRUT_ASSERT_SET_1) {
8194 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8195 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8197 val = REG_RD(sc, reg_offset);
8198 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8199 REG_WR(sc, reg_offset, val);
8201 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8202 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8204 bxe_panic(sc, ("HW block attention set1\n"));
8207 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8208 taskqueue_enqueue_timeout(taskqueue_thread,
8209 &sc->sp_err_timeout_task, hz/10);
8215 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8218 int port = SC_PORT(sc);
8222 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8223 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8225 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8226 val = REG_RD(sc, reg_offset);
8227 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8228 REG_WR(sc, reg_offset, val);
8230 BLOGW(sc, "SPIO5 hw attention\n");
8232 /* Fan failure attention */
8233 elink_hw_reset_phy(&sc->link_params);
8234 bxe_fan_failure(sc);
8237 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8238 bxe_acquire_phy_lock(sc);
8239 elink_handle_module_detect_int(&sc->link_params);
8240 bxe_release_phy_lock(sc);
8243 if (attn & HW_INTERRUT_ASSERT_SET_0) {
8244 val = REG_RD(sc, reg_offset);
8245 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8246 REG_WR(sc, reg_offset, val);
8249 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8250 taskqueue_enqueue_timeout(taskqueue_thread,
8251 &sc->sp_err_timeout_task, hz/10);
8253 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8254 (attn & HW_INTERRUT_ASSERT_SET_0)));
8259 bxe_attn_int_deasserted(struct bxe_softc *sc,
8260 uint32_t deasserted)
8262 struct attn_route attn;
8263 struct attn_route *group_mask;
8264 int port = SC_PORT(sc);
8269 uint8_t global = FALSE;
8272 * Need to take HW lock because MCP or other port might also
8273 * try to handle this event.
8275 bxe_acquire_alr(sc);
8277 if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8279 * In case of parity errors don't handle attentions so that
8280 * other function would "see" parity errors.
8282 // XXX schedule a recovery task...
8283 /* disable HW interrupts */
8284 bxe_int_disable(sc);
8285 BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8286 taskqueue_enqueue_timeout(taskqueue_thread,
8287 &sc->sp_err_timeout_task, hz/10);
8288 bxe_release_alr(sc);
8292 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8293 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8294 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8295 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8296 if (!CHIP_IS_E1x(sc)) {
8297 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8302 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8303 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8305 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8306 if (deasserted & (1 << index)) {
8307 group_mask = &sc->attn_group[index];
8310 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8311 group_mask->sig[0], group_mask->sig[1],
8312 group_mask->sig[2], group_mask->sig[3],
8313 group_mask->sig[4]);
8315 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8316 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8317 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8318 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8319 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8323 bxe_release_alr(sc);
8325 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8326 reg_addr = (HC_REG_COMMAND_REG + port*32 +
8327 COMMAND_REG_ATTN_BITS_CLR);
8329 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8334 "about to mask 0x%08x at %s addr 0x%08x\n", val,
8335 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8336 REG_WR(sc, reg_addr, val);
8338 if (~sc->attn_state & deasserted) {
8339 BLOGE(sc, "IGU error\n");
8342 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8343 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8345 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8347 aeu_mask = REG_RD(sc, reg_addr);
8349 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8350 aeu_mask, deasserted);
8351 aeu_mask |= (deasserted & 0x3ff);
8352 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8354 REG_WR(sc, reg_addr, aeu_mask);
8355 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8357 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8358 sc->attn_state &= ~deasserted;
8359 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8363 bxe_attn_int(struct bxe_softc *sc)
8365 /* read local copy of bits */
8366 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8367 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8368 uint32_t attn_state = sc->attn_state;
8370 /* look for changed bits */
8371 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
8372 uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
8375 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8376 attn_bits, attn_ack, asserted, deasserted);
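/*
 * Truth table for one attention bit in the expressions above:
 *
 *   attn_bits  attn_ack  attn_state  ->  result
 *       1          0          0          bit set in 'asserted'
 *       0          1          1          bit set in 'deasserted'
 *
 * Any other combination leaves the bit out of both masks on this pass.
 */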
8378 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8379 BLOGE(sc, "BAD attention state\n");
8382 /* handle bits that were raised */
8384 bxe_attn_int_asserted(sc, asserted);
8388 bxe_attn_int_deasserted(sc, deasserted);
8393 bxe_update_dsb_idx(struct bxe_softc *sc)
8395 struct host_sp_status_block *def_sb = sc->def_sb;
8398 mb(); /* status block is written to by the chip */
8400 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8401 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8402 rc |= BXE_DEF_SB_ATT_IDX;
8405 if (sc->def_idx != def_sb->sp_sb.running_index) {
8406 sc->def_idx = def_sb->sp_sb.running_index;
8407 rc |= BXE_DEF_SB_IDX;
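/*
 * Example: if both indices advanced since the last pass, this returns
 * (BXE_DEF_SB_ATT_IDX | BXE_DEF_SB_IDX) == 0x0003 and the slowpath
 * task services the attention path first, then the event queue.
 */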
8415 static inline struct ecore_queue_sp_obj *
8416 bxe_cid_to_q_obj(struct bxe_softc *sc,
8419 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8420 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8424 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8426 struct ecore_mcast_ramrod_params rparam;
8429 memset(&rparam, 0, sizeof(rparam));
8431 rparam.mcast_obj = &sc->mcast_obj;
8435 /* clear pending state for the last command */
8436 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8438 /* if there are pending mcast commands - send them */
8439 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8440 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8443 "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8447 BXE_MCAST_UNLOCK(sc);
8451 bxe_handle_classification_eqe(struct bxe_softc *sc,
8452 union event_ring_elem *elem)
8454 unsigned long ramrod_flags = 0;
8456 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8457 struct ecore_vlan_mac_obj *vlan_mac_obj;
8459 /* always push next commands out, don't wait here */
8460 bit_set(&ramrod_flags, RAMROD_CONT);
8462 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8463 case ECORE_FILTER_MAC_PENDING:
8464 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8465 vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8468 case ECORE_FILTER_MCAST_PENDING:
8469 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8471 * This is only relevant for 57710 where multicast MACs are
8472 * configured as unicast MACs using the same ramrod.
8474 bxe_handle_mcast_eqe(sc);
8478 BLOGE(sc, "Unsupported classification command: %d\n",
8479 elem->message.data.eth_event.echo);
8483 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8486 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8487 } else if (rc > 0) {
8488 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8493 bxe_handle_rx_mode_eqe(struct bxe_softc *sc,
8494 union event_ring_elem *elem)
8496 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8498 /* send rx_mode command again if was requested */
8499 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8501 bxe_set_storm_rx_mode(sc);
8506 bxe_update_eq_prod(struct bxe_softc *sc,
8509 storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8510 wmb(); /* keep prod updates ordered */
8514 bxe_eq_int(struct bxe_softc *sc)
8516 uint16_t hw_cons, sw_cons, sw_prod;
8517 union event_ring_elem *elem;
8522 struct ecore_queue_sp_obj *q_obj;
8523 struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8524 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8526 hw_cons = le16toh(*sc->eq_cons_sb);
8529 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8530 * When we get to the next page we need to adjust so the loop
8531 * condition below will be met. The next element is the size of a
8532 * regular element, hence we increment by 1.
8534 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
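/*
 * Illustration with a hypothetical 256-entry page: hw_cons runs
 * 1..255 then jumps to 257 while sw_cons runs 0..254 then 256, so on
 * the last-in-page value the (elided) body bumps hw_cons by one and
 * the "sw_cons != hw_cons" loop below still terminates.
 */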
8539 * This function may never run in parallel with itself for a
8540 * specific sc, so no read memory barrier is needed here.
8542 sw_cons = sc->eq_cons;
8543 sw_prod = sc->eq_prod;
8545 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8546 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8550 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8552 elem = &sc->eq[EQ_DESC(sw_cons)];
8554 /* elem CID originates from FW, actually LE */
8555 cid = SW_CID(elem->message.data.cfc_del_event.cid);
8556 opcode = elem->message.opcode;
8558 /* handle eq element */
8561 case EVENT_RING_OPCODE_STAT_QUERY:
8562 BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8564 /* nothing to do with stats comp */
8567 case EVENT_RING_OPCODE_CFC_DEL:
8568 /* handle according to cid range */
8569 /* we may want to verify here that the sc state is HALTING */
8570 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8571 q_obj = bxe_cid_to_q_obj(sc, cid);
8572 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8577 case EVENT_RING_OPCODE_STOP_TRAFFIC:
8578 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8579 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8582 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8585 case EVENT_RING_OPCODE_START_TRAFFIC:
8586 BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8587 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8590 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8593 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8594 echo = elem->message.data.function_update_event.echo;
8595 if (echo == SWITCH_UPDATE) {
8596 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8597 if (f_obj->complete_cmd(sc, f_obj,
8598 ECORE_F_CMD_SWITCH_UPDATE)) {
8604 "AFEX: ramrod completed FUNCTION_UPDATE\n");
8608 case EVENT_RING_OPCODE_FORWARD_SETUP:
8609 q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8610 if (q_obj->complete_cmd(sc, q_obj,
8611 ECORE_Q_CMD_SETUP_TX_ONLY)) {
8616 case EVENT_RING_OPCODE_FUNCTION_START:
8617 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8618 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8623 case EVENT_RING_OPCODE_FUNCTION_STOP:
8624 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8625 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8631 switch (opcode | sc->state) {
8632 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8633 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8634 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8635 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8636 rss_raw->clear_pending(rss_raw);
8639 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8640 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8641 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8642 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8643 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8644 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8645 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8646 bxe_handle_classification_eqe(sc, elem);
8649 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8650 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8651 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8652 BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8653 bxe_handle_mcast_eqe(sc);
8656 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8657 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8658 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8659 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8660 bxe_handle_rx_mode_eqe(sc, elem);
8664 /* unknown event: log an error and continue */
8665 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8666 elem->message.opcode, sc->state);
8674 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8676 sc->eq_cons = sw_cons;
8677 sc->eq_prod = sw_prod;
8679 /* make sure that above mem writes were issued towards the memory */
8682 /* update producer */
8683 bxe_update_eq_prod(sc, sc->eq_prod);
8687 bxe_handle_sp_tq(void *context,
8690 struct bxe_softc *sc = (struct bxe_softc *)context;
8693 BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8695 /* what work needs to be performed? */
8696 status = bxe_update_dsb_idx(sc);
8698 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8701 if (status & BXE_DEF_SB_ATT_IDX) {
8702 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8704 status &= ~BXE_DEF_SB_ATT_IDX;
8707 /* SP events: STAT_QUERY and others */
8708 if (status & BXE_DEF_SB_IDX) {
8709 /* handle EQ completions */
8710 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8712 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8713 le16toh(sc->def_idx), IGU_INT_NOP, 1);
8714 status &= ~BXE_DEF_SB_IDX;
8717 /* if status is non zero then something went wrong */
8718 if (__predict_false(status)) {
8719 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8722 /* ack status block only if something was actually handled */
8723 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8724 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8727 * Must be called after the EQ processing (since eq leads to sriov
8728 * ramrod completion flows).
8729 * This flow may have been scheduled by the arrival of a ramrod
8730 * completion, or by the sriov code rescheduling itself.
8732 // XXX bxe_iov_sp_task(sc);
8737 bxe_handle_fp_tq(void *context,
8740 struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8741 struct bxe_softc *sc = fp->sc;
8742 uint8_t more_tx = FALSE;
8743 uint8_t more_rx = FALSE;
8745 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8748 * IFF_DRV_RUNNING state can't be checked here since we process
8749 * slowpath events on a client queue during setup. Instead
8750 * we need to add a "process/continue" flag here that the driver
8751 * can use to tell the task not to do anything.
8754 if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
8759 /* update the fastpath index */
8760 bxe_update_fp_sb_idx(fp);
8762 /* XXX add loop here if ever support multiple tx CoS */
8763 /* fp->txdata[cos] */
8764 if (bxe_has_tx_work(fp)) {
8766 more_tx = bxe_txeof(sc, fp);
8767 BXE_FP_TX_UNLOCK(fp);
8770 if (bxe_has_rx_work(fp)) {
8771 more_rx = bxe_rxeof(sc, fp);
8774 if (more_rx /*|| more_tx*/) {
8775 /* still more work to do */
8776 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8780 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8781 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8785 bxe_task_fp(struct bxe_fastpath *fp)
8787 struct bxe_softc *sc = fp->sc;
8788 uint8_t more_tx = FALSE;
8789 uint8_t more_rx = FALSE;
8791 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8793 /* update the fastpath index */
8794 bxe_update_fp_sb_idx(fp);
8796 /* XXX add loop here if ever support multiple tx CoS */
8797 /* fp->txdata[cos] */
8798 if (bxe_has_tx_work(fp)) {
8800 more_tx = bxe_txeof(sc, fp);
8801 BXE_FP_TX_UNLOCK(fp);
8804 if (bxe_has_rx_work(fp)) {
8805 more_rx = bxe_rxeof(sc, fp);
8808 if (more_rx /*|| more_tx*/) {
8809 /* still more work to do; bail out of this ISR and process later */
8810 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8815 * Here we write the fastpath index taken before doing any tx or rx work.
8816 * It is quite possible that other HW events occurred up to this point
8817 * and were already processed above. Since we are about to write an
8818 * older fastpath index, a follow-up interrupt will arrive in which we
8819 * may not do any work.
8821 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8822 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8826 * Legacy interrupt entry point.
8828 * Verifies that the controller generated the interrupt and
8829 * then calls a separate routine to handle the various
8830 * interrupt causes: link, RX, and TX.
8833 bxe_intr_legacy(void *xsc)
8835 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8836 struct bxe_fastpath *fp;
8837 uint16_t status, mask;
8840 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8843 * 0 for ustorm, 1 for cstorm
8844 * the bits returned from ack_int() are 0-15
8845 * bit 0 = attention status block
8846 * bit 1 = fast path status block
8847 * a mask of 0x2 or more = tx/rx event
8848 * a mask of 1 = slow path event
8851 status = bxe_ack_int(sc);
8853 /* the interrupt is not for us */
8854 if (__predict_false(status == 0)) {
8855 BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8859 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8861 FOR_EACH_ETH_QUEUE(sc, i) {
8863 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8864 if (status & mask) {
8865 /* acknowledge and disable further fastpath interrupts */
8866 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8872 if (__predict_false(status & 0x1)) {
8873 /* acknowledge and disable further slowpath interrupts */
8874 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8876 /* schedule slowpath handler */
8877 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8882 if (__predict_false(status)) {
8883 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
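/*
 * Worked example of the fastpath mask math above: with CNIC support
 * compiled out (CNIC_SUPPORT(sc) == 0), fastpath 0 owns status bit
 * 0x2 (0x2 << 0), fastpath 1 owns 0x4, and so on; bit 0x1 always
 * routes to the slowpath task.
 */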
8887 /* slowpath interrupt entry point */
8889 bxe_intr_sp(void *xsc)
8891 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8893 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8895 /* acknowledge and disable further slowpath interrupts */
8896 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8898 /* schedule slowpath handler */
8899 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8902 /* fastpath interrupt entry point */
8904 bxe_intr_fp(void *xfp)
8906 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8907 struct bxe_softc *sc = fp->sc;
8909 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8912 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8913 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8915 /* acknowledge and disable further fastpath interrupts */
8916 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8921 /* Release all interrupts allocated by the driver. */
8923 bxe_interrupt_free(struct bxe_softc *sc)
8927 switch (sc->interrupt_mode) {
8928 case INTR_MODE_INTX:
8929 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8930 if (sc->intr[0].resource != NULL) {
8931 bus_release_resource(sc->dev,
8934 sc->intr[0].resource);
8938 for (i = 0; i < sc->intr_count; i++) {
8939 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8940 if (sc->intr[i].resource && sc->intr[i].rid) {
8941 bus_release_resource(sc->dev,
8944 sc->intr[i].resource);
8947 pci_release_msi(sc->dev);
8949 case INTR_MODE_MSIX:
8950 for (i = 0; i < sc->intr_count; i++) {
8951 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8952 if (sc->intr[i].resource && sc->intr[i].rid) {
8953 bus_release_resource(sc->dev,
8956 sc->intr[i].resource);
8959 pci_release_msi(sc->dev);
8962 /* nothing to do as initial allocation failed */
8968 * This function determines and allocates the appropriate
8969 * interrupt based on system capabilities and user request.
8971 * The user may force a particular interrupt mode, specify
8972 * the number of receive queues, specify the method for
8973 * distributing received frames to receive queues, or use
8974 * the default settings which will automatically select the
8975 * best supported combination. In addition, the OS may or
8976 * may not support certain combinations of these settings.
8977 * This routine attempts to reconcile the settings requested
8978 * by the user with the capabilities available from the system
8979 * to select the optimal combination of features.
8982 * 0 = Success, !0 = Failure.
8985 bxe_interrupt_alloc(struct bxe_softc *sc)
8989 int num_requested = 0;
8990 int num_allocated = 0;
8994 /* get the number of available MSI/MSI-X interrupts from the OS */
8995 if (sc->interrupt_mode > 0) {
8996 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8997 msix_count = pci_msix_count(sc->dev);
9000 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
9001 msi_count = pci_msi_count(sc->dev);
9004 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
9005 msi_count, msix_count);
9008 do { /* try allocating MSI-X interrupt resources (at least 2) */
9009 if (sc->interrupt_mode != INTR_MODE_MSIX) {
9013 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
9015 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9019 /* ask for the necessary number of MSI-X vectors */
9020 num_requested = min((sc->num_queues + 1), msix_count);
9022 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
9024 num_allocated = num_requested;
9025 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
9026 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
9027 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9031 if (num_allocated < 2) { /* possible? */
9032 BLOGE(sc, "MSI-X allocation less than 2!\n");
9033 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9034 pci_release_msi(sc->dev);
9038 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9039 num_requested, num_allocated);
9041 /* best effort so use the number of vectors allocated to us */
9042 sc->intr_count = num_allocated;
9043 sc->num_queues = num_allocated - 1;
9045 rid = 1; /* initial resource identifier */
9047 /* allocate the MSI-X vectors */
9048 for (i = 0; i < num_allocated; i++) {
9049 sc->intr[i].rid = (rid + i);
9051 if ((sc->intr[i].resource =
9052 bus_alloc_resource_any(sc->dev,
9055 RF_ACTIVE)) == NULL) {
9056 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9059 for (j = (i - 1); j >= 0; j--) {
9060 bus_release_resource(sc->dev,
9063 sc->intr[j].resource);
9068 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9069 pci_release_msi(sc->dev);
9073 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9077 do { /* try allocating MSI vector resources (at least 2) */
9078 if (sc->interrupt_mode != INTR_MODE_MSI) {
9082 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9084 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9088 /* ask for a single MSI vector */
9091 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9093 num_allocated = num_requested;
9094 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9095 BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9096 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9100 if (num_allocated != 1) { /* possible? */
9101 BLOGE(sc, "MSI allocation is not 1!\n");
9102 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9103 pci_release_msi(sc->dev);
9107 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9108 num_requested, num_allocated);
9110 /* best effort so use the number of vectors allocated to us */
9111 sc->intr_count = num_allocated;
9112 sc->num_queues = num_allocated;
9114 rid = 1; /* initial resource identifier */
9116 sc->intr[0].rid = rid;
9118 if ((sc->intr[0].resource =
9119 bus_alloc_resource_any(sc->dev,
9122 RF_ACTIVE)) == NULL) {
9123 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9126 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9127 pci_release_msi(sc->dev);
9131 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9134 do { /* try allocating INTx vector resources */
9135 if (sc->interrupt_mode != INTR_MODE_INTX) {
9139 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9141 /* only one vector for INTx */
9145 rid = 0; /* initial resource identifier */
9147 sc->intr[0].rid = rid;
9149 if ((sc->intr[0].resource =
9150 bus_alloc_resource_any(sc->dev,
9153 (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9154 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9157 sc->interrupt_mode = -1; /* Failed! */
9161 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9164 if (sc->interrupt_mode == -1) {
9165 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9169 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9170 sc->interrupt_mode, sc->num_queues);
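/*
 * Fallback summary for the allocation logic above (hypothetical
 * 4-queue adapter): first request num_queues + 1 = 5 MSI-X vectors
 * (one slowpath plus one per queue); if fewer than 2 are granted,
 * retry with a single MSI vector; if that also fails, fall back to a
 * shared legacy INTx line; only when all three fail is interrupt_mode
 * left at -1 and the allocation reported as failed.
 */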
9178 bxe_interrupt_detach(struct bxe_softc *sc)
9180 struct bxe_fastpath *fp;
9183 /* release interrupt resources */
9184 for (i = 0; i < sc->intr_count; i++) {
9185 if (sc->intr[i].resource && sc->intr[i].tag) {
9186 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9187 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9191 for (i = 0; i < sc->num_queues; i++) {
9194 taskqueue_drain(fp->tq, &fp->tq_task);
9195 taskqueue_drain(fp->tq, &fp->tx_task);
9196 while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9198 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9201 for (i = 0; i < sc->num_queues; i++) {
9203 if (fp->tq != NULL) {
9204 taskqueue_free(fp->tq);
9211 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9212 taskqueue_free(sc->sp_tq);
9218 * Enables interrupts and attaches the ISR(s).
9220 * When using multiple MSI/MSI-X vectors the first vector
9221 * is used for slowpath operations while all remaining
9222 * vectors are used for fastpath operations. If only a
9223 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9224 * ISR must look for both slowpath and fastpath completions.
9227 bxe_interrupt_attach(struct bxe_softc *sc)
9229 struct bxe_fastpath *fp;
9233 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9234 "bxe%d_sp_tq", sc->unit);
9235 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9236 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9237 taskqueue_thread_enqueue,
9239 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9240 "%s", sc->sp_tq_name);
9243 for (i = 0; i < sc->num_queues; i++) {
9245 snprintf(fp->tq_name, sizeof(fp->tq_name),
9246 "bxe%d_fp%d_tq", sc->unit, i);
9247 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9248 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9249 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9250 taskqueue_thread_enqueue,
9252 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9253 bxe_tx_mq_start_deferred, fp);
9254 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9258 /* setup interrupt handlers */
9259 if (sc->interrupt_mode == INTR_MODE_MSIX) {
9260 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9263 * Setup the interrupt handler. Note that we pass the driver instance
9264 * to the interrupt handler for the slowpath.
9266 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9267 (INTR_TYPE_NET | INTR_MPSAFE),
9268 NULL, bxe_intr_sp, sc,
9269 &sc->intr[0].tag)) != 0) {
9270 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9271 goto bxe_interrupt_attach_exit;
9274 bus_describe_intr(sc->dev, sc->intr[0].resource,
9275 sc->intr[0].tag, "sp");
9277 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9279 /* initialize the fastpath vectors (note the first was used for sp) */
9280 for (i = 0; i < sc->num_queues; i++) {
9282 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9285 * Setup the interrupt handler. Note that we pass the
9286 * fastpath context to the interrupt handler in this case.
9289 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9290 (INTR_TYPE_NET | INTR_MPSAFE),
9291 NULL, bxe_intr_fp, fp,
9292 &sc->intr[i + 1].tag)) != 0) {
9293 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9295 goto bxe_interrupt_attach_exit;
9298 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9299 sc->intr[i + 1].tag, "fp%02d", i);
9301 /* bind the fastpath instance to a cpu */
9302 if (sc->num_queues > 1) {
9303 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9306 fp->state = BXE_FP_STATE_IRQ;
9308 } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9309 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9312 * Setup the interrupt handler. Note that we pass the
9313 * driver instance to the interrupt handler which
9314 * will handle both the slowpath and fastpath.
9316 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9317 (INTR_TYPE_NET | INTR_MPSAFE),
9318 NULL, bxe_intr_legacy, sc,
9319 &sc->intr[0].tag)) != 0) {
9320 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9321 goto bxe_interrupt_attach_exit;
9324 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9325 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9328 * Setup the interrupt handler. Note that we pass the
9329 * driver instance to the interrupt handler which
9330 * will handle both the slowpath and fastpath.
9332 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9333 (INTR_TYPE_NET | INTR_MPSAFE),
9334 NULL, bxe_intr_legacy, sc,
9335 &sc->intr[0].tag)) != 0) {
9336 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9337 goto bxe_interrupt_attach_exit;
9341 bxe_interrupt_attach_exit:
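/*
 * Editor's sketch (not part of the driver build): in multi-vector mode the
 * convention above is vector 0 = slowpath, vectors 1..n = one fastpath
 * queue each; with a single vector one legacy ISR handles both. The
 * attach_isrs() helper and its callback types below are hypothetical:
 */
#if 0
typedef void (*isr_fn)(void *arg);
typedef int  (*setup_fn)(int vector, isr_fn isr, void *arg);

static int
attach_isrs(setup_fn setup, isr_fn sp_isr, isr_fn fp_isr,
            void *softc, void **fp, int num_queues)
{
    int i, rc;

    if ((rc = setup(0, sp_isr, softc)) != 0)   /* slowpath on vector 0 */
        return (rc);
    for (i = 0; i < num_queues; i++)           /* queue i on vector i+1 */
        if ((rc = setup(i + 1, fp_isr, fp[i])) != 0)
            return (rc);
    return (0);
}
#endif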
9346 static int bxe_init_hw_common_chip(struct bxe_softc *sc);
9347 static int bxe_init_hw_common(struct bxe_softc *sc);
9348 static int bxe_init_hw_port(struct bxe_softc *sc);
9349 static int bxe_init_hw_func(struct bxe_softc *sc);
9350 static void bxe_reset_common(struct bxe_softc *sc);
9351 static void bxe_reset_port(struct bxe_softc *sc);
9352 static void bxe_reset_func(struct bxe_softc *sc);
9353 static int bxe_gunzip_init(struct bxe_softc *sc);
9354 static void bxe_gunzip_end(struct bxe_softc *sc);
9355 static int bxe_init_firmware(struct bxe_softc *sc);
9356 static void bxe_release_firmware(struct bxe_softc *sc);
9359 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9360 .init_hw_cmn_chip = bxe_init_hw_common_chip,
9361 .init_hw_cmn = bxe_init_hw_common,
9362 .init_hw_port = bxe_init_hw_port,
9363 .init_hw_func = bxe_init_hw_func,
9365 .reset_hw_cmn = bxe_reset_common,
9366 .reset_hw_port = bxe_reset_port,
9367 .reset_hw_func = bxe_reset_func,
9369 .gunzip_init = bxe_gunzip_init,
9370 .gunzip_end = bxe_gunzip_end,
9372 .init_fw = bxe_init_firmware,
9373 .release_fw = bxe_release_firmware,
9377 bxe_init_func_obj(struct bxe_softc *sc)
9381 ecore_init_func_obj(sc,
9383 BXE_SP(sc, func_rdata),
9384 BXE_SP_MAPPING(sc, func_rdata),
9385 BXE_SP(sc, func_afex_rdata),
9386 BXE_SP_MAPPING(sc, func_afex_rdata),
9391 bxe_init_hw(struct bxe_softc *sc,
9394 struct ecore_func_state_params func_params = { NULL };
9397 /* prepare the parameters for function state transitions */
9398 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9400 func_params.f_obj = &sc->func_obj;
9401 func_params.cmd = ECORE_F_CMD_HW_INIT;
9403 func_params.params.hw_init.load_phase = load_code;
9406 * Via a plethora of function pointers, we will eventually reach
9407 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9409 rc = ecore_func_state_change(sc, &func_params);
9415 bxe_fill(struct bxe_softc *sc,
9422 if (!(len % 4) && !(addr % 4)) {
9423 for (i = 0; i < len; i += 4) {
9424 REG_WR(sc, (addr + i), fill);
9427 for (i = 0; i < len; i++) {
9428 REG_WR8(sc, (addr + i), fill);
9433 /* writes FP SP data to FW - data_size in dwords */
9435 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9437 uint32_t *sb_data_p,
9442 for (index = 0; index < data_size; index++) {
9444 (BAR_CSTRORM_INTMEM +
9445 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9446 (sizeof(uint32_t) * index)),
9447 *(sb_data_p + index));
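/*
 * Editor's sketch (not part of the driver build): bxe_wr_fp_sb_data() above
 * streams a host structure into storm memory one dword at a time. The same
 * pattern in isolation, with a fake register window standing in for the
 * BAR_CSTRORM_INTMEM writes:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint32_t fake_bar[1024];   /* stand-in device memory window */

static void
reg_wr32(uint32_t off, uint32_t val)
{
    fake_bar[off / sizeof(uint32_t)] = val;
}

static void
write_struct_as_dwords(uint32_t base, const void *data, size_t len)
{
    const uint32_t *p = (const uint32_t *)data;
    size_t i;

    /* len is assumed to be a whole number of dwords, as in the driver */
    for (i = 0; i < len / sizeof(uint32_t); i++)
        reg_wr32(base + (i * sizeof(uint32_t)), p[i]);
}
#endif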
9452 bxe_zero_fp_sb(struct bxe_softc *sc,
9455 struct hc_status_block_data_e2 sb_data_e2;
9456 struct hc_status_block_data_e1x sb_data_e1x;
9457 uint32_t *sb_data_p;
9458 uint32_t data_size = 0;
9460 if (!CHIP_IS_E1x(sc)) {
9461 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9462 sb_data_e2.common.state = SB_DISABLED;
9463 sb_data_e2.common.p_func.vf_valid = FALSE;
9464 sb_data_p = (uint32_t *)&sb_data_e2;
9465 data_size = (sizeof(struct hc_status_block_data_e2) /
9468 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9469 sb_data_e1x.common.state = SB_DISABLED;
9470 sb_data_e1x.common.p_func.vf_valid = FALSE;
9471 sb_data_p = (uint32_t *)&sb_data_e1x;
9472 data_size = (sizeof(struct hc_status_block_data_e1x) /
9476 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9478 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9479 0, CSTORM_STATUS_BLOCK_SIZE);
9480 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9481 0, CSTORM_SYNC_BLOCK_SIZE);
9485 bxe_wr_sp_sb_data(struct bxe_softc *sc,
9486 struct hc_sp_status_block_data *sp_sb_data)
9491 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9494 (BAR_CSTRORM_INTMEM +
9495 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9496 (i * sizeof(uint32_t))),
9497 *((uint32_t *)sp_sb_data + i));
9502 bxe_zero_sp_sb(struct bxe_softc *sc)
9504 struct hc_sp_status_block_data sp_sb_data;
9506 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9508 sp_sb_data.state = SB_DISABLED;
9509 sp_sb_data.p_func.vf_valid = FALSE;
9511 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9514 (BAR_CSTRORM_INTMEM +
9515 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9516 0, CSTORM_SP_STATUS_BLOCK_SIZE);
9518 (BAR_CSTRORM_INTMEM +
9519 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9520 0, CSTORM_SP_SYNC_BLOCK_SIZE);
9524 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9528 hc_sm->igu_sb_id = igu_sb_id;
9529 hc_sm->igu_seg_id = igu_seg_id;
9530 hc_sm->timer_value = 0xFF;
9531 hc_sm->time_to_expire = 0xFFFFFFFF;
9535 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9537 /* zero out state machine indices */
9540 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9543 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9544 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9545 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9546 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9551 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9552 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9555 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9556 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9557 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9558 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9559 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9560 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9561 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9562 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9566 bxe_init_sb(struct bxe_softc *sc,
9573 struct hc_status_block_data_e2 sb_data_e2;
9574 struct hc_status_block_data_e1x sb_data_e1x;
9575 struct hc_status_block_sm *hc_sm_p;
9576 uint32_t *sb_data_p;
9580 if (CHIP_INT_MODE_IS_BC(sc)) {
9581 igu_seg_id = HC_SEG_ACCESS_NORM;
9583 igu_seg_id = IGU_SEG_ACCESS_NORM;
9586 bxe_zero_fp_sb(sc, fw_sb_id);
9588 if (!CHIP_IS_E1x(sc)) {
9589 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9590 sb_data_e2.common.state = SB_ENABLED;
9591 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9592 sb_data_e2.common.p_func.vf_id = vfid;
9593 sb_data_e2.common.p_func.vf_valid = vf_valid;
9594 sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9595 sb_data_e2.common.same_igu_sb_1b = TRUE;
9596 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9597 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9598 hc_sm_p = sb_data_e2.common.state_machine;
9599 sb_data_p = (uint32_t *)&sb_data_e2;
9600 data_size = (sizeof(struct hc_status_block_data_e2) /
9602 bxe_map_sb_state_machines(sb_data_e2.index_data);
9604 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9605 sb_data_e1x.common.state = SB_ENABLED;
9606 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9607 sb_data_e1x.common.p_func.vf_id = 0xff;
9608 sb_data_e1x.common.p_func.vf_valid = FALSE;
9609 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9610 sb_data_e1x.common.same_igu_sb_1b = TRUE;
9611 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9612 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9613 hc_sm_p = sb_data_e1x.common.state_machine;
9614 sb_data_p = (uint32_t *)&sb_data_e1x;
9615 data_size = (sizeof(struct hc_status_block_data_e1x) /
9617 bxe_map_sb_state_machines(sb_data_e1x.index_data);
9620 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9621 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9623 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9625 /* write indices to HW - PCI guarantees endianness of regpairs */
9626 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9629 static inline uint8_t
9630 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9632 if (CHIP_IS_E1x(fp->sc)) {
9633 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9639 static inline uint32_t
9640 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
9641 struct bxe_fastpath *fp)
9643 uint32_t offset = BAR_USTRORM_INTMEM;
9645 if (!CHIP_IS_E1x(sc)) {
9646 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9648 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9655 bxe_init_eth_fp(struct bxe_softc *sc,
9658 struct bxe_fastpath *fp = &sc->fp[idx];
9659 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9660 unsigned long q_type = 0;
9666 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9667 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9669 fp->cl_id = (CHIP_IS_E1x(sc)) ?
9670 (SC_L_ID(sc) + idx) :
9671 /* want client ID same as IGU SB ID for non-E1 */
9673 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9675 /* setup sb indices */
9676 if (!CHIP_IS_E1x(sc)) {
9677 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
9678 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9680 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
9681 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9685 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9687 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9690 * XXX If multiple CoS is ever supported then each fastpath structure
9691 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9693 for (cos = 0; cos < sc->max_cos; cos++) {
9696 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9698 /* nothing more for a VF to do */
9703 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9704 fp->fw_sb_id, fp->igu_sb_id);
9706 bxe_update_fp_sb_idx(fp);
9708 /* Configure Queue State object */
9709 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9710 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9712 ecore_init_queue_obj(sc,
9713 &sc->sp_objs[idx].q_obj,
9718 BXE_SP(sc, q_rdata),
9719 BXE_SP_MAPPING(sc, q_rdata),
9722 /* configure classification DBs */
9723 ecore_init_mac_obj(sc,
9724 &sc->sp_objs[idx].mac_obj,
9728 BXE_SP(sc, mac_rdata),
9729 BXE_SP_MAPPING(sc, mac_rdata),
9730 ECORE_FILTER_MAC_PENDING,
9732 ECORE_OBJ_TYPE_RX_TX,
9735 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9736 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9740 bxe_update_rx_prod(struct bxe_softc *sc,
9741 struct bxe_fastpath *fp,
9742 uint16_t rx_bd_prod,
9743 uint16_t rx_cq_prod,
9744 uint16_t rx_sge_prod)
9746 struct ustorm_eth_rx_producers rx_prods = { 0 };
9749 /* update producers */
9750 rx_prods.bd_prod = rx_bd_prod;
9751 rx_prods.cqe_prod = rx_cq_prod;
9752 rx_prods.sge_prod = rx_sge_prod;
9755 * Make sure that the BD and SGE data is updated before updating the
9756 * producers since FW might read the BD/SGE right after the producer
9758 * This is only applicable for weak-ordered memory model archs such
9759 * as IA-64. The following barrier is also mandatory since the FW
9760 * assumes BDs must have buffers.
9764 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9766 (fp->ustorm_rx_prods_offset + (i * 4)),
9767 ((uint32_t *)&rx_prods)[i]);
9770 wmb(); /* keep prod updates ordered */
9773 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9774 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
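/*
 * Editor's sketch (not part of the driver build): the barrier discipline
 * above is the classic producer-publish pattern -- fence, write producer,
 * fence. Shown in isolation with hypothetical reg_wr32()/write_barrier()
 * stand-ins (__sync_synchronize() models the fence):
 */
#if 0
#include <stdint.h>

static uint32_t fake_prod_reg;

static void write_barrier(void) { __sync_synchronize(); }
static void reg_wr32(uint32_t off, uint32_t val) { (void)off; fake_prod_reg = val; }

static void
publish_producer(uint32_t prod_offset, uint32_t new_prod)
{
    write_barrier();                  /* BD/SGE data visible before prod */
    reg_wr32(prod_offset, new_prod);  /* expose new producer to device */
    write_barrier();                  /* keep later prod updates ordered */
}
#endif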
9778 bxe_init_rx_rings(struct bxe_softc *sc)
9780 struct bxe_fastpath *fp;
9783 for (i = 0; i < sc->num_queues; i++) {
9789 * Activate the BD ring...
9790 * Warning, this will generate an interrupt (to the TSTORM)
9791 * so this can only be done after the chip is initialized
9793 bxe_update_rx_prod(sc, fp,
9802 if (CHIP_IS_E1(sc)) {
9804 (BAR_USTRORM_INTMEM +
9805 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9806 U64_LO(fp->rcq_dma.paddr));
9808 (BAR_USTRORM_INTMEM +
9809 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9810 U64_HI(fp->rcq_dma.paddr));
9816 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9818 SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9819 fp->tx_db.data.zero_fill1 = 0;
9820 fp->tx_db.data.prod = 0;
9822 fp->tx_pkt_prod = 0;
9823 fp->tx_pkt_cons = 0;
9826 fp->eth_q_stats.tx_pkts = 0;
9830 bxe_init_tx_rings(struct bxe_softc *sc)
9834 for (i = 0; i < sc->num_queues; i++) {
9835 bxe_init_tx_ring_one(&sc->fp[i]);
9840 bxe_init_def_sb(struct bxe_softc *sc)
9842 struct host_sp_status_block *def_sb = sc->def_sb;
9843 bus_addr_t mapping = sc->def_sb_dma.paddr;
9844 int igu_sp_sb_index;
9846 int port = SC_PORT(sc);
9847 int func = SC_FUNC(sc);
9848 int reg_offset, reg_offset_en5;
9851 struct hc_sp_status_block_data sp_sb_data;
9853 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9855 if (CHIP_INT_MODE_IS_BC(sc)) {
9856 igu_sp_sb_index = DEF_SB_IGU_ID;
9857 igu_seg_id = HC_SEG_ACCESS_DEF;
9859 igu_sp_sb_index = sc->igu_dsb_id;
9860 igu_seg_id = IGU_SEG_ACCESS_DEF;
9864 section = ((uint64_t)mapping +
9865 offsetof(struct host_sp_status_block, atten_status_block));
9866 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9869 reg_offset = (port) ?
9870 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9871 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9872 reg_offset_en5 = (port) ?
9873 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9874 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9876 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9877 /* take care of sig[0]..sig[4] */
9878 for (sindex = 0; sindex < 4; sindex++) {
9879 sc->attn_group[index].sig[sindex] =
9880 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9883 if (!CHIP_IS_E1x(sc)) {
9885 * enable5 is separate from the rest of the registers,
9886 * and the address skip is 4 and not 16 between the different groups.
9889 sc->attn_group[index].sig[4] =
9890 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9892 sc->attn_group[index].sig[4] = 0;
9896 if (sc->devinfo.int_block == INT_BLOCK_HC) {
9897 reg_offset = (port) ?
9898 HC_REG_ATTN_MSG1_ADDR_L :
9899 HC_REG_ATTN_MSG0_ADDR_L;
9900 REG_WR(sc, reg_offset, U64_LO(section));
9901 REG_WR(sc, (reg_offset + 4), U64_HI(section));
9902 } else if (!CHIP_IS_E1x(sc)) {
9903 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9904 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9907 section = ((uint64_t)mapping +
9908 offsetof(struct host_sp_status_block, sp_sb));
9912 /* PCI guarantees endianness of regpairs */
9913 sp_sb_data.state = SB_ENABLED;
9914 sp_sb_data.host_sb_addr.lo = U64_LO(section);
9915 sp_sb_data.host_sb_addr.hi = U64_HI(section);
9916 sp_sb_data.igu_sb_id = igu_sp_sb_index;
9917 sp_sb_data.igu_seg_id = igu_seg_id;
9918 sp_sb_data.p_func.pf_id = func;
9919 sp_sb_data.p_func.vnic_id = SC_VN(sc);
9920 sp_sb_data.p_func.vf_id = 0xff;
9922 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9924 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9928 bxe_init_sp_ring(struct bxe_softc *sc)
9930 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9931 sc->spq_prod_idx = 0;
9932 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9933 sc->spq_prod_bd = sc->spq;
9934 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9938 bxe_init_eq_ring(struct bxe_softc *sc)
9940 union event_ring_elem *elem;
9943 for (i = 1; i <= NUM_EQ_PAGES; i++) {
9944 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9946 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9948 (i % NUM_EQ_PAGES)));
9949 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9951 (i % NUM_EQ_PAGES)));
9955 sc->eq_prod = NUM_EQ_DESC;
9956 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9958 atomic_store_rel_long(&sc->eq_spq_left,
9959 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
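/*
 * Editor's sketch (not part of the driver build): bxe_init_eq_ring() above
 * turns the last element of each event-queue page into a "next page"
 * pointer, closing the pages into a circular ring. The geometry below is
 * an arbitrary example, not the driver's:
 */
#if 0
#include <stdint.h>

#define EX_PAGES          8
#define EX_DESC_PER_PAGE  128   /* last slot of each page holds the link */

struct ex_desc { uint64_t next_page_addr; };

static void
chain_ring_pages(struct ex_desc ring[EX_PAGES][EX_DESC_PER_PAGE],
                 uint64_t paddr_base, uint64_t page_size)
{
    int i;

    /* page i's last descriptor points at page (i + 1) mod EX_PAGES */
    for (i = 0; i < EX_PAGES; i++)
        ring[i][EX_DESC_PER_PAGE - 1].next_page_addr =
            paddr_base + (((i + 1) % EX_PAGES) * page_size);
}
#endif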
9964 bxe_init_internal_common(struct bxe_softc *sc)
9969 * Zero this manually as its initialization is currently missing in the init tool.
9972 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9974 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9978 if (!CHIP_IS_E1x(sc)) {
9979 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9980 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9985 bxe_init_internal(struct bxe_softc *sc,
9988 switch (load_code) {
9989 case FW_MSG_CODE_DRV_LOAD_COMMON:
9990 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9991 bxe_init_internal_common(sc);
9994 case FW_MSG_CODE_DRV_LOAD_PORT:
9998 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9999 /* internal memory per function is initialized inside bxe_pf_init */
10003 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
10009 storm_memset_func_cfg(struct bxe_softc *sc,
10010 struct tstorm_eth_function_common_config *tcfg,
10016 addr = (BAR_TSTRORM_INTMEM +
10017 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
10018 size = sizeof(struct tstorm_eth_function_common_config);
10019 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
10023 bxe_func_init(struct bxe_softc *sc,
10024 struct bxe_func_init_params *p)
10026 struct tstorm_eth_function_common_config tcfg = { 0 };
10028 if (CHIP_IS_E1x(sc)) {
10029 storm_memset_func_cfg(sc, &tcfg, p->func_id);
10032 /* Enable the function in the FW */
10033 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10034 storm_memset_func_en(sc, p->func_id, 1);
10037 if (p->func_flgs & FUNC_FLG_SPQ) {
10038 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10040 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10046 * Calculates the sum of vn_min_rates.
10047 * It's needed for further normalizing of the min_rates.
10049 * Returns the sum of vn_min_rates, or
10051 * 0 - if all the min_rates are 0.
10052 * In the latter case the fairness algorithm should be deactivated.
10053 * If not all min rates are zero, those that are zero will be set to 1.
10056 bxe_calc_vn_min(struct bxe_softc *sc,
10057 struct cmng_init_input *input)
10060 uint32_t vn_min_rate;
10064 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10065 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10066 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10067 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10069 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10070 /* skip hidden VNs */
10072 } else if (!vn_min_rate) {
10073 /* If min rate is zero - set it to 100 */
10074 vn_min_rate = DEF_MIN_RATE;
10079 input->vnic_min_rate[vn] = vn_min_rate;
10082 /* if ETS or all min rates are zeros - disable fairness */
10083 if (BXE_IS_ETS_ENABLED(sc)) {
10084 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10085 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10086 } else if (all_zero) {
10087 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10088 BLOGD(sc, DBG_LOAD,
10089 "Fairness disabled (all MIN values are zero)\n");
10091 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10095 static inline uint16_t
10096 bxe_extract_max_cfg(struct bxe_softc *sc,
10099 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10100 FUNC_MF_CFG_MAX_BW_SHIFT);
10103 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10111 bxe_calc_vn_max(struct bxe_softc *sc,
10113 struct cmng_init_input *input)
10115 uint16_t vn_max_rate;
10116 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10119 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10122 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10124 if (IS_MF_SI(sc)) {
10125 /* max_cfg is in percent of link speed */
10126 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10127 } else { /* SD modes */
10128 /* max_cfg is absolute in 100Mb units */
10129 vn_max_rate = (max_cfg * 100);
10133 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10135 input->vnic_max_rate[vn] = vn_max_rate;
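/*
 * Editor's sketch (not part of the driver build): the min/max bandwidth
 * arithmetic above, pulled out for clarity. The MIN_BW field layout is an
 * illustrative assumption; the scale factors (x100, percent-of-line-speed
 * vs. absolute 100Mb units) mirror the code above:
 */
#if 0
#include <stdint.h>

#define EX_MIN_BW_MASK   0x00ff0000u   /* assumed field layout */
#define EX_MIN_BW_SHIFT  16
#define EX_DEF_MIN_RATE  100

/* min rate: config field scaled by 100; zero falls back to the default */
static uint32_t
vn_min_rate_from_cfg(uint32_t vn_cfg)
{
    uint32_t r = ((vn_cfg & EX_MIN_BW_MASK) >> EX_MIN_BW_SHIFT) * 100;

    return (r ? r : EX_DEF_MIN_RATE);
}

/* max rate: percent of link speed (SI mode) or absolute 100Mb units (SD) */
static uint32_t
vn_max_rate_from_cfg(uint32_t max_cfg, uint32_t line_speed, int is_si)
{
    return (is_si ? (line_speed * max_cfg) / 100 : max_cfg * 100);
}
#endif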
10139 bxe_cmng_fns_init(struct bxe_softc *sc,
10143 struct cmng_init_input input;
10146 memset(&input, 0, sizeof(struct cmng_init_input));
10148 input.port_rate = sc->link_vars.line_speed;
10150 if (cmng_type == CMNG_FNS_MINMAX) {
10151 /* read mf conf from shmem */
10153 bxe_read_mf_cfg(sc);
10156 /* get VN min rate and enable fairness if not 0 */
10157 bxe_calc_vn_min(sc, &input);
10159 /* get VN max rate */
10160 if (sc->port.pmf) {
10161 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10162 bxe_calc_vn_max(sc, vn, &input);
10166 /* always enable rate shaping and fairness */
10167 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10169 ecore_init_cmng(&input, &sc->cmng);
10173 /* rate shaping and fairness are disabled */
10174 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10178 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10180 if (CHIP_REV_IS_SLOW(sc)) {
10181 return (CMNG_FNS_NONE);
10185 return (CMNG_FNS_MINMAX);
10188 return (CMNG_FNS_NONE);
10192 storm_memset_cmng(struct bxe_softc *sc,
10193 struct cmng_init *cmng,
10201 addr = (BAR_XSTRORM_INTMEM +
10202 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10203 size = sizeof(struct cmng_struct_per_port);
10204 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10206 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10207 func = func_by_vn(sc, vn);
10209 addr = (BAR_XSTRORM_INTMEM +
10210 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10211 size = sizeof(struct rate_shaping_vars_per_vn);
10212 ecore_storm_memset_struct(sc, addr, size,
10213 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10215 addr = (BAR_XSTRORM_INTMEM +
10216 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10217 size = sizeof(struct fairness_vars_per_vn);
10218 ecore_storm_memset_struct(sc, addr, size,
10219 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10224 bxe_pf_init(struct bxe_softc *sc)
10226 struct bxe_func_init_params func_init = { 0 };
10227 struct event_ring_data eq_data = { { 0 } };
10230 if (!CHIP_IS_E1x(sc)) {
10231 /* reset IGU PF statistics: MSIX + ATTN */
10234 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10235 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10236 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10240 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10241 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10242 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10243 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10247 /* function setup flags */
10248 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10251 * This flag is relevant for E1x only.
10252 * E2 doesn't have a TPA configuration at the function level.
10254 flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10256 func_init.func_flgs = flags;
10257 func_init.pf_id = SC_FUNC(sc);
10258 func_init.func_id = SC_FUNC(sc);
10259 func_init.spq_map = sc->spq_dma.paddr;
10260 func_init.spq_prod = sc->spq_prod_idx;
10262 bxe_func_init(sc, &func_init);
10264 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10267 * Congestion management values depend on the link rate.
10268 * There is no active link so initial link rate is set to 10Gbps.
10269 * When the link comes up the congestion management values are
10270 * re-calculated according to the actual link rate.
10272 sc->link_vars.line_speed = SPEED_10000;
10273 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10275 /* Only the PMF sets the HW */
10276 if (sc->port.pmf) {
10277 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10280 /* init Event Queue - PCI bus guarantees correct endianness */
10281 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10282 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10283 eq_data.producer = sc->eq_prod;
10284 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10285 eq_data.sb_id = DEF_SB_ID;
10286 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10290 bxe_hc_int_enable(struct bxe_softc *sc)
10292 int port = SC_PORT(sc);
10293 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10294 uint32_t val = REG_RD(sc, addr);
10295 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10296 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10297 (sc->intr_count == 1)) ? TRUE : FALSE;
10298 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10301 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10302 HC_CONFIG_0_REG_INT_LINE_EN_0);
10303 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10304 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10306 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10309 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10310 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10311 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10312 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10314 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10315 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10316 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10317 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10319 if (!CHIP_IS_E1(sc)) {
10320 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10323 REG_WR(sc, addr, val);
10325 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10329 if (CHIP_IS_E1(sc)) {
10330 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10333 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10334 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10336 REG_WR(sc, addr, val);
10338 /* ensure that HC_CONFIG is written before leading/trailing edge config */
10341 if (!CHIP_IS_E1(sc)) {
10342 /* init leading/trailing edge */
10344 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10345 if (sc->port.pmf) {
10346 /* enable nig and gpio3 attention */
10353 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10354 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10357 /* make sure that interrupts are indeed enabled from here on */
10362 bxe_igu_int_enable(struct bxe_softc *sc)
10365 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10366 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10367 (sc->intr_count == 1)) ? TRUE : FALSE;
10368 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10370 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10373 val &= ~(IGU_PF_CONF_INT_LINE_EN |
10374 IGU_PF_CONF_SINGLE_ISR_EN);
10375 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10376 IGU_PF_CONF_ATTN_BIT_EN);
10378 val |= IGU_PF_CONF_SINGLE_ISR_EN;
10381 val &= ~IGU_PF_CONF_INT_LINE_EN;
10382 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10383 IGU_PF_CONF_ATTN_BIT_EN |
10384 IGU_PF_CONF_SINGLE_ISR_EN);
10386 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10387 val |= (IGU_PF_CONF_INT_LINE_EN |
10388 IGU_PF_CONF_ATTN_BIT_EN |
10389 IGU_PF_CONF_SINGLE_ISR_EN);
10392 /* clean previous status - need to configure IGU prior to ack */
10393 if ((!msix) || single_msix) {
10394 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10398 val |= IGU_PF_CONF_FUNC_EN;
10400 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10401 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10403 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10407 /* init leading/trailing edge */
10409 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10410 if (sc->port.pmf) {
10411 /* enable nig and gpio3 attention */
10418 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10419 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10421 /* make sure that interrupts are indeed enabled from here on */
10426 bxe_int_enable(struct bxe_softc *sc)
10428 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10429 bxe_hc_int_enable(sc);
10431 bxe_igu_int_enable(sc);
10436 bxe_hc_int_disable(struct bxe_softc *sc)
10438 int port = SC_PORT(sc);
10439 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10440 uint32_t val = REG_RD(sc, addr);
10443 * In E1 we must use only PCI configuration space to disable MSI/MSIX
10444 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10447 if (CHIP_IS_E1(sc)) {
10449 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10450 * to prevent HC from sending interrupts after we exit the function.
10452 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10454 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10455 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10456 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10458 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10459 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10460 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10461 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10464 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10466 /* flush all outstanding writes */
10469 REG_WR(sc, addr, val);
10470 if (REG_RD(sc, addr) != val) {
10471 BLOGE(sc, "proper val not read from HC IGU!\n");
10476 bxe_igu_int_disable(struct bxe_softc *sc)
10478 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10480 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10481 IGU_PF_CONF_INT_LINE_EN |
10482 IGU_PF_CONF_ATTN_BIT_EN);
10484 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10486 /* flush all outstanding writes */
10489 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10490 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10491 BLOGE(sc, "proper val not read from IGU!\n");
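/*
 * Editor's sketch (not part of the driver build): both int-disable paths
 * above use the same write-then-read-back idiom, since a posted PCI write
 * can otherwise be silently dropped. In isolation, with a fake register:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static uint32_t fake_reg;

static void     reg_wr32(uint32_t off, uint32_t val) { (void)off; fake_reg = val; }
static uint32_t reg_rd32(uint32_t off) { (void)off; return (fake_reg); }

/* returns true only if the device visibly accepted the new value */
static bool
reg_write_verified(uint32_t off, uint32_t val)
{
    reg_wr32(off, val);
    return (reg_rd32(off) == val);
}
#endif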
10496 bxe_int_disable(struct bxe_softc *sc)
10498 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10499 bxe_hc_int_disable(sc);
10501 bxe_igu_int_disable(sc);
10506 bxe_nic_init(struct bxe_softc *sc,
10511 for (i = 0; i < sc->num_queues; i++) {
10512 bxe_init_eth_fp(sc, i);
10515 rmb(); /* ensure status block indices were read */
10517 bxe_init_rx_rings(sc);
10518 bxe_init_tx_rings(sc);
10524 /* initialize MOD_ABS interrupts */
10525 elink_init_mod_abs_int(sc, &sc->link_vars,
10526 sc->devinfo.chip_id,
10527 sc->devinfo.shmem_base,
10528 sc->devinfo.shmem2_base,
10531 bxe_init_def_sb(sc);
10532 bxe_update_dsb_idx(sc);
10533 bxe_init_sp_ring(sc);
10534 bxe_init_eq_ring(sc);
10535 bxe_init_internal(sc, load_code);
10537 bxe_stats_init(sc);
10539 /* flush all before enabling interrupts */
10542 bxe_int_enable(sc);
10544 /* check for SPIO5 */
10545 bxe_attn_int_deasserted0(sc,
10547 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10549 AEU_INPUTS_ATTN_BITS_SPIO5);
10553 bxe_init_objs(struct bxe_softc *sc)
10555 /* mcast rules must be added to tx if tx switching is enabled */
10556 ecore_obj_type o_type =
10557 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10560 /* RX_MODE controlling object */
10561 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10563 /* multicast configuration controlling object */
10564 ecore_init_mcast_obj(sc,
10570 BXE_SP(sc, mcast_rdata),
10571 BXE_SP_MAPPING(sc, mcast_rdata),
10572 ECORE_FILTER_MCAST_PENDING,
10576 /* Setup CAM credit pools */
10577 ecore_init_mac_credit_pool(sc,
10580 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10581 VNICS_PER_PATH(sc));
10583 ecore_init_vlan_credit_pool(sc,
10585 SC_ABS_FUNC(sc) >> 1,
10586 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10587 VNICS_PER_PATH(sc));
10589 /* RSS configuration object */
10590 ecore_init_rss_config_obj(sc,
10596 BXE_SP(sc, rss_rdata),
10597 BXE_SP_MAPPING(sc, rss_rdata),
10598 ECORE_FILTER_RSS_CONF_PENDING,
10599 &sc->sp_state, ECORE_OBJ_TYPE_RX);
10603 * Initialize the function. This must be called before sending CLIENT_SETUP
10604 * for the first client.
10607 bxe_func_start(struct bxe_softc *sc)
10609 struct ecore_func_state_params func_params = { NULL };
10610 struct ecore_func_start_params *start_params = &func_params.params.start;
10612 /* Prepare parameters for function state transitions */
10613 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10615 func_params.f_obj = &sc->func_obj;
10616 func_params.cmd = ECORE_F_CMD_START;
10618 /* Function parameters */
10619 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
10620 start_params->sd_vlan_tag = OVLAN(sc);
10622 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10623 start_params->network_cos_mode = STATIC_COS;
10624 } else { /* CHIP_IS_E1X */
10625 start_params->network_cos_mode = FW_WRR;
10628 //start_params->gre_tunnel_mode = 0;
10629 //start_params->gre_tunnel_rss = 0;
10631 return (ecore_func_state_change(sc, &func_params));
10635 bxe_set_power_state(struct bxe_softc *sc,
10640 /* If there is no power capability, silently succeed */
10641 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10642 BLOGW(sc, "No power capability\n");
10646 pmcsr = pci_read_config(sc->dev,
10647 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10652 pci_write_config(sc->dev,
10653 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10654 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10656 if (pmcsr & PCIM_PSTAT_DMASK) {
10657 /* delay required during transition out of D3hot */
10664 /* XXX if there are other clients above don't shut down the power */
10666 /* don't shut down the power for emulation and FPGA */
10667 if (CHIP_REV_IS_SLOW(sc)) {
10671 pmcsr &= ~PCIM_PSTAT_DMASK;
10672 pmcsr |= PCIM_PSTAT_D3;
10675 pmcsr |= PCIM_PSTAT_PMEENABLE;
10678 pci_write_config(sc->dev,
10679 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10683 * No more memory access after this point until the device is brought back to D0.
10689 BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
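/*
 * Editor's sketch (not part of the driver build): the PMCSR manipulation
 * above follows the PCI power-management register layout (D-state in bits
 * 1:0, PME_En in bit 8, write-1-to-clear PME_Status in bit 15). Pure
 * helpers showing the two transitions:
 */
#if 0
#include <stdint.h>

#define EX_PSTAT_DMASK      0x0003u
#define EX_PSTAT_D3         0x0003u
#define EX_PSTAT_PMEENABLE  0x0100u
#define EX_PSTAT_PME        0x8000u

/* D0: clear the D-state field and acknowledge any pending PME */
static uint16_t
pmcsr_to_d0(uint16_t pmcsr)
{
    return ((pmcsr & ~EX_PSTAT_DMASK) | EX_PSTAT_PME);
}

/* D3hot: set the D-state field, optionally arming PME for wake-up */
static uint16_t
pmcsr_to_d3hot(uint16_t pmcsr, int enable_pme)
{
    pmcsr = (pmcsr & ~EX_PSTAT_DMASK) | EX_PSTAT_D3;
    if (enable_pme)
        pmcsr |= EX_PSTAT_PMEENABLE;
    return (pmcsr);
}
#endif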
10698 /* returns true if the lock was successfully acquired */
10700 bxe_trylock_hw_lock(struct bxe_softc *sc,
10703 uint32_t lock_status;
10704 uint32_t resource_bit = (1 << resource);
10705 int func = SC_FUNC(sc);
10706 uint32_t hw_lock_control_reg;
10708 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10710 /* Validating that the resource is within range */
10711 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10712 BLOGD(sc, DBG_LOAD,
10713 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10714 resource, HW_LOCK_MAX_RESOURCE_VALUE);
10719 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10721 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10724 /* try to acquire the lock */
10725 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10726 lock_status = REG_RD(sc, hw_lock_control_reg);
10727 if (lock_status & resource_bit) {
10731 BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10732 "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10733 lock_status, resource_bit);
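/*
 * Editor's sketch (not part of the driver build): the hardware lock above
 * is a try-lock -- write the resource bit to the "set" register, then read
 * the status register to learn whether the request latched. A software
 * model of that latch-if-free semantic:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
hw_trylock_model(uint32_t *status, uint32_t resource_bit)
{
    if (*status & resource_bit)
        return (false);          /* another function already owns it */
    *status |= resource_bit;     /* request latches; we own the lock */
    return (true);
}
#endif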
10739 * Get the recovery leader resource id according to the engine this function
10740 * belongs to. Currently only 2 engines are supported.
10743 bxe_get_leader_lock_resource(struct bxe_softc *sc)
10746 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10748 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10752 /* try to acquire a leader lock for current engine */
10754 bxe_trylock_leader_lock(struct bxe_softc *sc)
10756 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10760 bxe_release_leader_lock(struct bxe_softc *sc)
10762 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10765 /* close gates #2, #3 and #4 */
10767 bxe_set_234_gates(struct bxe_softc *sc,
10772 /* gates #2 and #4a are closed/opened for "not E1" only */
10773 if (!CHIP_IS_E1(sc)) {
10775 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10777 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10781 if (CHIP_IS_E1x(sc)) {
10782 /* prevent interrupts from HC on both ports */
10783 val = REG_RD(sc, HC_REG_CONFIG_1);
10784 REG_WR(sc, HC_REG_CONFIG_1,
10785 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10786 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10788 val = REG_RD(sc, HC_REG_CONFIG_0);
10789 REG_WR(sc, HC_REG_CONFIG_0,
10790 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10791 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10793 /* Prevent incoming interrupts in IGU */
10794 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10796 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10798 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10799 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10802 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10803 close ? "closing" : "opening");
10808 /* poll for pending writes bit, it should get cleared in no more than 1s */
10810 bxe_er_poll_igu_vq(struct bxe_softc *sc)
10812 uint32_t cnt = 1000;
10813 uint32_t pend_bits = 0;
10816 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10818 if (pend_bits == 0) {
10823 } while (--cnt > 0);
10826 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
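/*
 * Editor's sketch (not part of the driver build): bxe_er_poll_igu_vq()
 * above is a bounded poll -- re-read until clear or until the time budget
 * (tries x delay) runs out. The same skeleton with usleep() standing in
 * for the kernel delay:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>

static bool
poll_until_clear(volatile uint32_t *reg, unsigned tries, unsigned usec)
{
    while (tries-- > 0) {
        if (*reg == 0)
            return (true);
        usleep(usec);
    }
    return (false);   /* timed out; the caller escalates the failure */
}
#endif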
10833 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
10836 bxe_clp_reset_prep(struct bxe_softc *sc,
10837 uint32_t *magic_val)
10839 /* Do some magic... */
10840 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10841 *magic_val = val & SHARED_MF_CLP_MAGIC;
10842 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10845 /* restore the value of the 'magic' bit */
10847 bxe_clp_reset_done(struct bxe_softc *sc,
10848 uint32_t magic_val)
10850 /* Restore the 'magic' bit value... */
10851 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10852 MFCFG_WR(sc, shared_mf_config.clp_mb,
10853 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10856 /* prepare for MCP reset, takes care of CLP configurations */
10858 bxe_reset_mcp_prep(struct bxe_softc *sc,
10859 uint32_t *magic_val)
10862 uint32_t validity_offset;
10864 /* set `magic' bit in order to save MF config */
10865 if (!CHIP_IS_E1(sc)) {
10866 bxe_clp_reset_prep(sc, magic_val);
10869 /* get shmem offset */
10870 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10872 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10874 /* Clear validity map flags */
10876 REG_WR(sc, shmem + validity_offset, 0);
10880 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
10881 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
10884 bxe_mcp_wait_one(struct bxe_softc *sc)
10886 /* special handling for emulation and FPGA (10 times longer) */
10887 if (CHIP_REV_IS_SLOW(sc)) {
10888 DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10890 DELAY((MCP_ONE_TIMEOUT) * 1000);
10894 /* initializes shmem_base and waits for the validity signature to appear */
10896 bxe_init_shmem(struct bxe_softc *sc)
10902 sc->devinfo.shmem_base =
10903 sc->link_params.shmem_base =
10904 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10906 if (sc->devinfo.shmem_base) {
10907 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10908 if (val & SHR_MEM_VALIDITY_MB)
10912 bxe_mcp_wait_one(sc);
10914 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10916 BLOGE(sc, "BAD MCP validity signature\n");
10922 bxe_reset_mcp_comp(struct bxe_softc *sc,
10923 uint32_t magic_val)
10925 int rc = bxe_init_shmem(sc);
10927 /* Restore the `magic' bit value */
10928 if (!CHIP_IS_E1(sc)) {
10929 bxe_clp_reset_done(sc, magic_val);
10936 bxe_pxp_prep(struct bxe_softc *sc)
10938 if (!CHIP_IS_E1(sc)) {
10939 REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10940 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10946 * Reset the whole chip except for:
10948 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10950 * - MISC (including AEU)
10955 bxe_process_kill_chip_reset(struct bxe_softc *sc,
10958 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10959 uint32_t global_bits2, stay_reset2;
10962 * Bits that have to be set in reset_mask2 if we want to reset 'global'
10963 * (per chip) blocks.
10966 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10967 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10970 * Don't reset the following blocks.
10971 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10972 * reset, as in 4 port device they might still be owned
10973 * by the MCP (there is only one leader per path).
10976 MISC_REGISTERS_RESET_REG_1_RST_HC |
10977 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10978 MISC_REGISTERS_RESET_REG_1_RST_PXP;
10981 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10982 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10983 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10984 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10985 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10986 MISC_REGISTERS_RESET_REG_2_RST_GRC |
10987 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10988 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10989 MISC_REGISTERS_RESET_REG_2_RST_ATC |
10990 MISC_REGISTERS_RESET_REG_2_PGLC |
10991 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10992 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10993 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10994 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10995 MISC_REGISTERS_RESET_REG_2_UMAC0 |
10996 MISC_REGISTERS_RESET_REG_2_UMAC1;
10999 * Keep the following blocks in reset:
11000 * - all xxMACs are handled by the elink code.
11003 MISC_REGISTERS_RESET_REG_2_XMAC |
11004 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
11006 /* Full reset masks according to the chip */
11007 reset_mask1 = 0xffffffff;
11009 if (CHIP_IS_E1(sc))
11010 reset_mask2 = 0xffff;
11011 else if (CHIP_IS_E1H(sc))
11012 reset_mask2 = 0x1ffff;
11013 else if (CHIP_IS_E2(sc))
11014 reset_mask2 = 0xfffff;
11015 else /* CHIP_IS_E3 */
11016 reset_mask2 = 0x3ffffff;
11018 /* Don't reset global blocks unless we need to */
11020 reset_mask2 &= ~global_bits2;
11023 * In case of attention in the QM, we need to reset PXP
11024 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11025 * because otherwise QM reset would release 'close the gates' shortly
11026 * before resetting the PXP, then the PSWRQ would send a write
11027 * request to PGLUE. Then when PXP is reset, PGLUE would try to
11028 * read the payload data from PSWWR, but PSWWR would not
11029 respond. The write queue in PGLUE would get stuck, DMAE commands
11030 * would not return. Therefore it's important to reset the second
11031 * reset register (containing the
11032 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11033 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11036 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11037 reset_mask2 & (~not_reset_mask2));
11039 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
11040 reset_mask1 & (~not_reset_mask1));
11045 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11046 reset_mask2 & (~stay_reset2));
11051 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11056 bxe_process_kill(struct bxe_softc *sc,
11061 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11062 uint32_t tags_63_32 = 0;
11064 /* Empty the Tetris buffer, wait for 1s */
11066 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11067 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11068 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11069 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11070 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11071 if (CHIP_IS_E3(sc)) {
11072 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11075 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11076 ((port_is_idle_0 & 0x1) == 0x1) &&
11077 ((port_is_idle_1 & 0x1) == 0x1) &&
11078 (pgl_exp_rom2 == 0xffffffff) &&
11079 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11082 } while (cnt-- > 0);
11085 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11086 "are still outstanding read requests after 1s! "
11087 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11088 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11089 sr_cnt, blk_cnt, port_is_idle_0,
11090 port_is_idle_1, pgl_exp_rom2);
11096 /* Close gates #2, #3 and #4 */
11097 bxe_set_234_gates(sc, TRUE);
11099 /* Poll for IGU VQs for 57712 and newer chips */
11100 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11104 /* XXX indicate that "process kill" is in progress to MCP */
11106 /* clear "unprepared" bit */
11107 REG_WR(sc, MISC_REG_UNPREPARED, 0);
11110 /* Make sure all is written to the chip before the reset */
11114 * Wait for 1ms to empty GLUE and PCI-E core queues,
11115 * PSWHST, GRC and PSWRD Tetris buffer.
11119 /* Prepare for chip reset: */
11122 bxe_reset_mcp_prep(sc, &val);
11129 /* reset the chip */
11130 bxe_process_kill_chip_reset(sc, global);
11133 /* clear errors in PGB */
11134 if (!CHIP_IS_E1(sc))
11135 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11137 /* Recover after reset: */
11139 if (global && bxe_reset_mcp_comp(sc, val)) {
11143 /* XXX add resetting the NO_MCP mode DB here */
11145 /* Open the gates #2, #3 and #4 */
11146 bxe_set_234_gates(sc, FALSE);
11149 * IGU/AEU preparation: bring the AEU/IGU back to a reset state and
11150 * re-enable attentions.
11157 bxe_leader_reset(struct bxe_softc *sc)
11160 uint8_t global = bxe_reset_is_global(sc);
11161 uint32_t load_code;
11164 * If not going to reset MCP, load "fake" driver to reset HW while
11165 * driver is owner of the HW.
11167 if (!global && !BXE_NOMCP(sc)) {
11168 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11169 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11171 BLOGE(sc, "MCP response failure, aborting\n");
11173 goto exit_leader_reset;
11176 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11177 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11178 BLOGE(sc, "MCP unexpected response, aborting\n");
11180 goto exit_leader_reset2;
11183 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11185 BLOGE(sc, "MCP response failure, aborting\n");
11187 goto exit_leader_reset2;
11191 /* try to recover after the failure */
11192 if (bxe_process_kill(sc, global)) {
11193 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11195 goto exit_leader_reset2;
11199 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver state.
11202 bxe_set_reset_done(sc);
11204 bxe_clear_reset_global(sc);
11207 exit_leader_reset2:
11209 /* unload "fake driver" if it was loaded */
11210 if (!global && !BXE_NOMCP(sc)) {
11211 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11212 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11218 bxe_release_leader_lock(sc);
11225 * prepare INIT transition, parameters configured:
11226 * - HC configuration
11227 * - Queue's CDU context
11230 bxe_pf_q_prep_init(struct bxe_softc *sc,
11231 struct bxe_fastpath *fp,
11232 struct ecore_queue_init_params *init_params)
11235 int cxt_index, cxt_offset;
11237 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11238 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11240 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11241 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11244 init_params->rx.hc_rate =
11245 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11246 init_params->tx.hc_rate =
11247 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11250 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11252 /* CQ index among the SB indices */
11253 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11254 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11256 /* set maximum number of COSs supported by this queue */
11257 init_params->max_cos = sc->max_cos;
11259 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11260 fp->index, init_params->max_cos);
11262 /* set the context pointers queue object */
11263 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11264 /* XXX change index/cid here if ever support multiple tx CoS */
11265 /* fp->txdata[cos]->cid */
11266 cxt_index = fp->index / ILT_PAGE_CIDS;
11267 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11268 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11272 /* set flags that are common to both the Tx-only and normal connections */
11273 static unsigned long
11274 bxe_get_common_flags(struct bxe_softc *sc,
11275 struct bxe_fastpath *fp,
11276 uint8_t zero_stats)
11278 unsigned long flags = 0;
11280 /* PF driver will always initialize the Queue to an ACTIVE state */
11281 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11284 * tx only connections collect statistics (on the same index as the
11285 * parent connection). The statistics are zeroed when the parent
11286 * connection is initialized.
11289 bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11291 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11295 * tx only connections can support tx-switching, though their
11296 * CoS-ness doesn't survive the loopback
11298 if (sc->flags & BXE_TX_SWITCHING) {
11299 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11302 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11307 static unsigned long
11308 bxe_get_q_flags(struct bxe_softc *sc,
11309 struct bxe_fastpath *fp,
11312 unsigned long flags = 0;
11314 if (IS_MF_SD(sc)) {
11315 bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11318 if (sc->ifnet->if_capenable & IFCAP_LRO) {
11319 bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11320 #if __FreeBSD_version >= 800000
11321 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11326 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11327 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11330 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11332 /* merge with common flags */
11333 return (flags | bxe_get_common_flags(sc, fp, TRUE));
11337 bxe_pf_q_prep_general(struct bxe_softc *sc,
11338 struct bxe_fastpath *fp,
11339 struct ecore_general_setup_params *gen_init,
11342 gen_init->stat_id = bxe_stats_id(fp);
11343 gen_init->spcl_id = fp->cl_id;
11344 gen_init->mtu = sc->mtu;
11345 gen_init->cos = cos;
11349 bxe_pf_rx_q_prep(struct bxe_softc *sc,
11350 struct bxe_fastpath *fp,
11351 struct rxq_pause_params *pause,
11352 struct ecore_rxq_setup_params *rxq_init)
11354 uint8_t max_sge = 0;
11355 uint16_t sge_sz = 0;
11356 uint16_t tpa_agg_size = 0;
11358 pause->sge_th_lo = SGE_TH_LO(sc);
11359 pause->sge_th_hi = SGE_TH_HI(sc);
11361 /* validate SGE ring has enough to cross high threshold */
11362 if (sc->dropless_fc &&
11363 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11364 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11365 BLOGW(sc, "sge ring threshold limit\n");
11368 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11369 tpa_agg_size = (2 * sc->mtu);
11370 if (tpa_agg_size < sc->max_aggregation_size) {
11371 tpa_agg_size = sc->max_aggregation_size;
11374 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11375 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11376 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11377 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11379 /* pause - not for e1 */
11380 if (!CHIP_IS_E1(sc)) {
11381 pause->bd_th_lo = BD_TH_LO(sc);
11382 pause->bd_th_hi = BD_TH_HI(sc);
11384 pause->rcq_th_lo = RCQ_TH_LO(sc);
11385 pause->rcq_th_hi = RCQ_TH_HI(sc);
11387 /* validate rings have enough entries to cross high thresholds */
11388 if (sc->dropless_fc &&
11389 pause->bd_th_hi + FW_PREFETCH_CNT >
11390 sc->rx_ring_size) {
11391 BLOGW(sc, "rx bd ring threshold limit\n");
11394 if (sc->dropless_fc &&
11395 pause->rcq_th_hi + FW_PREFETCH_CNT >
11396 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11397 BLOGW(sc, "rcq ring threshold limit\n");
11400 pause->pri_map = 1;
11404 rxq_init->dscr_map = fp->rx_dma.paddr;
11405 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11406 rxq_init->rcq_map = fp->rcq_dma.paddr;
11407 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11410 * This should be the maximum number of data bytes that may be
11411 * placed on the BD (not including padding).
11413 rxq_init->buf_sz = (fp->rx_buf_size -
11414 IP_HEADER_ALIGNMENT_PADDING);
11416 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11417 rxq_init->tpa_agg_sz = tpa_agg_size;
11418 rxq_init->sge_buf_sz = sge_sz;
11419 rxq_init->max_sges_pkt = max_sge;
11420 rxq_init->rss_engine_id = SC_FUNC(sc);
11421 rxq_init->mcast_engine_id = SC_FUNC(sc);
11424 * Maximum number of simultaneous TPA aggregations for this queue.
11425 * For PF Clients it should be the maximum available number.
11426 * VF driver(s) may want to define it to a smaller value.
11428 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11430 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11431 rxq_init->fw_sb_id = fp->fw_sb_id;
11433 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11436 * configure silent vlan removal
11437 * if multi function mode is afex, then mask default vlan
11439 if (IS_MF_AFEX(sc)) {
11440 rxq_init->silent_removal_value =
11441 sc->devinfo.mf_info.afex_def_vlan_tag;
11442 rxq_init->silent_removal_mask = EVL_VLID_MASK;
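/*
 * Editor's sketch (not part of the driver build): the max_sge math above
 * counts how many SGE pages a worst-case packet spans, then rounds up to
 * whole SGE entries. With the example geometry below (4KB pages, one page
 * per SGE -- an assumption, not the driver's constants), mtu=9000 yields 3:
 */
#if 0
#include <stdint.h>

#define EX_SGE_PAGE_SIZE       4096u
#define EX_SGE_PAGE_SHIFT      12
#define EX_PAGES_PER_SGE       1u
#define EX_PAGES_PER_SGE_SHIFT 0

static uint8_t
max_sges_for_mtu(uint32_t mtu)
{
    /* pages needed for one MTU-sized buffer, rounded up */
    uint32_t pages = (mtu + EX_SGE_PAGE_SIZE - 1) >> EX_SGE_PAGE_SHIFT;

    /* round up to a whole number of SGE entries */
    pages = (pages + EX_PAGES_PER_SGE - 1) & ~(EX_PAGES_PER_SGE - 1);
    return ((uint8_t)(pages >> EX_PAGES_PER_SGE_SHIFT));
}
#endif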
11447 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11448 struct bxe_fastpath *fp,
11449 struct ecore_txq_setup_params *txq_init,
11453 * XXX If multiple CoS is ever supported then each fastpath structure
11454 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11455 * fp->txdata[cos]->tx_dma.paddr;
11457 txq_init->dscr_map = fp->tx_dma.paddr;
11458 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11459 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11460 txq_init->fw_sb_id = fp->fw_sb_id;
11463 * set the TSS leading client id for TX classification to the
11464 * leading RSS client id
11466 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11470 * This function performs 2 steps in a queue state machine: RESET->INIT, then INIT->SETUP.
11475 bxe_setup_queue(struct bxe_softc *sc,
11476 struct bxe_fastpath *fp,
11479 struct ecore_queue_state_params q_params = { NULL };
11480 struct ecore_queue_setup_params *setup_params =
11481 &q_params.params.setup;
11484 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11486 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11488 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11490 /* we want to wait for completion in this context */
11491 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11493 /* prepare the INIT parameters */
11494 bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11496 /* Set the command */
11497 q_params.cmd = ECORE_Q_CMD_INIT;
11499 /* Change the state to INIT */
11500 rc = ecore_queue_state_change(sc, &q_params);
11502 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11506 BLOGD(sc, DBG_LOAD, "init complete\n");
11508 /* now move the Queue to the SETUP state */
11509 memset(setup_params, 0, sizeof(*setup_params));
11511 /* set Queue flags */
11512 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11514 /* set general SETUP parameters */
11515 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11516 FIRST_TX_COS_INDEX);
11518 bxe_pf_rx_q_prep(sc, fp,
11519 &setup_params->pause_params,
11520 &setup_params->rxq_params);
11522 bxe_pf_tx_q_prep(sc, fp,
11523 &setup_params->txq_params,
11524 FIRST_TX_COS_INDEX);
11526 /* Set the command */
11527 q_params.cmd = ECORE_Q_CMD_SETUP;
11529 /* change the state to SETUP */
11530 rc = ecore_queue_state_change(sc, &q_params);
11532 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11540 bxe_setup_leading(struct bxe_softc *sc)
11542 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11546 bxe_config_rss_pf(struct bxe_softc *sc,
11547 struct ecore_rss_config_obj *rss_obj,
11548 uint8_t config_hash)
11550 struct ecore_config_rss_params params = { NULL };
11554 * Although RSS is meaningless when there is a single HW queue we
11555 * still need it enabled in order to have HW Rx hash generated.
11558 params.rss_obj = rss_obj;
11560 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11562 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11564 /* RSS configuration */
11565 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11566 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11567 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11568 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11569 if (rss_obj->udp_rss_v4) {
11570 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11572 if (rss_obj->udp_rss_v6) {
11573 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11577 params.rss_result_mask = MULTI_MASK;
11579 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11583 for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11584 params.rss_key[i] = arc4random();
11587 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11590 return (ecore_config_rss(sc, &params));
11594 bxe_config_rss_eth(struct bxe_softc *sc,
11595 uint8_t config_hash)
11597 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11601 bxe_init_rss_pf(struct bxe_softc *sc)
11603 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11604 int i;
11607 * Prepare the initial contents of the indirection table if
11608 * RSS is enabled
11610 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11611 sc->rss_conf_obj.ind_table[i] =
11612 (sc->fp->cl_id + (i % num_eth_queues));
11616 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11620 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11621 * per-port, so if explicit configuration is needed, do it only
11622 * for a PMF.
11624 * For 57712 and newer it's a per-function configuration.
11626 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
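/*
 * Illustration only (not part of the original source): how the config_hash
 * argument passed above resolves per chip family. On 57710/57711 (E1x) the
 * SEARCHER RSS key is a per-port resource, so only the PMF programs it; on
 * 57712 and newer it is per-function, so every function programs its own.
 */
#if 0
static uint8_t
example_rss_config_hash(struct bxe_softc *sc)
{
    /* same expression as the call site above */
    return (sc->port.pmf || !CHIP_IS_E1x(sc));
}
#endif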
11630 bxe_set_mac_one(struct bxe_softc *sc,
11631 uint8_t *mac,
11632 struct ecore_vlan_mac_obj *obj,
11633 uint8_t set,
11634 int mac_type,
11635 unsigned long *ramrod_flags)
11637 struct ecore_vlan_mac_ramrod_params ramrod_param;
11638 int rc;
11640 memset(&ramrod_param, 0, sizeof(ramrod_param));
11642 /* fill in general parameters */
11643 ramrod_param.vlan_mac_obj = obj;
11644 ramrod_param.ramrod_flags = *ramrod_flags;
11646 /* fill a user request section if needed */
11647 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11648 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11650 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11652 /* Set the command: ADD or DEL */
11653 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11654 ECORE_VLAN_MAC_DEL;
11657 rc = ecore_config_vlan_mac(sc, &ramrod_param);
11659 if (rc == ECORE_EXISTS) {
11660 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11661 /* do not treat adding same MAC as error */
11663 } else if (rc < 0) {
11664 BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11671 bxe_set_eth_mac(struct bxe_softc *sc,
11672 uint8_t set)
11674 unsigned long ramrod_flags = 0;
11676 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11678 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11680 /* Eth MAC is set on RSS leading client (fp[0]) */
11681 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11682 &sc->sp_objs->mac_obj,
11683 set, ECORE_ETH_MAC, &ramrod_flags));
11687 bxe_get_cur_phy_idx(struct bxe_softc *sc)
11689 uint32_t sel_phy_idx = 0;
11691 if (sc->link_params.num_phys <= 1) {
11692 return (ELINK_INT_PHY);
11695 if (sc->link_vars.link_up) {
11696 sel_phy_idx = ELINK_EXT_PHY1;
11697 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11698 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11699 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11700 ELINK_SUPPORTED_FIBRE))
11701 sel_phy_idx = ELINK_EXT_PHY2;
11703 switch (elink_phy_selection(&sc->link_params)) {
11704 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11705 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11706 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11707 sel_phy_idx = ELINK_EXT_PHY1;
11709 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11710 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11711 sel_phy_idx = ELINK_EXT_PHY2;
11716 return (sel_phy_idx);
11720 bxe_get_link_cfg_idx(struct bxe_softc *sc)
11722 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11725 * The index of the selected, activated PHY is always the post-swap index
11726 * (in case PHY swapping is enabled), so when swapping is enabled we need
11727 * to reverse the mapping to recover the configuration index.
11730 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11731 if (sel_phy_idx == ELINK_EXT_PHY1)
11732 sel_phy_idx = ELINK_EXT_PHY2;
11733 else if (sel_phy_idx == ELINK_EXT_PHY2)
11734 sel_phy_idx = ELINK_EXT_PHY1;
11737 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11741 bxe_set_requested_fc(struct bxe_softc *sc)
11744 * Initialize link parameters structure variables
11745 * It is recommended to turn off RX FC for jumbo frames
11746 * for better performance
11748 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11749 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11751 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11756 bxe_calc_fc_adv(struct bxe_softc *sc)
11758 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11761 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11764 switch (sc->link_vars.ieee_fc &
11765 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11767 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11768 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11769 ADVERTISED_Pause);
11772 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11773 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11783 bxe_get_mf_speed(struct bxe_softc *sc)
11785 uint16_t line_speed = sc->link_vars.line_speed;
11787 uint16_t maxCfg =
11788 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11790 /* calculate the current MAX line speed limit for the MF devices */
11791 if (IS_MF_SI(sc)) {
11792 line_speed = (line_speed * maxCfg) / 100;
11793 } else { /* SD mode */
11794 uint16_t vn_max_rate = maxCfg * 100;
11796 if (vn_max_rate < line_speed) {
11797 line_speed = vn_max_rate;
11802 return (line_speed);
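/*
 * Worked example (illustration only) of the MF speed math above, assuming a
 * physical line_speed of 10000 Mbps and maxCfg of 50 (the code treats maxCfg
 * as a percentage in SI mode and as a multiple of 100 Mbps in SD mode):
 *   SI mode: line_speed = (10000 * 50) / 100 = 5000 Mbps
 *   SD mode: vn_max_rate = 50 * 100 = 5000; 5000 < 10000, so 5000 Mbps
 */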
11806 bxe_fill_report_data(struct bxe_softc *sc,
11807 struct bxe_link_report_data *data)
11809 uint16_t line_speed = bxe_get_mf_speed(sc);
11811 memset(data, 0, sizeof(*data));
11813 /* fill the report data with the effective line speed */
11814 data->line_speed = line_speed;
11817 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11818 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11822 if (sc->link_vars.duplex == DUPLEX_FULL) {
11823 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11826 /* Rx Flow Control is ON */
11827 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11828 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11831 /* Tx Flow Control is ON */
11832 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11833 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11837 /* report link status to OS, should be called under phy_lock */
11839 bxe_link_report_locked(struct bxe_softc *sc)
11841 struct bxe_link_report_data cur_data;
11843 /* reread mf_cfg */
11844 if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11845 bxe_read_mf_cfg(sc);
11848 /* Read the current link report info */
11849 bxe_fill_report_data(sc, &cur_data);
11851 /* Don't report link down or exactly the same link status twice */
11852 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11853 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11854 &sc->last_reported_link.link_report_flags) &&
11855 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11856 &cur_data.link_report_flags))) {
11860 ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11861 cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11864 ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11865 /* report new link params and remember the state for the next time */
11866 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11868 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11869 &cur_data.link_report_flags)) {
11870 if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
12872 const char *duplex;
12873 const char *flow;
11875 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11876 &cur_data.link_report_flags)) {
11878 ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11881 ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11885 * Handle the FC at the end so that only these flags would be
11886 * possibly set. This way we may easily check if there is no FC
11889 if (cur_data.link_report_flags) {
11890 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11891 &cur_data.link_report_flags) &&
11892 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11893 &cur_data.link_report_flags)) {
11894 flow = "ON - receive & transmit";
11895 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11896 &cur_data.link_report_flags) &&
11897 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11898 &cur_data.link_report_flags)) {
11899 flow = "ON - receive";
11900 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11901 &cur_data.link_report_flags) &&
11902 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11903 &cur_data.link_report_flags)) {
11904 flow = "ON - transmit";
11906 flow = "none"; /* possible? */
11912 if_link_state_change(sc->ifnet, LINK_STATE_UP);
11913 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11914 cur_data.line_speed, duplex, flow);
11919 bxe_link_report(struct bxe_softc *sc)
11921 bxe_acquire_phy_lock(sc);
11922 bxe_link_report_locked(sc);
11923 bxe_release_phy_lock(sc);
11927 bxe_link_status_update(struct bxe_softc *sc)
11929 if (sc->state != BXE_STATE_OPEN) {
11930 return;
11933 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11934 elink_link_status_update(&sc->link_params, &sc->link_vars);
11936 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11937 ELINK_SUPPORTED_10baseT_Full |
11938 ELINK_SUPPORTED_100baseT_Half |
11939 ELINK_SUPPORTED_100baseT_Full |
11940 ELINK_SUPPORTED_1000baseT_Full |
11941 ELINK_SUPPORTED_2500baseX_Full |
11942 ELINK_SUPPORTED_10000baseT_Full |
11943 ELINK_SUPPORTED_TP |
11944 ELINK_SUPPORTED_FIBRE |
11945 ELINK_SUPPORTED_Autoneg |
11946 ELINK_SUPPORTED_Pause |
11947 ELINK_SUPPORTED_Asym_Pause);
11948 sc->port.advertising[0] = sc->port.supported[0];
11950 sc->link_params.sc = sc;
11951 sc->link_params.port = SC_PORT(sc);
11952 sc->link_params.req_duplex[0] = DUPLEX_FULL;
11953 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
11954 sc->link_params.req_line_speed[0] = SPEED_10000;
11955 sc->link_params.speed_cap_mask[0] = 0x7f0000;
11956 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
11958 if (CHIP_REV_IS_FPGA(sc)) {
11959 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
11960 sc->link_vars.line_speed = ELINK_SPEED_1000;
11961 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11962 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11964 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
11965 sc->link_vars.line_speed = ELINK_SPEED_10000;
11966 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11967 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11970 sc->link_vars.link_up = 1;
11972 sc->link_vars.duplex = DUPLEX_FULL;
11973 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11976 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11977 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11978 bxe_link_report(sc);
11983 if (sc->link_vars.link_up) {
11984 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11986 bxe_stats_handle(sc, STATS_EVENT_STOP);
11988 bxe_link_report(sc);
11990 bxe_link_report(sc);
11991 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11996 bxe_initial_phy_init(struct bxe_softc *sc,
11997 int load_mode)
11999 int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
12000 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
12001 struct elink_params *lp = &sc->link_params;
12003 bxe_set_requested_fc(sc);
12005 if (CHIP_REV_IS_SLOW(sc)) {
12006 uint32_t bond = CHIP_BOND_ID(sc);
12007 uint32_t feat = 0;
12009 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
12010 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12011 } else if (bond & 0x4) {
12012 if (CHIP_IS_E3(sc)) {
12013 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
12015 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12017 } else if (bond & 0x8) {
12018 if (CHIP_IS_E3(sc)) {
12019 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
12021 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12025 /* disable EMAC for E3 and above */
12026 if (bond & 0x2) {
12027 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12030 sc->link_params.feature_config_flags |= feat;
12033 bxe_acquire_phy_lock(sc);
12035 if (load_mode == LOAD_DIAG) {
12036 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
12037 /* Prefer doing PHY loopback at 10G speed, if possible */
12038 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
12039 if (lp->speed_cap_mask[cfg_idx] &
12040 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
12041 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
12043 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
12048 if (load_mode == LOAD_LOOPBACK_EXT) {
12049 lp->loopback_mode = ELINK_LOOPBACK_EXT;
12052 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12054 bxe_release_phy_lock(sc);
12056 bxe_calc_fc_adv(sc);
12058 if (sc->link_vars.link_up) {
12059 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12060 bxe_link_report(sc);
12063 if (!CHIP_REV_IS_SLOW(sc)) {
12064 bxe_periodic_start(sc);
12067 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12071 /* must be called under IF_ADDR_LOCK */
12073 bxe_init_mcast_macs_list(struct bxe_softc *sc,
12074 struct ecore_mcast_ramrod_params *p)
12076 struct ifnet *ifp = sc->ifnet;
12077 int mc_count = 0;
12078 struct ifmultiaddr *ifma;
12079 struct ecore_mcast_list_elem *mc_mac;
12081 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12082 if (ifma->ifma_addr->sa_family != AF_LINK) {
12089 ECORE_LIST_INIT(&p->mcast_list);
12090 p->mcast_list_len = 0;
12096 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12097 (M_NOWAIT | M_ZERO));
12099 BLOGE(sc, "Failed to allocate temp mcast list\n");
12102 bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12104 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12105 if (ifma->ifma_addr->sa_family != AF_LINK) {
12109 mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
12110 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
12112 BLOGD(sc, DBG_LOAD,
12113 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12114 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12115 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5], mc_count);
12119 p->mcast_list_len = mc_count;
12125 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12127 struct ecore_mcast_list_elem *mc_mac =
12128 ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12129 struct ecore_mcast_list_elem,
12130 link);
12133 /* only a single free as all mc_macs are in the same heap array */
12134 free(mc_mac, M_DEVBUF);
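/*
 * Note (added for clarity): bxe_init_mcast_macs_list() allocates all list
 * elements as one contiguous array, i.e. the list is effectively
 * &mc_mac[0] -> &mc_mac[1] -> ... -> &mc_mac[mc_count - 1], which is why
 * the single free() of the first entry above releases the entire backing
 * allocation at once.
 */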
12139 bxe_set_mc_list(struct bxe_softc *sc)
12141 struct ecore_mcast_ramrod_params rparam = { NULL };
12142 int rc = 0;
12144 rparam.mcast_obj = &sc->mcast_obj;
12146 BXE_MCAST_LOCK(sc);
12148 /* first, clear all configured multicast MACs */
12149 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12151 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12152 /* Manual backport parts of FreeBSD upstream r284470. */
12153 BXE_MCAST_UNLOCK(sc);
12157 /* configure a new MACs list */
12158 rc = bxe_init_mcast_macs_list(sc, &rparam);
12160 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12161 BXE_MCAST_UNLOCK(sc);
12165 /* Now add the new MACs */
12166 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12168 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12171 bxe_free_mcast_macs_list(&rparam);
12173 BXE_MCAST_UNLOCK(sc);
12179 bxe_set_uc_list(struct bxe_softc *sc)
12181 struct ifnet *ifp = sc->ifnet;
12182 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12183 struct ifaddr *ifa;
12184 unsigned long ramrod_flags = 0;
12185 int rc;
12187 #if __FreeBSD_version < 800000
12190 if_addr_rlock(ifp);
12193 /* first schedule a cleanup of the old configuration */
12194 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12196 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12197 #if __FreeBSD_version < 800000
12198 IF_ADDR_UNLOCK(ifp);
12200 if_addr_runlock(ifp);
12205 ifa = ifp->if_addr;
12207 if (ifa->ifa_addr->sa_family != AF_LINK) {
12208 ifa = TAILQ_NEXT(ifa, ifa_link);
12212 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12213 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12214 if (rc == -EEXIST) {
12215 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12216 /* do not treat adding same MAC as an error */
12218 } else if (rc < 0) {
12219 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12220 #if __FreeBSD_version < 800000
12221 IF_ADDR_UNLOCK(ifp);
12223 if_addr_runlock(ifp);
12228 ifa = TAILQ_NEXT(ifa, ifa_link);
12231 #if __FreeBSD_version < 800000
12232 IF_ADDR_UNLOCK(ifp);
12234 if_addr_runlock(ifp);
12237 /* Execute the pending commands */
12238 bit_set(&ramrod_flags, RAMROD_CONT);
12239 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12240 ECORE_UC_LIST_MAC, &ramrod_flags));
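/*
 * A minimal sketch (illustration only) of the two-phase ramrod pattern used
 * above: ADD commands are first queued per address without RAMROD_CONT, then
 * one final call with RAMROD_CONT (and no user request) drains the pending
 * list on the MAC object.
 */
#if 0
static int
example_uc_list_pattern(struct bxe_softc *sc,
                        struct ecore_vlan_mac_obj *obj,
                        uint8_t *mac)
{
    unsigned long flags = 0;

    /* phase 1: schedule the ADD; nothing is executed yet */
    (void)bxe_set_mac_one(sc, mac, obj, TRUE, ECORE_UC_LIST_MAC, &flags);

    /* phase 2: execute everything queued on the object */
    bit_set(&flags, RAMROD_CONT);
    return (bxe_set_mac_one(sc, NULL, obj, FALSE, ECORE_UC_LIST_MAC, &flags));
}
#endif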
12244 bxe_set_rx_mode(struct bxe_softc *sc)
12246 struct ifnet *ifp = sc->ifnet;
12247 uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12249 if (sc->state != BXE_STATE_OPEN) {
12250 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12254 BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags);
12256 if (ifp->if_flags & IFF_PROMISC) {
12257 rx_mode = BXE_RX_MODE_PROMISC;
12258 } else if ((ifp->if_flags & IFF_ALLMULTI) ||
12259 ((ifp->if_amcount > BXE_MAX_MULTICAST) &&
12260 CHIP_IS_E1(sc))) {
12261 rx_mode = BXE_RX_MODE_ALLMULTI;
12264 /* some multicasts */
12265 if (bxe_set_mc_list(sc) < 0) {
12266 rx_mode = BXE_RX_MODE_ALLMULTI;
12268 if (bxe_set_uc_list(sc) < 0) {
12269 rx_mode = BXE_RX_MODE_PROMISC;
12274 sc->rx_mode = rx_mode;
12276 /* schedule the rx_mode command */
12277 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12278 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12279 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12284 bxe_set_storm_rx_mode(sc);
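/*
 * Summary (added for clarity) of the rx_mode selection above:
 *   IFF_PROMISC set                                  -> BXE_RX_MODE_PROMISC
 *   IFF_ALLMULTI set, or too many mcast groups on E1 -> BXE_RX_MODE_ALLMULTI
 *   otherwise                                        -> BXE_RX_MODE_NORMAL,
 * downgrading to ALLMULTI if the mcast list cannot be programmed and to
 * PROMISC if the ucast list cannot be programmed.
 */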
12289 /* update flags in shmem */
12291 bxe_update_drv_flags(struct bxe_softc *sc,
12292 uint32_t flags,
12293 uint32_t set)
12295 uint32_t drv_flags;
12297 if (SHMEM2_HAS(sc, drv_flags)) {
12298 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12299 drv_flags = SHMEM2_RD(sc, drv_flags);
12301 if (set) {
12302 SET_FLAGS(drv_flags, flags);
12304 RESET_FLAGS(drv_flags, flags);
12307 SHMEM2_WR(sc, drv_flags, drv_flags);
12308 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12310 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12314 /* periodic timer callout routine, only runs when the interface is up */
12317 bxe_periodic_callout_func(void *xsc)
12319 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12320 int i;
12322 if (!BXE_CORE_TRYLOCK(sc)) {
12323 /* just bail and try again next time */
12325 if ((sc->state == BXE_STATE_OPEN) &&
12326 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12327 /* schedule the next periodic callout */
12328 callout_reset(&sc->periodic_callout, hz,
12329 bxe_periodic_callout_func, sc);
12335 if ((sc->state != BXE_STATE_OPEN) ||
12336 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12337 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12338 BXE_CORE_UNLOCK(sc);
12343 /* Check for TX timeouts on any fastpath. */
12344 FOR_EACH_QUEUE(sc, i) {
12345 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12346 /* Ruh-Roh, chip was reset! */
12351 if (!CHIP_REV_IS_SLOW(sc)) {
12353 * This barrier is needed to ensure the ordering between the writing
12354 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12355 * the reading here.
12358 if (sc->port.pmf) {
12359 bxe_acquire_phy_lock(sc);
12360 elink_period_func(&sc->link_params, &sc->link_vars);
12361 bxe_release_phy_lock(sc);
12365 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12366 int mb_idx = SC_FW_MB_IDX(sc);
12367 uint32_t drv_pulse;
12368 uint32_t mcp_pulse;
12370 ++sc->fw_drv_pulse_wr_seq;
12371 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12373 drv_pulse = sc->fw_drv_pulse_wr_seq;
12376 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12377 MCP_PULSE_SEQ_MASK);
12380 * The delta between driver pulse and mcp response should
12381 * be 1 (before mcp response) or 0 (after mcp response).
12383 if ((drv_pulse != mcp_pulse) &&
12384 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12385 /* someone lost a heartbeat... */
12386 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12387 drv_pulse, mcp_pulse);
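/*
 * Worked example (illustration only) of the heartbeat check above: with
 * drv_pulse = 0x0005, the MCP is considered healthy when mcp_pulse is
 * 0x0005 (delta 0, MCP already answered) or 0x0004 (delta 1, answer still
 * outstanding). Any other value, e.g. 0x0001, means a lost heartbeat.
 */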
12391 /* state is BXE_STATE_OPEN */
12392 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12394 BXE_CORE_UNLOCK(sc);
12396 if ((sc->state == BXE_STATE_OPEN) &&
12397 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12398 /* schedule the next periodic callout */
12399 callout_reset(&sc->periodic_callout, hz,
12400 bxe_periodic_callout_func, sc);
12405 bxe_periodic_start(struct bxe_softc *sc)
12407 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12408 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12412 bxe_periodic_stop(struct bxe_softc *sc)
12414 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12415 callout_drain(&sc->periodic_callout);
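/*
 * A minimal sketch (illustration only) of the lifecycle contract implemented
 * by the pair above: PERIODIC_GO lets the handler re-arm itself each tick,
 * while PERIODIC_STOP followed by callout_drain() guarantees the handler has
 * finished and will not be rescheduled.
 */
#if 0
static void
example_periodic_lifecycle(struct bxe_softc *sc)
{
    bxe_periodic_start(sc); /* e.g. at the end of a successful nic_load */
    /* ... interface is up; bxe_periodic_callout_func() re-arms itself ... */
    bxe_periodic_stop(sc);  /* e.g. during unload; blocks until drained */
}
#endif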
12419 bxe_parity_recover(struct bxe_softc *sc)
12421 uint8_t global = FALSE;
12422 uint32_t error_recovered, error_unrecovered;
12423 bool is_parity;
12426 if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12427 (sc->state == BXE_STATE_ERROR)) {
12428 BLOGE(sc, "RECOVERY failed, "
12429 "stack notified driver is NOT running! "
12430 "Please reboot/power cycle the system.\n");
12436 "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12437 __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12439 switch(sc->recovery_state) {
12441 case BXE_RECOVERY_INIT:
12442 is_parity = bxe_chk_parity_attn(sc, &global, FALSE);
12444 if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12445 (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12446 (sc->error_status & BXE_ERR_GLOBAL)) {
12449 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
12450 bxe_periodic_stop(sc);
12452 bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12453 sc->state = BXE_STATE_ERROR;
12454 sc->recovery_state = BXE_RECOVERY_FAILED;
12455 BLOGE(sc, " No Recovery tried for error 0x%x"
12456 " stack notified driver is NOT running!"
12457 " Please reboot/power cycle the system.\n",
12459 BXE_CORE_UNLOCK(sc);
12464 /* Try to get a LEADER_LOCK HW lock */
12465 if (bxe_trylock_leader_lock(sc)) {
12467 bxe_set_reset_in_progress(sc);
12469 * Check if there is a global attention and if
12470 * there was a global attention, set the global
12471 * reset bit.
12473 if (global) {
12474 bxe_set_reset_global(sc);
12479 /* If interface has been removed - break */
12481 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
12482 bxe_periodic_stop(sc);
12486 bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12487 sc->recovery_state = BXE_RECOVERY_WAIT;
12488 BXE_CORE_UNLOCK(sc);
12491 * Ensure "is_leader", MCP command sequence and
12492 * "recovery_state" update values are seen on other
12497 case BXE_RECOVERY_WAIT:
12499 if (sc->is_leader) {
12500 int other_engine = SC_PATH(sc) ? 0 : 1;
12501 bool other_load_status =
12502 bxe_get_load_status(sc, other_engine);
12503 bool load_status =
12504 bxe_get_load_status(sc, SC_PATH(sc));
12505 global = bxe_reset_is_global(sc);
12508 * In case of a parity in a global block, let
12509 * the first leader that performs a
12510 * leader_reset() reset the global blocks in
12511 * order to clear global attentions. Otherwise
12512 * the gates will remain closed for that
12516 (global && other_load_status)) {
12518 * Wait until all other functions get
12519 * down.
12521 taskqueue_enqueue_timeout(taskqueue_thread,
12522 &sc->sp_err_timeout_task, hz/10);
12526 * If all other functions got down
12527 * try to bring the chip back to
12528 * normal. In any case it's an exit
12529 * point for a leader.
12531 if (bxe_leader_reset(sc)) {
12532 BLOGE(sc, "RECOVERY failed, "
12533 "stack notified driver is NOT running!\n");
12534 sc->recovery_state = BXE_RECOVERY_FAILED;
12535 sc->state = BXE_STATE_ERROR;
12541 * If we are here, it means that the
12542 * leader has succeeded and doesn't
12543 * want to be a leader any more. Try
12544 * to continue as a non-leader.
12549 } else { /* non-leader */
12550 if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12552 * Try to get a LEADER_LOCK HW lock as
12553 * long as a former leader may have
12554 * been unloaded by the user or
12555 * released a leadership by another
12556 * reason.
12558 if (bxe_trylock_leader_lock(sc)) {
12560 * I'm a leader now! Restart a
12561 * switch case.
12567 taskqueue_enqueue_timeout(taskqueue_thread,
12568 &sc->sp_err_timeout_task, hz/10);
12573 * If there was a global attention, wait
12574 * for it to be cleared.
12576 if (bxe_reset_is_global(sc)) {
12577 taskqueue_enqueue_timeout(taskqueue_thread,
12578 &sc->sp_err_timeout_task, hz/10);
12582 error_recovered =
12583 sc->eth_stats.recoverable_error;
12584 error_unrecovered =
12585 sc->eth_stats.unrecoverable_error;
12587 sc->recovery_state =
12588 BXE_RECOVERY_NIC_LOADING;
12589 if (bxe_nic_load(sc, LOAD_NORMAL)) {
12590 error_unrecovered++;
12591 sc->recovery_state = BXE_RECOVERY_FAILED;
12592 sc->state = BXE_STATE_ERROR;
12593 BLOGE(sc, "Recovery is NOT successfull, "
12594 " state=0x%x recovery_state=0x%x error=%x\n",
12595 sc->state, sc->recovery_state, sc->error_status);
12596 sc->error_status = 0;
12598 sc->recovery_state =
12601 BLOGI(sc, "Recovery is successfull from errors %x,"
12603 " recovery_state=0x%x \n", sc->error_status,
12604 sc->state, sc->recovery_state);
12607 sc->error_status = 0;
12608 BXE_CORE_UNLOCK(sc);
12609 sc->eth_stats.recoverable_error =
12610 error_recovered;
12611 sc->eth_stats.unrecoverable_error =
12612 error_unrecovered;
12623 bxe_handle_error(struct bxe_softc * sc)
12626 if(sc->recovery_state == BXE_RECOVERY_WAIT) {
12629 if(sc->error_status) {
12630 if (sc->state == BXE_STATE_OPEN) {
12631 bxe_int_disable(sc);
12633 if (sc->link_vars.link_up) {
12634 if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
12636 sc->recovery_state = BXE_RECOVERY_INIT;
12637 BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
12638 sc->unit, sc->error_status, sc->recovery_state);
12639 bxe_parity_recover(sc);
12644 bxe_sp_err_timeout_task(void *arg, int pending)
12647 struct bxe_softc *sc = (struct bxe_softc *)arg;
12650 "%s state = 0x%x rec state=0x%x error_status=%x\n",
12651 __func__, sc->state, sc->recovery_state, sc->error_status);
12653 if((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12654 (sc->state == BXE_STATE_ERROR)) {
12657 /* take a grcdump here if one was requested and it can be taken */
12658 if ((sc->error_status) && (sc->trigger_grcdump)) {
12661 if (sc->recovery_state != BXE_RECOVERY_DONE) {
12662 bxe_handle_error(sc);
12663 bxe_parity_recover(sc);
12664 } else if (sc->error_status) {
12665 bxe_handle_error(sc);
12671 /* start the controller */
12672 static __noinline int
12673 bxe_nic_load(struct bxe_softc *sc,
12674 int load_mode)
12680 BXE_CORE_LOCK_ASSERT(sc);
12682 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12684 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12687 /* must be called before memory allocation and HW init */
12688 bxe_ilt_set_info(sc);
12691 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12693 bxe_set_fp_rx_buf_size(sc);
12695 if (bxe_alloc_fp_buffers(sc) != 0) {
12696 BLOGE(sc, "Failed to allocate fastpath memory\n");
12697 sc->state = BXE_STATE_CLOSED;
12699 goto bxe_nic_load_error0;
12702 if (bxe_alloc_mem(sc) != 0) {
12703 sc->state = BXE_STATE_CLOSED;
12705 goto bxe_nic_load_error0;
12708 if (bxe_alloc_fw_stats_mem(sc) != 0) {
12709 sc->state = BXE_STATE_CLOSED;
12711 goto bxe_nic_load_error0;
12715 /* set pf load just before approaching the MCP */
12716 bxe_set_pf_load(sc);
12718 /* if MCP exists send load request and analyze response */
12719 if (!BXE_NOMCP(sc)) {
12720 /* attempt to load pf */
12721 if (bxe_nic_load_request(sc, &load_code) != 0) {
12722 sc->state = BXE_STATE_CLOSED;
12724 goto bxe_nic_load_error1;
12727 /* what did the MCP say? */
12728 if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12729 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12730 sc->state = BXE_STATE_CLOSED;
12732 goto bxe_nic_load_error2;
12735 BLOGI(sc, "Device has no MCP!\n");
12736 load_code = bxe_nic_load_no_mcp(sc);
12739 /* mark PMF if applicable */
12740 bxe_nic_load_pmf(sc, load_code);
12742 /* Init Function state controlling object */
12743 bxe_init_func_obj(sc);
12745 /* Initialize HW */
12746 if (bxe_init_hw(sc, load_code) != 0) {
12747 BLOGE(sc, "HW init failed\n");
12748 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12749 sc->state = BXE_STATE_CLOSED;
12751 goto bxe_nic_load_error2;
12755 /* set ALWAYS_ALIVE bit in shmem */
12756 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12758 sc->flags |= BXE_NO_PULSE;
12760 /* attach interrupts */
12761 if (bxe_interrupt_attach(sc) != 0) {
12762 sc->state = BXE_STATE_CLOSED;
12764 goto bxe_nic_load_error2;
12767 bxe_nic_init(sc, load_code);
12769 /* Init per-function objects */
12772 // XXX bxe_iov_nic_init(sc);
12774 /* set AFEX default VLAN tag to an invalid value */
12775 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12776 // XXX bxe_nic_load_afex_dcc(sc, load_code);
12778 sc->state = BXE_STATE_OPENING_WAITING_PORT;
12779 rc = bxe_func_start(sc);
12781 BLOGE(sc, "Function start failed! rc = %d\n", rc);
12782 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12783 sc->state = BXE_STATE_ERROR;
12784 goto bxe_nic_load_error3;
12787 /* send LOAD_DONE command to MCP */
12788 if (!BXE_NOMCP(sc)) {
12789 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12791 BLOGE(sc, "MCP response failure, aborting\n");
12792 sc->state = BXE_STATE_ERROR;
12794 goto bxe_nic_load_error3;
12798 rc = bxe_setup_leading(sc);
12800 BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12801 sc->state = BXE_STATE_ERROR;
12802 goto bxe_nic_load_error3;
12805 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12806 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12808 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12809 sc->state = BXE_STATE_ERROR;
12810 goto bxe_nic_load_error3;
12814 rc = bxe_init_rss_pf(sc);
12816 BLOGE(sc, "PF RSS init failed\n");
12817 sc->state = BXE_STATE_ERROR;
12818 goto bxe_nic_load_error3;
12823 /* now that the Clients are configured we are ready to work */
12824 sc->state = BXE_STATE_OPEN;
12826 /* Configure a ucast MAC */
12828 rc = bxe_set_eth_mac(sc, TRUE);
12831 BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12832 sc->state = BXE_STATE_ERROR;
12833 goto bxe_nic_load_error3;
12836 if (sc->port.pmf) {
12837 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12838 if (rc) {
12839 sc->state = BXE_STATE_ERROR;
12840 goto bxe_nic_load_error3;
12844 sc->link_params.feature_config_flags &=
12845 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12847 /* start fast path */
12849 /* Initialize Rx filter */
12850 bxe_set_rx_mode(sc);
12853 switch (/* XXX load_mode */LOAD_OPEN) {
12859 case LOAD_LOOPBACK_EXT:
12860 sc->state = BXE_STATE_DIAG;
12867 if (sc->port.pmf) {
12868 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12870 bxe_link_status_update(sc);
12873 /* start the periodic timer callout */
12874 bxe_periodic_start(sc);
12876 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12877 /* mark driver is loaded in shmem2 */
12878 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12879 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12880 (val |
12881 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12882 DRV_FLAGS_CAPABILITIES_LOADED_L2));
12885 /* wait for all pending SP commands to complete */
12886 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12887 BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12888 bxe_periodic_stop(sc);
12889 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12893 /* Tell the stack the driver is running! */
12894 sc->ifnet->if_drv_flags = IFF_DRV_RUNNING;
12896 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12900 bxe_nic_load_error3:
12903 bxe_int_disable_sync(sc, 1);
12905 /* clean out queued objects */
12906 bxe_squeeze_objects(sc);
12909 bxe_interrupt_detach(sc);
12911 bxe_nic_load_error2:
12913 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12914 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12915 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12920 bxe_nic_load_error1:
12922 /* clear pf_load status, as it was already set */
12924 bxe_clear_pf_load(sc);
12927 bxe_nic_load_error0:
12929 bxe_free_fw_stats_mem(sc);
12930 bxe_free_fp_buffers(sc);
12937 bxe_init_locked(struct bxe_softc *sc)
12939 int other_engine = SC_PATH(sc) ? 0 : 1;
12940 uint8_t other_load_status, load_status;
12941 uint8_t global = FALSE;
12942 int rc = 0;
12944 BXE_CORE_LOCK_ASSERT(sc);
12946 /* check if the driver is already running */
12947 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
12948 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12952 if((sc->state == BXE_STATE_ERROR) &&
12953 (sc->recovery_state == BXE_RECOVERY_FAILED)) {
12954 BLOGE(sc, "Initialization not done, "
12955 "as previous recovery failed."
12956 "Reboot/Power-cycle the system\n" );
12961 bxe_set_power_state(sc, PCI_PM_D0);
12964 * If parity occurred during the unload, then attentions and/or
12965 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
12966 * loaded on the current engine to complete the recovery. Parity recovery
12967 * is only relevant for the PF driver.
12969 if (IS_PF(sc)) {
12970 other_load_status = bxe_get_load_status(sc, other_engine);
12971 load_status = bxe_get_load_status(sc, SC_PATH(sc));
12973 if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12974 bxe_chk_parity_attn(sc, &global, TRUE)) {
12977 * If there are attentions and they are in global blocks, set
12978 * the GLOBAL_RESET bit regardless of whether it will be this
12979 * function that will complete the recovery or not.
12981 if (global) {
12982 bxe_set_reset_global(sc);
12986 * Only the first function on the current engine should try
12987 * to recover in open. In case of attentions in global blocks
12988 * only the first in the chip should try to recover.
12990 if ((!load_status && (!global || !other_load_status)) &&
12991 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12992 BLOGI(sc, "Recovered during init\n");
12996 /* recovery has failed... */
12997 bxe_set_power_state(sc, PCI_PM_D3hot);
12998 sc->recovery_state = BXE_RECOVERY_FAILED;
13000 BLOGE(sc, "Recovery flow hasn't properly "
13001 "completed yet, try again later. "
13002 "If you still see this message after a "
13003 "few retries then power cycle is required.\n");
13006 goto bxe_init_locked_done;
13011 sc->recovery_state = BXE_RECOVERY_DONE;
13013 rc = bxe_nic_load(sc, LOAD_OPEN);
13015 bxe_init_locked_done:
13017 if (rc) {
13018 /* Tell the stack the driver is NOT running! */
13019 BLOGE(sc, "Initialization failed, "
13020 "stack notified driver is NOT running!\n");
13021 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
13028 bxe_stop_locked(struct bxe_softc *sc)
13030 BXE_CORE_LOCK_ASSERT(sc);
13031 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
13035 * Handles controller initialization when called from an unlocked routine.
13036 * ifconfig calls this function.
13042 bxe_init(void *xsc)
13044 struct bxe_softc *sc = (struct bxe_softc *)xsc;
13047 bxe_init_locked(sc);
13048 BXE_CORE_UNLOCK(sc);
13052 bxe_init_ifnet(struct bxe_softc *sc)
13054 struct ifnet *ifp;
13056 /* ifconfig entrypoint for media type/status reporting */
13057 ifmedia_init(&sc->ifmedia, IFM_IMASK,
13058 bxe_ifmedia_update,
13059 bxe_ifmedia_status);
13061 /* set the default interface values */
13062 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
13063 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
13064 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
13066 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
13067 BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
13069 /* allocate the ifnet structure */
13070 if ((ifp = if_alloc(IFT_ETHER)) == NULL) {
13071 BLOGE(sc, "Interface allocation failed!\n");
13075 ifp->if_softc = sc;
13076 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13077 ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
13078 ifp->if_ioctl = bxe_ioctl;
13079 ifp->if_start = bxe_tx_start;
13080 #if __FreeBSD_version >= 901504
13081 ifp->if_transmit = bxe_tx_mq_start;
13082 ifp->if_qflush = bxe_mq_flush;
13087 ifp->if_init = bxe_init;
13088 ifp->if_mtu = sc->mtu;
13089 ifp->if_hwassist = (CSUM_IP |
13095 ifp->if_capabilities =
13096 #if __FreeBSD_version < 700000
13098 IFCAP_VLAN_HWTAGGING |
13104 IFCAP_VLAN_HWTAGGING |
13106 IFCAP_VLAN_HWFILTER |
13107 IFCAP_VLAN_HWCSUM |
13115 ifp->if_capenable = ifp->if_capabilities;
13116 ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... */
13117 #if __FreeBSD_version < 1000025
13118 ifp->if_baudrate = 1000000000;
13120 if_initbaudrate(ifp, IF_Gbps(10));
13122 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
13124 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
13125 IFQ_SET_READY(&ifp->if_snd);
13129 /* attach to the Ethernet interface list */
13130 ether_ifattach(ifp, sc->link_params.mac_addr);
13136 bxe_deallocate_bars(struct bxe_softc *sc)
13138 int i;
13140 for (i = 0; i < MAX_BARS; i++) {
13141 if (sc->bar[i].resource != NULL) {
13142 bus_release_resource(sc->dev,
13143 SYS_RES_MEMORY,
13144 sc->bar[i].rid,
13145 sc->bar[i].resource);
13146 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13153 bxe_allocate_bars(struct bxe_softc *sc)
13155 int i;
13156 uint32_t flags;
13158 memset(sc->bar, 0, sizeof(sc->bar));
13160 for (i = 0; i < MAX_BARS; i++) {
13162 /* memory resources reside at BARs 0, 2, 4 */
13163 /* Run `pciconf -lb` to see mappings */
13164 if ((i != 0) && (i != 2) && (i != 4)) {
13168 sc->bar[i].rid = PCIR_BAR(i);
13170 flags = RF_ACTIVE;
13171 if (i == 0) {
13172 flags |= RF_SHAREABLE;
13175 if ((sc->bar[i].resource =
13176 bus_alloc_resource_any(sc->dev,
13183 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
13184 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13185 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13187 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#lx-%#lx (%ld) -> %#jx\n",
13189 rman_get_start(sc->bar[i].resource),
13190 rman_get_end(sc->bar[i].resource),
13191 rman_get_size(sc->bar[i].resource),
13192 (uintmax_t)sc->bar[i].kva);
13199 bxe_get_function_num(struct bxe_softc *sc)
13201 uint32_t val;
13204 * Read the ME register to get the function number. The ME register
13205 * holds the relative-function number and absolute-function number. The
13206 * absolute-function number appears only in E2 and above. Before that
13207 * these bits always contained zero, therefore we cannot blindly use them.
13210 val = REG_RD(sc, BAR_ME_REGISTER);
13212 sc->pfunc_rel =
13213 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13214 sc->path_id =
13215 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13217 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13218 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13220 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13223 BLOGD(sc, DBG_LOAD,
13224 "Relative function %d, Absolute function %d, Path %d\n",
13225 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
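/*
 * Worked example (illustration only) of the computation above: with
 * pfunc_rel = 2 and path_id = 1, 4-port mode yields
 * pfunc_abs = (2 << 1) | 1 = 5, while 2-port mode yields 2 | 1 = 3.
 */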
13229 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13231 uint32_t shmem2_size;
13232 uint32_t offset;
13233 uint32_t mf_cfg_offset_value;
13236 offset = (SHMEM_RD(sc, func_mb) +
13237 (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13240 if (sc->devinfo.shmem2_base != 0) {
13241 shmem2_size = SHMEM2_RD(sc, size);
13242 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13243 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13244 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13245 offset = mf_cfg_offset_value;
13254 bxe_pcie_capability_read(struct bxe_softc *sc,
13255 int reg,
13256 int width)
13258 int pcie_reg;
13260 /* ensure PCIe capability is enabled */
13261 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13262 if (pcie_reg != 0) {
13263 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13264 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13268 BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13274 bxe_is_pcie_pending(struct bxe_softc *sc)
13276 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
13277 PCIM_EXP_STA_TRANSACTION_PND);
13281 * Walk the PCI capabilities list for the device to find what features are
13282 * supported. These capabilities may be enabled/disabled by firmware so it's
13283 * best to walk the list rather than make assumptions.
13286 bxe_probe_pci_caps(struct bxe_softc *sc)
13288 uint16_t link_status;
13289 uint32_t reg = 0;
13291 /* check if PCI Power Management is enabled */
13292 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13294 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13296 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13297 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13301 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
13303 /* handle PCIe 2.0 workarounds for 57710 */
13304 if (CHIP_IS_E1(sc)) {
13305 /* workaround for 57710 errata E4_57710_27462 */
13306 sc->devinfo.pcie_link_speed =
13307 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13309 /* workaround for 57710 errata E4_57710_27488 */
13310 sc->devinfo.pcie_link_width =
13311 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13312 if (sc->devinfo.pcie_link_speed > 1) {
13313 sc->devinfo.pcie_link_width =
13314 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
13317 sc->devinfo.pcie_link_speed =
13318 (link_status & PCIM_LINK_STA_SPEED);
13319 sc->devinfo.pcie_link_width =
13320 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13323 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13324 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
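/*
 * Worked example (illustration only) of the decode above, using the
 * standard PCIe LNKSTA layout (speed in bits 0-3, width in bits 4-9):
 * link_status = 0x0042 gives speed = (0x0042 & PCIM_LINK_STA_SPEED) = 2
 * (5.0 GT/s) and width = (0x0042 & PCIM_LINK_STA_WIDTH) >> 4 = 4 (x4 link).
 */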
13326 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13327 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13329 /* check if MSI capability is enabled */
13330 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13332 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13334 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13335 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13339 /* check if MSI-X capability is enabled */
13340 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13342 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13344 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13345 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13351 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13353 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13354 uint32_t val;
13356 /* get the outer vlan if we're in switch-dependent mode */
13358 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13359 mf_info->ext_id = (uint16_t)val;
13361 mf_info->multi_vnics_mode = 1;
13363 if (!VALID_OVLAN(mf_info->ext_id)) {
13364 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13368 /* get the capabilities */
13369 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13370 FUNC_MF_CFG_PROTOCOL_ISCSI) {
13371 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13372 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13373 FUNC_MF_CFG_PROTOCOL_FCOE) {
13374 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13376 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13379 mf_info->vnics_per_port =
13380 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13386 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13388 uint32_t retval = 0;
13389 uint32_t val;
13391 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13393 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13394 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13395 retval |= MF_PROTO_SUPPORT_ETHERNET;
13397 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13398 retval |= MF_PROTO_SUPPORT_ISCSI;
13400 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13401 retval |= MF_PROTO_SUPPORT_FCOE;
13409 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13411 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13412 uint32_t val;
13415 * There is no outer vlan if we're in switch-independent mode.
13416 * If the mac is valid then assume multi-function.
13419 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13421 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13423 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13425 mf_info->vnics_per_port =
13426 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13432 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13434 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13435 uint32_t e1hov_tag;
13436 uint32_t func_config;
13437 uint32_t niv_config;
13439 mf_info->multi_vnics_mode = 1;
13441 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13442 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13443 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13445 mf_info->ext_id =
13446 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13447 FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13449 mf_info->default_vlan =
13450 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13451 FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13453 mf_info->niv_allowed_priorities =
13454 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13455 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13457 mf_info->niv_default_cos =
13458 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13459 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13461 mf_info->afex_vlan_mode =
13462 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13463 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13465 mf_info->niv_mba_enabled =
13466 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13467 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13469 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13471 mf_info->vnics_per_port =
13472 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13478 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13480 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13481 uint32_t mf_cfg1, mf_cfg2;
13482 uint32_t ovlan1, ovlan2;
13483 uint8_t i, j;
13487 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13489 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13490 mf_info->mf_config[SC_VN(sc)]);
13491 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13492 mf_info->multi_vnics_mode);
13493 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13494 mf_info->vnics_per_port);
13495 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13497 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13498 mf_info->min_bw[0], mf_info->min_bw[1],
13499 mf_info->min_bw[2], mf_info->min_bw[3]);
13500 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13501 mf_info->max_bw[0], mf_info->max_bw[1],
13502 mf_info->max_bw[2], mf_info->max_bw[3]);
13503 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13506 /* various MF mode sanity checks... */
13508 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13509 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13514 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13515 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13516 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13520 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13521 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13522 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13523 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13524 SC_VN(sc), OVLAN(sc));
13528 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13529 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13530 mf_info->multi_vnics_mode, OVLAN(sc));
13535 * Verify all functions are either in MF or SF mode. If MF, make
13536 * sure that all non-hidden functions have a valid ovlan. If SF,
13537 * make sure that all non-hidden functions have an invalid ovlan.
13539 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13540 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13541 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13542 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13543 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13544 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13545 BLOGE(sc, "mf_mode=SD function %d MF config "
13546 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13547 i, mf_info->multi_vnics_mode, ovlan1);
13552 /* Verify all funcs on the same port each have a different ovlan. */
13553 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13554 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13555 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13556 /* iterate from the next function on the port to the max func */
13557 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13558 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13559 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13560 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13561 VALID_OVLAN(ovlan1) &&
13562 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13563 VALID_OVLAN(ovlan2) &&
13564 (ovlan1 == ovlan2)) {
13565 BLOGE(sc, "mf_mode=SD functions %d and %d "
13566 "have the same ovlan (%d)\n",
13572 } /* MULTI_FUNCTION_SD */
13578 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13580 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13581 uint32_t val, mac_upper;
13582 uint8_t i, vnic;
13584 /* initialize mf_info defaults */
13585 mf_info->vnics_per_port = 1;
13586 mf_info->multi_vnics_mode = FALSE;
13587 mf_info->path_has_ovlan = FALSE;
13588 mf_info->mf_mode = SINGLE_FUNCTION;
13590 if (!CHIP_IS_MF_CAP(sc)) {
13594 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13595 BLOGE(sc, "Invalid mf_cfg_base!\n");
13599 /* get the MF mode (switch dependent / independent / single-function) */
13601 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13603 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13605 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13607 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13609 /* check for legal upper mac bytes */
13610 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13611 mf_info->mf_mode = MULTI_FUNCTION_SI;
13613 BLOGE(sc, "Invalid config for Switch Independent mode\n");
13618 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13619 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13621 /* get outer vlan configuration */
13622 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13624 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13625 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13626 mf_info->mf_mode = MULTI_FUNCTION_SD;
13628 BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13633 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13635 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13638 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13641 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13642 * and the MAC address is valid.
13644 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13646 if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13647 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13648 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13650 BLOGE(sc, "Invalid config for AFEX mode\n");
13657 BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13658 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13663 /* set path mf_mode (which could be different than function mf_mode) */
13664 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13665 mf_info->path_has_ovlan = TRUE;
13666 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13668 * Decide on the path multi vnics mode. If we're not in MF mode and are
13669 * in 4-port mode, it is enough to check vnic-0 of the other port on the
13670 * same path.
13672 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13673 uint8_t other_port = !(PORT_ID(sc) & 1);
13674 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13676 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13678 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13682 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13683 /* invalid MF config */
13684 if (SC_VN(sc) >= 1) {
13685 BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13692 /* get the MF configuration */
13693 mf_info->mf_config[SC_VN(sc)] =
13694 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13696 switch(mf_info->mf_mode)
13698 case MULTI_FUNCTION_SD:
13700 bxe_get_shmem_mf_cfg_info_sd(sc);
13703 case MULTI_FUNCTION_SI:
13705 bxe_get_shmem_mf_cfg_info_si(sc);
13708 case MULTI_FUNCTION_AFEX:
13710 bxe_get_shmem_mf_cfg_info_niv(sc);
13715 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13720 /* get the congestion management parameters */
13722 vnic = 0;
13723 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13724 /* get min/max bw */
13725 val = MFCFG_RD(sc, func_mf_config[i].config);
13726 mf_info->min_bw[vnic] =
13727 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13728 mf_info->max_bw[vnic] =
13729 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13730 vnic++;
13733 return (bxe_check_valid_mf_cfg(sc));
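/*
 * Worked example (illustration only) of the bandwidth extraction above,
 * assuming the usual field layout (min_bw in bits 16-23, max_bw in bits
 * 24-31): val = 0x64320000 yields min_bw = 0x32 = 50 and
 * max_bw = 0x64 = 100, both expressed as a share of the physical link rate.
 */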
13737 bxe_get_shmem_info(struct bxe_softc *sc)
13739 uint32_t port;
13740 uint32_t mac_hi, mac_lo, val;
13742 port = SC_PORT(sc);
13743 mac_hi = mac_lo = 0;
13745 sc->link_params.sc = sc;
13746 sc->link_params.port = port;
13748 /* get the hardware config info */
13749 sc->devinfo.hw_config =
13750 SHMEM_RD(sc, dev_info.shared_hw_config.config);
13751 sc->devinfo.hw_config2 =
13752 SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13754 sc->link_params.hw_led_mode =
13755 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13756 SHARED_HW_CFG_LED_MODE_SHIFT);
13758 /* get the port feature config */
13759 sc->port.config =
13760 SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13762 /* get the link params */
13763 sc->link_params.speed_cap_mask[0] =
13764 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13765 sc->link_params.speed_cap_mask[1] =
13766 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13768 /* get the lane config */
13769 sc->link_params.lane_config =
13770 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13772 /* get the link config */
13773 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13774 sc->port.link_config[ELINK_INT_PHY] = val;
13775 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13776 sc->port.link_config[ELINK_EXT_PHY1] =
13777 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13779 /* get the override preemphasis flag and enable it or turn it off */
13780 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13781 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13782 sc->link_params.feature_config_flags |=
13783 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13785 sc->link_params.feature_config_flags &=
13786 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13789 /* get the initial value of the link params */
13790 sc->link_params.multi_phy_config =
13791 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13793 /* get external phy info */
13794 sc->port.ext_phy_config =
13795 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13797 /* get the multifunction configuration */
13798 bxe_get_mf_cfg_info(sc);
13800 /* get the mac address */
13801 if (IS_MF(sc)) {
13802 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13803 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13805 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13806 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13809 if ((mac_lo == 0) && (mac_hi == 0)) {
13810 *sc->mac_addr_str = 0;
13811 BLOGE(sc, "No Ethernet address programmed!\n");
13813 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13814 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13815 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13816 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13817 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13818 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13819 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13820 "%02x:%02x:%02x:%02x:%02x:%02x",
13821 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13822 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13823 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13824 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13831 bxe_get_tunable_params(struct bxe_softc *sc)
13833 /* sanity checks */
13835 if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13836 (bxe_interrupt_mode != INTR_MODE_MSI) &&
13837 (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13838 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13839 bxe_interrupt_mode = INTR_MODE_MSIX;
13842 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13843 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13844 bxe_queue_count = 0;
13847 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13848 if (bxe_max_rx_bufs == 0) {
13849 bxe_max_rx_bufs = RX_BD_USABLE;
13851 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13852 bxe_max_rx_bufs = 2048;
13856 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13857 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13858 bxe_hc_rx_ticks = 25;
13861 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13862 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13863 bxe_hc_tx_ticks = 50;
13866 if (bxe_max_aggregation_size == 0) {
13867 bxe_max_aggregation_size = TPA_AGG_SIZE;
13870 if (bxe_max_aggregation_size > 0xffff) {
13871 BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13872 bxe_max_aggregation_size);
13873 bxe_max_aggregation_size = TPA_AGG_SIZE;
13876 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13877 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13878 bxe_mrrs = -1;
13881 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13882 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13883 bxe_autogreeen = 0;
13886 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13887 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13888 bxe_udp_rss = 0;
13891 /* pull in user settings */
13893 sc->interrupt_mode = bxe_interrupt_mode;
13894 sc->max_rx_bufs = bxe_max_rx_bufs;
13895 sc->hc_rx_ticks = bxe_hc_rx_ticks;
13896 sc->hc_tx_ticks = bxe_hc_tx_ticks;
13897 sc->max_aggregation_size = bxe_max_aggregation_size;
13898 sc->mrrs = bxe_mrrs;
13899 sc->autogreeen = bxe_autogreeen;
13900 sc->udp_rss = bxe_udp_rss;
13902 if (bxe_interrupt_mode == INTR_MODE_INTX) {
13903 sc->num_queues = 1;
13904 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13905 sc->num_queues =
13906 min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13907 MAX_RSS_CHAINS);
13908 if (sc->num_queues > mp_ncpus) {
13909 sc->num_queues = mp_ncpus;
13913 BLOGD(sc, DBG_LOAD,
13914 "User Config: "
13915 "debug=0x%lx "
13916 "interrupt_mode=%d "
13917 "queue_count=%d "
13918 "hc_rx_ticks=%d "
13919 "hc_tx_ticks=%d "
13920 "rx_budget=%d "
13921 "max_aggregation_size=%d "
13922 "mrrs=%d "
13923 "autogreeen=%d "
13924 "udp_rss=%d\n",
13925 sc->debug,
13926 sc->interrupt_mode,
13927 sc->num_queues,
13928 sc->hc_rx_ticks,
13929 sc->hc_tx_ticks,
13930 sc->rx_budget,
13931 sc->max_aggregation_size,
13932 sc->mrrs,
13933 sc->autogreeen,
13934 sc->udp_rss);
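/*
 * Editor's illustrative sketch (not part of the driver): the MSI/MSI-X
 * queue-count selection above reduces to this pure function, where
 * requested == 0 means "auto" (one queue per CPU, capped at
 * MAX_RSS_CHAINS and never more than the CPU count).
 */
static inline int
bxe_sketch_num_queues(int requested, int ncpus, int max_chains)
{
    int n = (requested != 0) ? requested : ncpus;

    if (n > max_chains)
        n = max_chains;
    if (n > ncpus)
        n = ncpus;
    return (n);
}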
13938 bxe_media_detect(struct bxe_softc *sc)
13940 int port_type;
13941 uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13943 switch (sc->link_params.phy[phy_idx].media_type) {
13944 case ELINK_ETH_PHY_SFPP_10G_FIBER:
13945 case ELINK_ETH_PHY_XFP_FIBER:
13946 BLOGI(sc, "Found 10Gb Fiber media.\n");
13947 sc->media = IFM_10G_SR;
13948 port_type = PORT_FIBRE;
13950 case ELINK_ETH_PHY_SFP_1G_FIBER:
13951 BLOGI(sc, "Found 1Gb Fiber media.\n");
13952 sc->media = IFM_1000_SX;
13953 port_type = PORT_FIBRE;
13955 case ELINK_ETH_PHY_KR:
13956 case ELINK_ETH_PHY_CX4:
13957 BLOGI(sc, "Found 10GBase-CX4 media.\n");
13958 sc->media = IFM_10G_CX4;
13959 port_type = PORT_FIBRE;
13961 case ELINK_ETH_PHY_DA_TWINAX:
13962 BLOGI(sc, "Found 10Gb Twinax media.\n");
13963 sc->media = IFM_10G_TWINAX;
13964 port_type = PORT_DA;
13966 case ELINK_ETH_PHY_BASE_T:
13967 if (sc->link_params.speed_cap_mask[0] &
13968 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13969 BLOGI(sc, "Found 10GBase-T media.\n");
13970 sc->media = IFM_10G_T;
13971 port_type = PORT_TP;
13972 } else {
13973 BLOGI(sc, "Found 1000Base-T media.\n");
13974 sc->media = IFM_1000_T;
13975 port_type = PORT_TP;
13978 case ELINK_ETH_PHY_NOT_PRESENT:
13979 BLOGI(sc, "Media not present.\n");
13980 sc->media = 0;
13981 port_type = PORT_OTHER;
13983 case ELINK_ETH_PHY_UNSPECIFIED:
13984 default:
13985 BLOGI(sc, "Unknown media!\n");
13986 sc->media = 0;
13987 port_type = PORT_OTHER;
13993 #define GET_FIELD(value, fname) \
13994 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13995 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13996 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
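/*
 * Editor's illustrative sketch (not part of the driver): GET_FIELD()
 * masks and then shifts, so for a hypothetical field defined with
 * FIELD_MASK 0x0000ff00 and FIELD_SHIFT 8, GET_FIELD(0x1234, FIELD)
 * evaluates to ((0x1234 & 0xff00) >> 8) == 0x12. IGU_FID() and
 * IGU_VEC() apply exactly this pattern to an IGU CAM entry.
 */
static inline uint32_t
bxe_sketch_get_field(uint32_t value, uint32_t mask, uint32_t shift)
{
    return ((value & mask) >> shift);
}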
13999 bxe_get_igu_cam_info(struct bxe_softc *sc)
14001 int pfid = SC_FUNC(sc);
14002 int igu_sb_id;
14003 uint32_t val;
14004 uint8_t fid, igu_sb_cnt = 0;
14006 sc->igu_base_sb = 0xff;
14008 if (CHIP_INT_MODE_IS_BC(sc)) {
14009 int vn = SC_VN(sc);
14010 igu_sb_cnt = sc->igu_sb_cnt;
14011 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
14012 FP_SB_MAX_E1x);
14013 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
14014 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
14018 /* IGU in normal mode - read CAM */
14019 for (igu_sb_id = 0;
14020 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
14021 igu_sb_id++) {
14022 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
14023 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
14024 continue;
14025 }
14026 fid = IGU_FID(val);
14027 if ((fid & IGU_FID_ENCODE_IS_PF)) {
14028 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
14029 continue;
14030 }
14031 if (IGU_VEC(val) == 0) {
14032 /* default status block */
14033 sc->igu_dsb_id = igu_sb_id;
14034 } else {
14035 if (sc->igu_base_sb == 0xff) {
14036 sc->igu_base_sb = igu_sb_id;
14037 }
14038 igu_sb_cnt++;
14044 * Due to new PF resource allocation by the MFW (T7.4 and above), the
14045 * number of CAM entries may not equal the value advertised in PCI config
14046 * space. The driver should use the minimum of the two as the actual
14047 * status block count.
14049 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
14051 if (igu_sb_cnt == 0) {
14052 BLOGE(sc, "CAM configuration error\n");
14053 return (-1);
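/*
 * Editor's illustrative sketch (not part of the driver): the chip_id
 * word composed in bxe_get_device_info() below packs num[31:16],
 * rev[15:12], metal[11:4] and bond[3:0]; this mirrors the decode used
 * in its debug print. The helper name is hypothetical.
 */
static inline void
bxe_sketch_decode_chip_id(uint32_t chip_id, uint16_t *num, uint8_t *rev,
                          uint8_t *metal, uint8_t *bond)
{
    *num   = (uint16_t)((chip_id >> 16) & 0xffff);
    *rev   = (uint8_t)((chip_id >> 12) & 0xf);
    *metal = (uint8_t)((chip_id >> 4) & 0xff);
    *bond  = (uint8_t)(chip_id & 0xf);
}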
14060 * Gather various information from the device config space, the device itself,
14061 * shmem, and the user input.
14064 bxe_get_device_info(struct bxe_softc *sc)
14069 /* Get the data for the device */
14070 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
14071 sc->devinfo.device_id = pci_get_device(sc->dev);
14072 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
14073 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
14075 /* get the chip revision (chip metal comes from pci config space) */
14076 sc->devinfo.chip_id =
14077 sc->link_params.chip_id =
14078 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
14079 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
14080 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
14081 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
14083 /* force 57811 according to MISC register */
14084 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14085 if (CHIP_IS_57810(sc)) {
14086 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14087 (sc->devinfo.chip_id & 0x0000ffff));
14088 } else if (CHIP_IS_57810_MF(sc)) {
14089 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14090 (sc->devinfo.chip_id & 0x0000ffff));
14092 sc->devinfo.chip_id |= 0x1;
14095 BLOGD(sc, DBG_LOAD,
14096 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14097 sc->devinfo.chip_id,
14098 ((sc->devinfo.chip_id >> 16) & 0xffff),
14099 ((sc->devinfo.chip_id >> 12) & 0xf),
14100 ((sc->devinfo.chip_id >> 4) & 0xff),
14101 ((sc->devinfo.chip_id >> 0) & 0xf));
14103 val = (REG_RD(sc, 0x2874) & 0x55);
14104 if ((sc->devinfo.chip_id & 0x1) ||
14105 (CHIP_IS_E1(sc) && val) ||
14106 (CHIP_IS_E1H(sc) && (val == 0x55))) {
14107 sc->flags |= BXE_ONE_PORT_FLAG;
14108 BLOGD(sc, DBG_LOAD, "single port device\n");
14111 /* set the doorbell size */
14112 sc->doorbell_size = (1 << BXE_DB_SHIFT);
14114 /* determine whether the device is in 2 port or 4 port mode */
14115 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14116 if (CHIP_IS_E2E3(sc)) {
14118 * Read port4mode_en_ovwr[0]:
14119 * If 1, four port mode is in port4mode_en_ovwr[1].
14120 * If 0, four port mode is in port4mode_en[0].
14122 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14123 if (val & 1) {
14124 val = ((val >> 1) & 1);
14125 } else {
14126 val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14129 sc->devinfo.chip_port_mode =
14130 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14132 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14135 /* get the function and path info for the device */
14136 bxe_get_function_num(sc);
14138 /* get the shared memory base address */
14139 sc->devinfo.shmem_base =
14140 sc->link_params.shmem_base =
14141 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14142 sc->devinfo.shmem2_base =
14143 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14144 MISC_REG_GENERIC_CR_0));
14146 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14147 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14149 if (!sc->devinfo.shmem_base) {
14150 /* this should ONLY prevent upcoming shmem reads */
14151 BLOGI(sc, "MCP not active\n");
14152 sc->flags |= BXE_NO_MCP_FLAG;
14153 return (0);
14154 }
14156 /* make sure the shared memory contents are valid */
14157 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14158 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14159 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14160 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14161 return (0);
14162 }
14163 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14165 /* get the bootcode version */
14166 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14167 snprintf(sc->devinfo.bc_ver_str,
14168 sizeof(sc->devinfo.bc_ver_str),
14169 "%d.%d.%d",
14170 ((sc->devinfo.bc_ver >> 24) & 0xff),
14171 ((sc->devinfo.bc_ver >> 16) & 0xff),
14172 ((sc->devinfo.bc_ver >> 8) & 0xff));
14173 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14175 /* get the bootcode shmem address */
14176 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14177 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14179 /* clean indirect addresses as they're not used */
14180 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14182 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14183 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14184 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14185 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14186 if (CHIP_IS_E1x(sc)) {
14187 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14188 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14189 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14190 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14194 * Enable internal target-read (in case we are probed after PF
14195 * FLR). Must be done prior to any BAR read access. Only for
14198 if (!CHIP_IS_E1x(sc)) {
14199 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14203 /* get the nvram size */
14204 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14205 sc->devinfo.flash_size =
14206 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14207 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14209 /* get PCI capabilities */
14210 bxe_probe_pci_caps(sc);
14212 bxe_set_power_state(sc, PCI_PM_D0);
14214 /* get various configuration parameters from shmem */
14215 bxe_get_shmem_info(sc);
14217 if (sc->devinfo.pcie_msix_cap_reg != 0) {
14218 val = pci_read_config(sc->dev,
14219 (sc->devinfo.pcie_msix_cap_reg +
14222 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14223 } else {
14224 sc->igu_sb_cnt = 1;
14227 sc->igu_base_addr = BAR_IGU_INTMEM;
14229 /* initialize IGU parameters */
14230 if (CHIP_IS_E1x(sc)) {
14231 sc->devinfo.int_block = INT_BLOCK_HC;
14232 sc->igu_dsb_id = DEF_SB_IGU_ID;
14233 sc->igu_base_sb = 0;
14234 } else {
14235 sc->devinfo.int_block = INT_BLOCK_IGU;
14237 /* do not allow device reset during IGU info processing */
14238 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14240 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14242 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14245 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14247 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14248 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14249 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14251 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14252 tout--;
14253 DELAY(1000);
14254 }
14256 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14257 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14258 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14259 return (-1);
14263 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14264 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14265 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14266 } else {
14267 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14270 rc = bxe_get_igu_cam_info(sc);
14272 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14274 if (rc) {
14275 return (rc);
14276 }
14280 * Get base FW non-default (fast path) status block ID. This value is
14281 * used to initialize the fw_sb_id saved on the fp/queue structure to
14282 * determine the id used by the FW.
14284 if (CHIP_IS_E1x(sc)) {
14285 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14288 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14289 * the same queue are indicated on the same IGU SB). So we prefer
14290 * FW and IGU SBs to be the same value.
14292 sc->base_fw_ndsb = sc->igu_base_sb;
14295 BLOGD(sc, DBG_LOAD,
14296 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14297 sc->igu_dsb_id, sc->igu_base_sb,
14298 sc->igu_sb_cnt, sc->base_fw_ndsb);
14300 elink_phy_probe(&sc->link_params);
14302 return (0);
14306 bxe_link_settings_supported(struct bxe_softc *sc,
14307 uint32_t switch_cfg)
14309 uint32_t cfg_size = 0;
14310 uint32_t idx;
14311 uint8_t port = SC_PORT(sc);
14313 /* aggregation of supported attributes of all external phys */
14314 sc->port.supported[0] = 0;
14315 sc->port.supported[1] = 0;
14317 switch (sc->link_params.num_phys) {
14318 case 1:
14319 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14320 cfg_size = 1;
14321 break;
14322 case 2:
14323 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14324 cfg_size = 1;
14325 break;
14326 case 3:
14327 if (sc->link_params.multi_phy_config &
14328 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14329 sc->port.supported[1] =
14330 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14331 sc->port.supported[0] =
14332 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14333 } else {
14334 sc->port.supported[0] =
14335 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14336 sc->port.supported[1] =
14337 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14338 }
14339 cfg_size = 2;
14340 break;
14341 }
14343 if (!(sc->port.supported[0] || sc->port.supported[1])) {
14344 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14345 SHMEM_RD(sc,
14346 dev_info.port_hw_config[port].external_phy_config),
14347 SHMEM_RD(sc,
14348 dev_info.port_hw_config[port].external_phy_config2));
14349 return;
14352 if (CHIP_IS_E3(sc))
14353 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14354 else {
14355 switch (switch_cfg) {
14356 case ELINK_SWITCH_CFG_1G:
14357 sc->port.phy_addr =
14358 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14360 case ELINK_SWITCH_CFG_10G:
14361 sc->port.phy_addr =
14362 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14364 default:
14365 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14366 sc->port.link_config[0]);
14371 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14373 /* mask what we support according to speed_cap_mask per configuration */
14374 for (idx = 0; idx < cfg_size; idx++) {
14375 if (!(sc->link_params.speed_cap_mask[idx] &
14376 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14377 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14380 if (!(sc->link_params.speed_cap_mask[idx] &
14381 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14382 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14385 if (!(sc->link_params.speed_cap_mask[idx] &
14386 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14387 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14390 if (!(sc->link_params.speed_cap_mask[idx] &
14391 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14392 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14395 if (!(sc->link_params.speed_cap_mask[idx] &
14396 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14397 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14400 if (!(sc->link_params.speed_cap_mask[idx] &
14401 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14402 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14405 if (!(sc->link_params.speed_cap_mask[idx] &
14406 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14407 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14410 if (!(sc->link_params.speed_cap_mask[idx] &
14411 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14412 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14416 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14417 sc->port.supported[0], sc->port.supported[1]);
14418 ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14419 sc->port.supported[0], sc->port.supported[1]);
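/*
 * Editor's illustrative sketch (not part of the driver): each masking
 * step in the loop above clears one ELINK_SUPPORTED_* bit whenever the
 * corresponding PORT_HW_CFG_SPEED_CAPABILITY_D0_* bit is absent from
 * the NVRAM speed_cap_mask; generically:
 */
static inline uint32_t
bxe_sketch_mask_supported(uint32_t supported, uint32_t cap_mask,
                          uint32_t cap_bit, uint32_t supported_bit)
{
    if (!(cap_mask & cap_bit)) {
        supported &= ~supported_bit;
    }
    return (supported);
}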
14423 bxe_link_settings_requested(struct bxe_softc *sc)
14425 uint32_t link_config;
14426 uint32_t idx;
14427 uint32_t cfg_size = 0;
14429 sc->port.advertising[0] = 0;
14430 sc->port.advertising[1] = 0;
14432 switch (sc->link_params.num_phys) {
14433 case 1:
14434 case 2:
14435 cfg_size = 1;
14436 break;
14437 case 3:
14438 cfg_size = 2;
14439 break;
14440 }
14442 for (idx = 0; idx < cfg_size; idx++) {
14443 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14444 link_config = sc->port.link_config[idx];
14446 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14447 case PORT_FEATURE_LINK_SPEED_AUTO:
14448 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14449 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14450 sc->port.advertising[idx] |= sc->port.supported[idx];
14451 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14452 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14453 sc->port.advertising[idx] |=
14454 (ELINK_SUPPORTED_100baseT_Half |
14455 ELINK_SUPPORTED_100baseT_Full);
14456 } else {
14457 /* force 10G, no AN */
14458 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14459 sc->port.advertising[idx] |=
14460 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14465 case PORT_FEATURE_LINK_SPEED_10M_FULL:
14466 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14467 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14468 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14469 ADVERTISED_TP);
14470 } else {
14471 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14472 "speed_cap_mask=0x%08x\n",
14473 link_config, sc->link_params.speed_cap_mask[idx]);
14478 case PORT_FEATURE_LINK_SPEED_10M_HALF:
14479 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14480 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14481 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14482 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14483 ADVERTISED_TP);
14484 ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14485 sc->link_params.req_duplex[idx]);
14486 } else {
14487 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14488 "speed_cap_mask=0x%08x\n",
14489 link_config, sc->link_params.speed_cap_mask[idx]);
14494 case PORT_FEATURE_LINK_SPEED_100M_FULL:
14495 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14496 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14497 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14498 ADVERTISED_TP);
14499 } else {
14500 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14501 "speed_cap_mask=0x%08x\n",
14502 link_config, sc->link_params.speed_cap_mask[idx]);
14507 case PORT_FEATURE_LINK_SPEED_100M_HALF:
14508 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14509 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14510 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14511 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14512 ADVERTISED_TP);
14513 } else {
14514 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14515 "speed_cap_mask=0x%08x\n",
14516 link_config, sc->link_params.speed_cap_mask[idx]);
14521 case PORT_FEATURE_LINK_SPEED_1G:
14522 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14523 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14524 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14525 ADVERTISED_TP);
14526 } else {
14527 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14528 "speed_cap_mask=0x%08x\n",
14529 link_config, sc->link_params.speed_cap_mask[idx]);
14534 case PORT_FEATURE_LINK_SPEED_2_5G:
14535 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14536 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14537 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14538 ADVERTISED_TP);
14539 } else {
14540 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14541 "speed_cap_mask=0x%08x\n",
14542 link_config, sc->link_params.speed_cap_mask[idx]);
14547 case PORT_FEATURE_LINK_SPEED_10G_CX4:
14548 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14549 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14550 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14551 ADVERTISED_FIBRE);
14552 } else {
14553 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14554 "speed_cap_mask=0x%08x\n",
14555 link_config, sc->link_params.speed_cap_mask[idx]);
14560 case PORT_FEATURE_LINK_SPEED_20G:
14561 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14564 default:
14565 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14566 "speed_cap_mask=0x%08x\n",
14567 link_config, sc->link_params.speed_cap_mask[idx]);
14568 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14569 sc->port.advertising[idx] = sc->port.supported[idx];
14573 sc->link_params.req_flow_ctrl[idx] =
14574 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14576 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14577 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14578 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14579 } else {
14580 bxe_set_requested_fc(sc);
14584 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14585 "req_flow_ctrl=0x%x advertising=0x%x\n",
14586 sc->link_params.req_line_speed[idx],
14587 sc->link_params.req_duplex[idx],
14588 sc->link_params.req_flow_ctrl[idx],
14589 sc->port.advertising[idx]);
14590 ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14591 "advertising=0x%x\n",
14592 sc->link_params.req_line_speed[idx],
14593 sc->link_params.req_duplex[idx],
14594 sc->port.advertising[idx]);
14599 bxe_get_phy_info(struct bxe_softc *sc)
14601 uint8_t port = SC_PORT(sc);
14602 uint32_t config = sc->port.config;
14603 uint32_t eee_mode;
14605 /* shmem data already read in bxe_get_shmem_info() */
14607 ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14608 "link_config0=0x%08x\n",
14609 sc->link_params.lane_config,
14610 sc->link_params.speed_cap_mask[0],
14611 sc->port.link_config[0]);
14614 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14615 bxe_link_settings_requested(sc);
14617 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14618 sc->link_params.feature_config_flags |=
14619 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14620 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14621 sc->link_params.feature_config_flags &=
14622 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14623 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14624 sc->link_params.feature_config_flags |=
14625 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14628 /* configure link feature according to nvram value */
14629 eee_mode =
14630 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14631 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14632 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14633 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14634 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14635 ELINK_EEE_MODE_ENABLE_LPI |
14636 ELINK_EEE_MODE_OUTPUT_TIME);
14637 } else {
14638 sc->link_params.eee_mode = 0;
14641 /* get the media type */
14642 bxe_media_detect(sc);
14643 ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
14647 bxe_get_params(struct bxe_softc *sc)
14649 /* get user tunable params */
14650 bxe_get_tunable_params(sc);
14652 /* select the RX and TX ring sizes */
14653 sc->tx_ring_size = TX_BD_USABLE;
14654 sc->rx_ring_size = RX_BD_USABLE;
14656 /* XXX disable WoL */
14661 bxe_set_modes_bitmap(struct bxe_softc *sc)
14663 uint32_t flags = 0;
14665 if (CHIP_REV_IS_FPGA(sc)) {
14666 SET_FLAGS(flags, MODE_FPGA);
14667 } else if (CHIP_REV_IS_EMUL(sc)) {
14668 SET_FLAGS(flags, MODE_EMUL);
14669 } else {
14670 SET_FLAGS(flags, MODE_ASIC);
14673 if (CHIP_IS_MODE_4_PORT(sc)) {
14674 SET_FLAGS(flags, MODE_PORT4);
14675 } else {
14676 SET_FLAGS(flags, MODE_PORT2);
14679 if (CHIP_IS_E2(sc)) {
14680 SET_FLAGS(flags, MODE_E2);
14681 } else if (CHIP_IS_E3(sc)) {
14682 SET_FLAGS(flags, MODE_E3);
14683 if (CHIP_REV(sc) == CHIP_REV_Ax) {
14684 SET_FLAGS(flags, MODE_E3_A0);
14685 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14686 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14690 if (IS_MF(sc)) {
14691 SET_FLAGS(flags, MODE_MF);
14692 switch (sc->devinfo.mf_info.mf_mode) {
14693 case MULTI_FUNCTION_SD:
14694 SET_FLAGS(flags, MODE_MF_SD);
14696 case MULTI_FUNCTION_SI:
14697 SET_FLAGS(flags, MODE_MF_SI);
14699 case MULTI_FUNCTION_AFEX:
14700 SET_FLAGS(flags, MODE_MF_AFEX);
14703 } else {
14704 SET_FLAGS(flags, MODE_SF);
14707 #if defined(__LITTLE_ENDIAN)
14708 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14709 #else /* __BIG_ENDIAN */
14710 SET_FLAGS(flags, MODE_BIG_ENDIAN);
14713 INIT_MODE_FLAGS(sc) = flags;
14717 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14719 struct bxe_fastpath *fp;
14720 bus_addr_t busaddr;
14721 int max_agg_queues;
14722 int max_segments;
14723 bus_size_t max_size;
14724 bus_size_t max_seg_size;
14725 char buf[32];
14726 int rc;
14727 int i, j;
14729 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14731 /* allocate the parent bus DMA tag */
14732 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14733 1, /* alignment */
14734 0, /* boundary limit */
14735 BUS_SPACE_MAXADDR, /* restricted low */
14736 BUS_SPACE_MAXADDR, /* restricted hi */
14737 NULL, /* addr filter() */
14738 NULL, /* addr filter() arg */
14739 BUS_SPACE_MAXSIZE_32BIT, /* max map size */
14740 BUS_SPACE_UNRESTRICTED, /* num discontinuous */
14741 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
14742 0, /* flags */
14743 NULL, /* lock() */
14744 NULL, /* lock() arg */
14745 &sc->parent_dma_tag); /* returned dma tag */
14746 if (rc != 0) {
14747 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14748 return (1);
14749 }
14751 /************************/
14752 /* DEFAULT STATUS BLOCK */
14753 /************************/
14755 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14756 &sc->def_sb_dma, "default status block") != 0) {
14758 bus_dma_tag_destroy(sc->parent_dma_tag);
14759 return (1);
14762 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14768 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14769 &sc->eq_dma, "event queue") != 0) {
14771 bxe_dma_free(sc, &sc->def_sb_dma);
14773 bus_dma_tag_destroy(sc->parent_dma_tag);
14774 return (1);
14777 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14783 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14784 &sc->sp_dma, "slow path") != 0) {
14786 bxe_dma_free(sc, &sc->eq_dma);
14788 bxe_dma_free(sc, &sc->def_sb_dma);
14790 bus_dma_tag_destroy(sc->parent_dma_tag);
14791 return (1);
14794 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14796 /*******************/
14797 /* SLOW PATH QUEUE */
14798 /*******************/
14800 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14801 &sc->spq_dma, "slow path queue") != 0) {
14803 bxe_dma_free(sc, &sc->sp_dma);
14805 bxe_dma_free(sc, &sc->eq_dma);
14807 bxe_dma_free(sc, &sc->def_sb_dma);
14809 bus_dma_tag_destroy(sc->parent_dma_tag);
14810 return (1);
14813 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14815 /***************************/
14816 /* FW DECOMPRESSION BUFFER */
14817 /***************************/
14819 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14820 "fw decompression buffer") != 0) {
14822 bxe_dma_free(sc, &sc->spq_dma);
14824 bxe_dma_free(sc, &sc->sp_dma);
14826 bxe_dma_free(sc, &sc->eq_dma);
14828 bxe_dma_free(sc, &sc->def_sb_dma);
14830 bus_dma_tag_destroy(sc->parent_dma_tag);
14831 return (1);
14834 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14836 if ((sc->gz_strm =
14837 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14839 bxe_dma_free(sc, &sc->gz_buf_dma);
14841 bxe_dma_free(sc, &sc->spq_dma);
14843 bxe_dma_free(sc, &sc->sp_dma);
14845 bxe_dma_free(sc, &sc->eq_dma);
14847 bxe_dma_free(sc, &sc->def_sb_dma);
14849 bus_dma_tag_destroy(sc->parent_dma_tag);
14850 return (1);
14857 /* allocate DMA memory for each fastpath structure */
14858 for (i = 0; i < sc->num_queues; i++) {
14859 fp = &sc->fp[i];
14860 fp->sc = sc;
14861 fp->index = i;
14863 /*******************/
14864 /* FP STATUS BLOCK */
14865 /*******************/
14867 snprintf(buf, sizeof(buf), "fp %d status block", i);
14868 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14869 &fp->sb_dma, buf) != 0) {
14870 /* XXX unwind and free previous fastpath allocations */
14871 BLOGE(sc, "Failed to alloc %s\n", buf);
14874 if (CHIP_IS_E2E3(sc)) {
14875 fp->status_block.e2_sb =
14876 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14877 } else {
14878 fp->status_block.e1x_sb =
14879 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14883 /******************/
14884 /* FP TX BD CHAIN */
14885 /******************/
14887 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14888 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14889 &fp->tx_dma, buf) != 0) {
14890 /* XXX unwind and free previous fastpath allocations */
14891 BLOGE(sc, "Failed to alloc %s\n", buf);
14894 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14897 /* link together the tx bd chain pages */
14898 for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14899 /* index into the tx bd chain array to last entry per page */
14900 struct eth_tx_next_bd *tx_next_bd =
14901 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14902 /* point to the next page and wrap from last page */
14903 busaddr = (fp->tx_dma.paddr +
14904 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14905 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14906 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14909 /******************/
14910 /* FP RX BD CHAIN */
14911 /******************/
14913 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14914 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14915 &fp->rx_dma, buf) != 0) {
14916 /* XXX unwind and free previous fastpath allocations */
14917 BLOGE(sc, "Failed to alloc %s\n", buf);
14920 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14923 /* link together the rx bd chain pages */
14924 for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14925 /* index into the rx bd chain array to last entry per page */
14926 struct eth_rx_bd *rx_bd =
14927 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14928 /* point to the next page and wrap from last page */
14929 busaddr = (fp->rx_dma.paddr +
14930 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14931 rx_bd->addr_hi = htole32(U64_HI(busaddr));
14932 rx_bd->addr_lo = htole32(U64_LO(busaddr));
14935 /*******************/
14936 /* FP RX RCQ CHAIN */
14937 /*******************/
14939 snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14940 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14941 &fp->rcq_dma, buf) != 0) {
14942 /* XXX unwind and free previous fastpath allocations */
14943 BLOGE(sc, "Failed to alloc %s\n", buf);
14946 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14949 /* link together the rcq chain pages */
14950 for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14951 /* index into the rcq chain array to last entry per page */
14952 struct eth_rx_cqe_next_page *rx_cqe_next =
14953 (struct eth_rx_cqe_next_page *)
14954 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14955 /* point to the next page and wrap from last page */
14956 busaddr = (fp->rcq_dma.paddr +
14957 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14958 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14959 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14962 /*******************/
14963 /* FP RX SGE CHAIN */
14964 /*******************/
14966 snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14967 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14968 &fp->rx_sge_dma, buf) != 0) {
14969 /* XXX unwind and free previous fastpath allocations */
14970 BLOGE(sc, "Failed to alloc %s\n", buf);
14973 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14976 /* link together the sge chain pages */
14977 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14978 /* index into the rcq chain array to last entry per page */
14979 struct eth_rx_sge *rx_sge =
14980 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14981 /* point to the next page and wrap from last page */
14982 busaddr = (fp->rx_sge_dma.paddr +
14983 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14984 rx_sge->addr_hi = htole32(U64_HI(busaddr));
14985 rx_sge->addr_lo = htole32(U64_LO(busaddr));
14988 /***********************/
14989 /* FP TX MBUF DMA MAPS */
14990 /***********************/
14992 /* set required sizes before mapping to conserve resources */
14993 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
14994 max_size = BXE_TSO_MAX_SIZE;
14995 max_segments = BXE_TSO_MAX_SEGMENTS;
14996 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14997 } else {
14998 max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
14999 max_segments = BXE_MAX_SEGMENTS;
15000 max_seg_size = MCLBYTES;
15003 /* create a dma tag for the tx mbufs */
15004 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15005 1, /* alignment */
15006 0, /* boundary limit */
15007 BUS_SPACE_MAXADDR, /* restricted low */
15008 BUS_SPACE_MAXADDR, /* restricted hi */
15009 NULL, /* addr filter() */
15010 NULL, /* addr filter() arg */
15011 max_size, /* max map size */
15012 max_segments, /* num discontinuous */
15013 max_seg_size, /* max seg size */
15014 0, /* flags */
15015 NULL, /* lock() */
15016 NULL, /* lock() arg */
15017 &fp->tx_mbuf_tag); /* returned dma tag */
15018 if (rc != 0) {
15019 /* XXX unwind and free previous fastpath allocations */
15020 BLOGE(sc, "Failed to create dma tag for "
15021 "'fp %d tx mbufs' (%d)\n", i, rc);
15025 /* create dma maps for each of the tx mbuf clusters */
15026 for (j = 0; j < TX_BD_TOTAL; j++) {
15027 if (bus_dmamap_create(fp->tx_mbuf_tag,
15028 BUS_DMA_NOWAIT,
15029 &fp->tx_mbuf_chain[j].m_map)) {
15030 /* XXX unwind and free previous fastpath allocations */
15031 BLOGE(sc, "Failed to create dma map for "
15032 "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
15037 /***********************/
15038 /* FP RX MBUF DMA MAPS */
15039 /***********************/
15041 /* create a dma tag for the rx mbufs */
15042 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15043 1, /* alignment */
15044 0, /* boundary limit */
15045 BUS_SPACE_MAXADDR, /* restricted low */
15046 BUS_SPACE_MAXADDR, /* restricted hi */
15047 NULL, /* addr filter() */
15048 NULL, /* addr filter() arg */
15049 MJUM9BYTES, /* max map size */
15050 1, /* num discontinuous */
15051 MJUM9BYTES, /* max seg size */
15052 0, /* flags */
15053 NULL, /* lock() */
15054 NULL, /* lock() arg */
15055 &fp->rx_mbuf_tag); /* returned dma tag */
15056 if (rc != 0) {
15057 /* XXX unwind and free previous fastpath allocations */
15058 BLOGE(sc, "Failed to create dma tag for "
15059 "'fp %d rx mbufs' (%d)\n", i, rc);
15063 /* create dma maps for each of the rx mbuf clusters */
15064 for (j = 0; j < RX_BD_TOTAL; j++) {
15065 if (bus_dmamap_create(fp->rx_mbuf_tag,
15066 BUS_DMA_NOWAIT,
15067 &fp->rx_mbuf_chain[j].m_map)) {
15068 /* XXX unwind and free previous fastpath allocations */
15069 BLOGE(sc, "Failed to create dma map for "
15070 "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
15075 /* create dma map for the spare rx mbuf cluster */
15076 if (bus_dmamap_create(fp->rx_mbuf_tag,
15077 BUS_DMA_NOWAIT,
15078 &fp->rx_mbuf_spare_map)) {
15079 /* XXX unwind and free previous fastpath allocations */
15080 BLOGE(sc, "Failed to create dma map for "
15081 "'fp %d spare rx mbuf' (%d)\n", i, rc);
15085 /***************************/
15086 /* FP RX SGE MBUF DMA MAPS */
15087 /***************************/
15089 /* create a dma tag for the rx sge mbufs */
15090 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15091 1, /* alignment */
15092 0, /* boundary limit */
15093 BUS_SPACE_MAXADDR, /* restricted low */
15094 BUS_SPACE_MAXADDR, /* restricted hi */
15095 NULL, /* addr filter() */
15096 NULL, /* addr filter() arg */
15097 BCM_PAGE_SIZE, /* max map size */
15098 1, /* num discontinuous */
15099 BCM_PAGE_SIZE, /* max seg size */
15100 0, /* flags */
15101 NULL, /* lock() */
15102 NULL, /* lock() arg */
15103 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15104 if (rc != 0) {
15105 /* XXX unwind and free previous fastpath allocations */
15106 BLOGE(sc, "Failed to create dma tag for "
15107 "'fp %d rx sge mbufs' (%d)\n", i, rc);
15111 /* create dma maps for the rx sge mbuf clusters */
15112 for (j = 0; j < RX_SGE_TOTAL; j++) {
15113 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15114 BUS_DMA_NOWAIT,
15115 &fp->rx_sge_mbuf_chain[j].m_map)) {
15116 /* XXX unwind and free previous fastpath allocations */
15117 BLOGE(sc, "Failed to create dma map for "
15118 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15123 /* create dma map for the spare rx sge mbuf cluster */
15124 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15125 BUS_DMA_NOWAIT,
15126 &fp->rx_sge_mbuf_spare_map)) {
15127 /* XXX unwind and free previous fastpath allocations */
15128 BLOGE(sc, "Failed to create dma map for "
15129 "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15133 /***************************/
15134 /* FP RX TPA MBUF DMA MAPS */
15135 /***************************/
15137 /* create dma maps for the rx tpa mbuf clusters */
15138 max_agg_queues = MAX_AGG_QS(sc);
15140 for (j = 0; j < max_agg_queues; j++) {
15141 if (bus_dmamap_create(fp->rx_mbuf_tag,
15142 BUS_DMA_NOWAIT,
15143 &fp->rx_tpa_info[j].bd.m_map)) {
15144 /* XXX unwind and free previous fastpath allocations */
15145 BLOGE(sc, "Failed to create dma map for "
15146 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15151 /* create dma map for the spare rx tpa mbuf cluster */
15152 if (bus_dmamap_create(fp->rx_mbuf_tag,
15153 BUS_DMA_NOWAIT,
15154 &fp->rx_tpa_info_mbuf_spare_map)) {
15155 /* XXX unwind and free previous fastpath allocations */
15156 BLOGE(sc, "Failed to create dma map for "
15157 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15161 bxe_init_sge_ring_bit_mask(fp);
15162 }
15164 return (0);
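/*
 * Editor's illustrative sketch (not part of the driver): every BD ring
 * allocated above spans several pages whose last entry points at the
 * next page, with the (j % NUM_PAGES) term wrapping the final page back
 * to the first. The address written into the "next" entry of page j-1
 * is simply:
 */
static inline uint64_t
bxe_sketch_next_page_addr(uint64_t ring_paddr, int j, int num_pages,
                          int page_size)
{
    return (ring_paddr + ((uint64_t)page_size * (j % num_pages)));
}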
15168 bxe_free_hsi_mem(struct bxe_softc *sc)
15170 struct bxe_fastpath *fp;
15171 int max_agg_queues;
15172 int i, j;
15174 if (sc->parent_dma_tag == NULL) {
15175 return; /* assume nothing was allocated */
15178 for (i = 0; i < sc->num_queues; i++) {
15179 fp = &sc->fp[i];
15181 /*******************/
15182 /* FP STATUS BLOCK */
15183 /*******************/
15185 bxe_dma_free(sc, &fp->sb_dma);
15186 memset(&fp->status_block, 0, sizeof(fp->status_block));
15188 /******************/
15189 /* FP TX BD CHAIN */
15190 /******************/
15192 bxe_dma_free(sc, &fp->tx_dma);
15193 fp->tx_chain = NULL;
15195 /******************/
15196 /* FP RX BD CHAIN */
15197 /******************/
15199 bxe_dma_free(sc, &fp->rx_dma);
15200 fp->rx_chain = NULL;
15202 /*******************/
15203 /* FP RX RCQ CHAIN */
15204 /*******************/
15206 bxe_dma_free(sc, &fp->rcq_dma);
15207 fp->rcq_chain = NULL;
15209 /*******************/
15210 /* FP RX SGE CHAIN */
15211 /*******************/
15213 bxe_dma_free(sc, &fp->rx_sge_dma);
15214 fp->rx_sge_chain = NULL;
15216 /***********************/
15217 /* FP TX MBUF DMA MAPS */
15218 /***********************/
15220 if (fp->tx_mbuf_tag != NULL) {
15221 for (j = 0; j < TX_BD_TOTAL; j++) {
15222 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15223 bus_dmamap_unload(fp->tx_mbuf_tag,
15224 fp->tx_mbuf_chain[j].m_map);
15225 bus_dmamap_destroy(fp->tx_mbuf_tag,
15226 fp->tx_mbuf_chain[j].m_map);
15230 bus_dma_tag_destroy(fp->tx_mbuf_tag);
15231 fp->tx_mbuf_tag = NULL;
15234 /***********************/
15235 /* FP RX MBUF DMA MAPS */
15236 /***********************/
15238 if (fp->rx_mbuf_tag != NULL) {
15239 for (j = 0; j < RX_BD_TOTAL; j++) {
15240 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15241 bus_dmamap_unload(fp->rx_mbuf_tag,
15242 fp->rx_mbuf_chain[j].m_map);
15243 bus_dmamap_destroy(fp->rx_mbuf_tag,
15244 fp->rx_mbuf_chain[j].m_map);
15248 if (fp->rx_mbuf_spare_map != NULL) {
15249 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15250 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15253 /***************************/
15254 /* FP RX TPA MBUF DMA MAPS */
15255 /***************************/
15257 max_agg_queues = MAX_AGG_QS(sc);
15259 for (j = 0; j < max_agg_queues; j++) {
15260 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15261 bus_dmamap_unload(fp->rx_mbuf_tag,
15262 fp->rx_tpa_info[j].bd.m_map);
15263 bus_dmamap_destroy(fp->rx_mbuf_tag,
15264 fp->rx_tpa_info[j].bd.m_map);
15268 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15269 bus_dmamap_unload(fp->rx_mbuf_tag,
15270 fp->rx_tpa_info_mbuf_spare_map);
15271 bus_dmamap_destroy(fp->rx_mbuf_tag,
15272 fp->rx_tpa_info_mbuf_spare_map);
15275 bus_dma_tag_destroy(fp->rx_mbuf_tag);
15276 fp->rx_mbuf_tag = NULL;
15279 /***************************/
15280 /* FP RX SGE MBUF DMA MAPS */
15281 /***************************/
15283 if (fp->rx_sge_mbuf_tag != NULL) {
15284 for (j = 0; j < RX_SGE_TOTAL; j++) {
15285 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15286 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15287 fp->rx_sge_mbuf_chain[j].m_map);
15288 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15289 fp->rx_sge_mbuf_chain[j].m_map);
15293 if (fp->rx_sge_mbuf_spare_map != NULL) {
15294 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15295 fp->rx_sge_mbuf_spare_map);
15296 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15297 fp->rx_sge_mbuf_spare_map);
15300 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15301 fp->rx_sge_mbuf_tag = NULL;
15305 /***************************/
15306 /* FW DECOMPRESSION BUFFER */
15307 /***************************/
15309 bxe_dma_free(sc, &sc->gz_buf_dma);
15311 free(sc->gz_strm, M_DEVBUF);
15312 sc->gz_strm = NULL;
15314 /*******************/
15315 /* SLOW PATH QUEUE */
15316 /*******************/
15318 bxe_dma_free(sc, &sc->spq_dma);
15325 bxe_dma_free(sc, &sc->sp_dma);
15332 bxe_dma_free(sc, &sc->eq_dma);
15335 /************************/
15336 /* DEFAULT STATUS BLOCK */
15337 /************************/
15339 bxe_dma_free(sc, &sc->def_sb_dma);
15342 bus_dma_tag_destroy(sc->parent_dma_tag);
15343 sc->parent_dma_tag = NULL;
15347 * A previous driver DMAE transaction may have occurred when the pre-boot
15348 * stage ended and boot began. This would invalidate the transaction
15349 * addresses, resulting in the was-error bit being set in PGLUE_B and causing
15350 * all hw-to-host PCIe transactions to time out. If this happened, we want to
15351 * clear from PGLUE_B the interrupt which detected this, and the was-error bit.
15354 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15358 if (!CHIP_IS_E1x(sc)) {
15359 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15360 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15361 BLOGD(sc, DBG_LOAD,
15362 "Clearing 'was-error' bit that was set in pglueb");
15363 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15369 bxe_prev_mcp_done(struct bxe_softc *sc)
15371 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15372 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15374 BLOGE(sc, "MCP response failure, aborting\n");
15381 static struct bxe_prev_list_node *
15382 bxe_prev_path_get_entry(struct bxe_softc *sc)
15384 struct bxe_prev_list_node *tmp;
15386 LIST_FOREACH(tmp, &bxe_prev_list, node) {
15387 if ((sc->pcie_bus == tmp->bus) &&
15388 (sc->pcie_device == tmp->slot) &&
15389 (SC_PATH(sc) == tmp->path)) {
15390 return (tmp);
15391 }
15392 }
15394 return (NULL);
15398 bxe_prev_is_path_marked(struct bxe_softc *sc)
15400 struct bxe_prev_list_node *tmp;
15401 uint8_t rc = FALSE;
15403 mtx_lock(&bxe_prev_mtx);
15405 tmp = bxe_prev_path_get_entry(sc);
15406 if (tmp) {
15407 if (tmp->aer) {
15408 BLOGD(sc, DBG_LOAD,
15409 "Path %d/%d/%d was marked by AER\n",
15410 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15411 } else {
15412 rc = TRUE;
15413 BLOGD(sc, DBG_LOAD,
15414 "Path %d/%d/%d was already cleaned from previous drivers\n",
15415 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15419 mtx_unlock(&bxe_prev_mtx);
15421 return (rc);
15425 bxe_prev_mark_path(struct bxe_softc *sc,
15426 uint8_t after_undi)
15428 struct bxe_prev_list_node *tmp;
15430 mtx_lock(&bxe_prev_mtx);
15432 /* Check whether the entry for this path already exists */
15433 tmp = bxe_prev_path_get_entry(sc);
15434 if (tmp) {
15435 if (!tmp->aer) {
15436 BLOGD(sc, DBG_LOAD,
15437 "Re-marking AER in path %d/%d/%d\n",
15438 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15439 } else {
15440 BLOGD(sc, DBG_LOAD,
15441 "Removing AER indication from path %d/%d/%d\n",
15442 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15443 tmp->aer = 0;
15444 }
15446 mtx_unlock(&bxe_prev_mtx);
15447 return (0);
15448 }
15450 mtx_unlock(&bxe_prev_mtx);
15452 /* Create an entry for this path and add it */
15453 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15454 (M_NOWAIT | M_ZERO));
15456 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15460 tmp->bus = sc->pcie_bus;
15461 tmp->slot = sc->pcie_device;
15462 tmp->path = SC_PATH(sc);
15463 tmp->aer = 0;
15464 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15466 mtx_lock(&bxe_prev_mtx);
15468 BLOGD(sc, DBG_LOAD,
15469 "Marked path %d/%d/%d - finished previous unload\n",
15470 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15471 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15473 mtx_unlock(&bxe_prev_mtx);
15475 return (0);
15479 bxe_do_flr(struct bxe_softc *sc)
15481 uint32_t i;
15483 /* only E2 and onwards support FLR */
15484 if (CHIP_IS_E1x(sc)) {
15485 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15486 return (-1);
15489 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15490 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15491 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15492 sc->devinfo.bc_ver);
15493 return (-1);
15496 /* Wait for Transaction Pending bit clean */
15497 for (i = 0; i < 4; i++) {
15498 if (i) {
15499 DELAY(((1 << (i - 1)) * 100) * 1000);
15500 }
15502 if (!bxe_is_pcie_pending(sc)) {
15503 goto clear;
15504 }
15505 }
15507 BLOGE(sc, "PCIE transaction is not cleared, "
15508 "proceeding with reset anyway\n");
15511 clear:
15512 BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15513 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15515 return (0);
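/*
 * Editor's illustrative sketch (not part of the driver): the
 * pending-transaction poll above backs off exponentially, waiting
 * 0ms, 100ms, 200ms and 400ms before its four checks (about 700ms
 * total) before giving up and resetting anyway.
 */
static inline int
bxe_sketch_flr_wait_msecs(int attempt)
{
    return ((attempt == 0) ? 0 : ((1 << (attempt - 1)) * 100));
}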
15518 struct bxe_mac_vals {
15519 uint32_t xmac_addr;
15520 uint32_t xmac_val;
15521 uint32_t emac_addr;
15522 uint32_t emac_val;
15523 uint32_t umac_addr;
15524 uint32_t umac_val;
15525 uint32_t bmac_addr;
15526 uint32_t bmac_val[2];
15527 };
15530 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15531 struct bxe_mac_vals *vals)
15533 uint32_t val, base_addr, offset, mask, reset_reg;
15534 uint8_t mac_stopped = FALSE;
15535 uint8_t port = SC_PORT(sc);
15536 uint32_t wb_data[2];
15538 /* reset addresses as they also mark which values were changed */
15539 vals->bmac_addr = 0;
15540 vals->umac_addr = 0;
15541 vals->xmac_addr = 0;
15542 vals->emac_addr = 0;
15544 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15546 if (!CHIP_IS_E3(sc)) {
15547 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15548 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15549 if ((mask & reset_reg) && val) {
15550 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15551 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15552 : NIG_REG_INGRESS_BMAC0_MEM;
15553 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15554 : BIGMAC_REGISTER_BMAC_CONTROL;
15557 * use rd/wr since we cannot use dmae. This is safe
15558 * since MCP won't access the bus due to the request
15559 * to unload, and no function on the path can be
15560 * loaded at this time.
15562 wb_data[0] = REG_RD(sc, base_addr + offset);
15563 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15564 vals->bmac_addr = base_addr + offset;
15565 vals->bmac_val[0] = wb_data[0];
15566 vals->bmac_val[1] = wb_data[1];
15567 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15568 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15569 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15572 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15573 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15574 vals->emac_val = REG_RD(sc, vals->emac_addr);
15575 REG_WR(sc, vals->emac_addr, 0);
15576 mac_stopped = TRUE;
15577 } else {
15578 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15579 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15580 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15581 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15582 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15583 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15584 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15585 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15586 REG_WR(sc, vals->xmac_addr, 0);
15587 mac_stopped = TRUE;
15590 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15591 if (mask & reset_reg) {
15592 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15593 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15594 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15595 vals->umac_val = REG_RD(sc, vals->umac_addr);
15596 REG_WR(sc, vals->umac_addr, 0);
15597 mac_stopped = TRUE;
15598 }
15599 }
15601 if (mac_stopped) {
15602 DELAY(20000);
15603 }
15606 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15607 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
15608 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
15609 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
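/*
 * Editor's illustrative sketch (not part of the driver): the UNDI
 * producer word holds the BD producer in its upper 16 bits and the RCQ
 * producer in its lower 16 bits, so bumping both is a decode, add and
 * re-encode of a single 32-bit register, as the macros above express.
 */
static inline uint32_t
bxe_sketch_undi_prod_inc(uint32_t prod, uint16_t inc)
{
    uint16_t rcq = (uint16_t)(prod & 0xffff) + inc;         /* BXE_PREV_UNDI_RCQ */
    uint16_t bd  = (uint16_t)((prod >> 16) & 0xffff) + inc; /* BXE_PREV_UNDI_BD  */

    return (((uint32_t)bd << 16) | rcq);                    /* BXE_PREV_UNDI_PROD */
}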
15612 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15613 uint8_t port,
15614 uint8_t inc)
15616 uint16_t rcq, bd;
15617 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15619 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15620 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15622 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15623 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15625 BLOGD(sc, DBG_LOAD,
15626 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15631 bxe_prev_unload_common(struct bxe_softc *sc)
15633 uint32_t reset_reg, tmp_reg = 0, rc;
15634 uint8_t prev_undi = FALSE;
15635 struct bxe_mac_vals mac_vals;
15636 uint32_t timer_count = 1000;
15637 uint32_t prev_brb;
15640 * It is possible a previous function received 'common' answer,
15641 * but hasn't loaded yet, therefore creating a scenario of
15642 * multiple functions receiving 'common' on the same path.
15644 BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15646 memset(&mac_vals, 0, sizeof(mac_vals));
15648 if (bxe_prev_is_path_marked(sc)) {
15649 return (bxe_prev_mcp_done(sc));
15652 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15654 /* Reset should be performed after BRB is emptied */
15655 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15656 /* Close the MAC Rx to prevent BRB from filling up */
15657 bxe_prev_unload_close_mac(sc, &mac_vals);
15659 /* close LLH filters towards the BRB */
15660 elink_set_rx_filter(&sc->link_params, 0);
15663 * Check if the UNDI driver was previously loaded.
15664 * UNDI driver initializes CID offset for normal bell to 0x7
15666 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15667 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15668 if (tmp_reg == 0x7) {
15669 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15671 /* clear the UNDI indication */
15672 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15673 /* clear possible idle check errors */
15674 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15678 /* wait until BRB is empty */
15679 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15680 while (timer_count) {
15681 prev_brb = tmp_reg;
15683 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15684 if (!tmp_reg) {
15685 break;
15686 }
15688 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15690 /* reset timer as long as BRB actually gets emptied */
15691 if (prev_brb > tmp_reg) {
15692 timer_count = 1000;
15693 } else {
15694 timer_count--;
15695 }
15697 /* If UNDI resides in memory, manually increment it */
15698 if (prev_undi) {
15699 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15700 }
15702 DELAY(10);
15705 if (!timer_count) {
15706 BLOGE(sc, "Failed to empty BRB\n");
15710 /* No packets are in the pipeline, path is ready for reset */
15711 bxe_reset_common(sc);
15713 if (mac_vals.xmac_addr) {
15714 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15716 if (mac_vals.umac_addr) {
15717 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15719 if (mac_vals.emac_addr) {
15720 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15722 if (mac_vals.bmac_addr) {
15723 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15724 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15727 rc = bxe_prev_mark_path(sc, prev_undi);
15728 if (rc) {
15729 bxe_prev_mcp_done(sc);
15730 return (rc);
15731 }
15733 return (bxe_prev_mcp_done(sc));
15737 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15741 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15743 /* Test if previous unload process was already finished for this path */
15744 if (bxe_prev_is_path_marked(sc)) {
15745 return (bxe_prev_mcp_done(sc));
15748 BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15751 * If function has FLR capabilities, and existing FW version matches
15752 * the one required, then FLR will be sufficient to clean any residue
15753 * left by previous driver
15755 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15756 if (!rc) {
15757 /* fw version is good */
15758 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15759 rc = bxe_do_flr(sc);
15762 if (!rc) {
15763 /* FLR was performed */
15764 BLOGD(sc, DBG_LOAD, "FLR successful\n");
15768 BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15770 /* Close the MCP request, return failure*/
15771 rc = bxe_prev_mcp_done(sc);
15772 if (!rc) {
15773 rc = BXE_PREV_WAIT_NEEDED;
15774 }
15776 return (rc);
15780 bxe_prev_unload(struct bxe_softc *sc)
15782 int time_counter = 10;
15783 uint32_t fw, hw_lock_reg, hw_lock_val;
15784 uint32_t rc = 0;
15787 * Clear HW from errors which may have resulted from an interrupted
15788 * DMAE transaction.
15790 bxe_prev_interrupted_dmae(sc);
15792 /* Release previously held locks */
15793 hw_lock_reg =
15794 (SC_FUNC(sc) <= 5) ?
15795 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15796 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15798 hw_lock_val = (REG_RD(sc, hw_lock_reg));
15799 if (hw_lock_val) {
15800 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15801 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15802 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15803 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15805 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15806 REG_WR(sc, hw_lock_reg, 0xffffffff);
15807 } else {
15808 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15811 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15812 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15813 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15816 do {
15817 /* Lock MCP using an unload request */
15818 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15819 if (!fw) {
15820 BLOGE(sc, "MCP response failure, aborting\n");
15821 rc = -1;
15822 break;
15823 }
15825 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15826 rc = bxe_prev_unload_common(sc);
15827 break;
15828 }
15830 /* non-common reply from MCP might require looping */
15831 rc = bxe_prev_unload_uncommon(sc);
15832 if (rc != BXE_PREV_WAIT_NEEDED) {
15837 } while (--time_counter);
15839 if (!time_counter || rc) {
15840 BLOGE(sc, "Failed to unload previous driver!"
15841 " time_counter %d rc %d\n", time_counter, rc);
15849 bxe_dcbx_set_state(struct bxe_softc *sc,
15850 uint8_t dcb_on,
15851 uint32_t dcbx_enabled)
15853 if (!CHIP_IS_E1x(sc)) {
15854 sc->dcb_state = dcb_on;
15855 sc->dcbx_enabled = dcbx_enabled;
15856 } else {
15857 sc->dcb_state = FALSE;
15858 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15860 BLOGD(sc, DBG_LOAD,
15861 "DCB state [%s:%s]\n",
15862 dcb_on ? "ON" : "OFF",
15863 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15864 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15865 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15866 "on-chip with negotiation" : "invalid");
15869 /* must be called after sriov-enable */
15871 bxe_set_qm_cid_count(struct bxe_softc *sc)
15873 int cid_count = BXE_L2_MAX_CID(sc);
15875 if (IS_SRIOV(sc)) {
15876 cid_count += BXE_VF_CIDS;
15879 if (CNIC_SUPPORT(sc)) {
15880 cid_count += CNIC_CID_MAX;
15883 return (roundup(cid_count, QM_CID_ROUND));
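/*
 * Editor's illustrative sketch (not part of the driver): roundup() pads
 * the CID count to the next multiple of QM_CID_ROUND. For example, with
 * a round of 64 (value assumed purely for illustration), a count of 70
 * becomes 128.
 */
static inline int
bxe_sketch_roundup(int count, int round)
{
    return (((count + round - 1) / round) * round);
}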
15887 bxe_init_multi_cos(struct bxe_softc *sc)
15889 int pri, cos;
15891 uint32_t pri_map = 0; /* XXX change to user config */
15893 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15894 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15895 if (cos < sc->max_cos) {
15896 sc->prio_to_cos[pri] = cos;
15898 BLOGW(sc, "Invalid COS %d for priority %d "
15899 "(max COS is %d), setting to 0\n",
15900 cos, pri, (sc->max_cos - 1));
15901 sc->prio_to_cos[pri] = 0;
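/*
 * Editor's illustrative sketch (not part of the driver): pri_map packs
 * one 4-bit COS value per priority, so priority p's COS is nibble p.
 * With a hypothetical pri_map of 0x00003210, priority 0 maps to COS 0,
 * priority 1 to COS 1, and so on.
 */
static inline uint32_t
bxe_sketch_cos_for_pri(uint32_t pri_map, int pri)
{
    return ((pri_map >> (pri * 4)) & 0xf);
}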
15907 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15909 struct bxe_softc *sc;
15910 int error, result;
15912 result = 0;
15913 error = sysctl_handle_int(oidp, &result, 0, req);
15915 if (error || !req->newptr) {
15916 return (error);
15917 }
15919 if (result == 1) {
15920 uint32_t temp;
15921 sc = (struct bxe_softc *)arg1;
15923 BLOGI(sc, "... dumping driver state ...\n");
15924 temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15925 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15926 }
15928 return (error);
15932 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15934 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15935 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15936 uint32_t *offset;
15937 uint64_t value = 0;
15938 int index = (int)arg2;
15940 if (index >= BXE_NUM_ETH_STATS) {
15941 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15942 return (-1);
15943 }
15945 offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15947 switch (bxe_eth_stats_arr[index].size) {
15948 case 4:
15949 value = (uint64_t)*offset;
15950 break;
15951 case 8:
15952 value = HILO_U64(*offset, *(offset + 1));
15953 break;
15954 default:
15955 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15956 index, bxe_eth_stats_arr[index].size);
15957 return (-1);
15958 }
15960 return (sysctl_handle_64(oidp, &value, 0, req));
15964 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15966 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15967 uint32_t *eth_stats;
15968 uint32_t *offset;
15969 uint64_t value = 0;
15970 uint32_t q_stat = (uint32_t)arg2;
15971 uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15972 uint32_t index = (q_stat & 0xffff);
15974 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15976 if (index >= BXE_NUM_ETH_Q_STATS) {
15977 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15978 return (-1);
15979 }
15981 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15983 switch (bxe_eth_q_stats_arr[index].size) {
15984 case 4:
15985 value = (uint64_t)*offset;
15986 break;
15987 case 8:
15988 value = HILO_U64(*offset, *(offset + 1));
15989 break;
15990 default:
15991 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15992 index, bxe_eth_q_stats_arr[index].size);
15993 return (-1);
15994 }
15996 return (sysctl_handle_64(oidp, &value, 0, req));
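/*
 * Editor's illustrative sketch (not part of the driver): the sysctl
 * arg2 decoded above packs the fastpath index into the upper 16 bits
 * and the per-queue stat index into the lower 16 bits; this is the
 * matching encoder (name hypothetical).
 */
static inline uint32_t
bxe_sketch_encode_q_stat(uint16_t fp_index, uint16_t stat_index)
{
    return (((uint32_t)fp_index << 16) | stat_index);
}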
15999 static void bxe_force_link_reset(struct bxe_softc *sc)
16002 bxe_acquire_phy_lock(sc);
16003 elink_link_reset(&sc->link_params, &sc->link_vars, 1);
16004 bxe_release_phy_lock(sc);
16008 bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
16010 struct bxe_softc *sc = (struct bxe_softc *)arg1;
16011 uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
16013 int rc = 0;
16014 int error;
16015 uint32_t result;
16017 error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
16019 if (error || !req->newptr) {
16020 return (error);
16021 }
16022 if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
16023 BLOGW(sc, "invalid pause param (%d) - use integers between 1 & 8\n", sc->bxe_pause_param);
16024 sc->bxe_pause_param = 8;
16027 result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
16030 if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
16031 BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
16032 return -EINVAL;
16033 }
16037 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
16038 if(result & ELINK_FLOW_CTRL_RX)
16039 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
16041 if(result & ELINK_FLOW_CTRL_TX)
16042 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
16043 if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
16044 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
16046 if(result & 0x400) {
16047 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
16048 sc->link_params.req_flow_ctrl[cfg_idx] =
16049 ELINK_FLOW_CTRL_AUTO;
16051 sc->link_params.req_fc_auto_adv = 0;
16052 if (result & ELINK_FLOW_CTRL_RX)
16053 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
16055 if (result & ELINK_FLOW_CTRL_TX)
16056 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
16057 if (!sc->link_params.req_fc_auto_adv)
16058 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
16061 if (sc->link_vars.link_up) {
16062 bxe_stats_handle(sc, STATS_EVENT_STOP);
16064 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
16065 bxe_force_link_reset(sc);
16066 bxe_acquire_phy_lock(sc);
16068 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
16070 bxe_release_phy_lock(sc);
16072 bxe_calc_fc_adv(sc);
16080 bxe_add_sysctls(struct bxe_softc *sc)
16082 struct sysctl_ctx_list *ctx;
16083 struct sysctl_oid_list *children;
16084 struct sysctl_oid *queue_top, *queue;
16085 struct sysctl_oid_list *queue_top_children, *queue_children;
16086 char queue_num_buf[32];
16090 ctx = device_get_sysctl_ctx(sc->dev);
16091 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
16093 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16094 CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16097 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16098 BCM_5710_FW_MAJOR_VERSION,
16099 BCM_5710_FW_MINOR_VERSION,
16100 BCM_5710_FW_REVISION_VERSION,
16101 BCM_5710_FW_ENGINEERING_VERSION);
16103 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16104 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
16105 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
16106 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
16107 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16109 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16110 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16111 "multifunction vnics per port");
16113 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16114 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16115 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16116 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16118 sc->devinfo.pcie_link_width);
16120 sc->debug = bxe_debug;
16122 #if __FreeBSD_version >= 900000
16123 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16124 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16125 "bootcode version");
16126 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16127 CTLFLAG_RD, sc->fw_ver_str, 0,
16128 "firmware version");
16129 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16130 CTLFLAG_RD, sc->mf_mode_str, 0,
16131 "multifunction mode");
16132 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16133 CTLFLAG_RD, sc->mac_addr_str, 0,
16135 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16136 CTLFLAG_RD, sc->pci_link_str, 0,
16137 "pci link status");
16138 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16139 CTLFLAG_RW, &sc->debug,
16140 "debug logging mode");
16142 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16143 CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
16144 "bootcode version");
16145 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16146 CTLFLAG_RD, &sc->fw_ver_str, 0,
16147 "firmware version");
16148 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16149 CTLFLAG_RD, &sc->mf_mode_str, 0,
16150 "multifunction mode");
16151 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16152 CTLFLAG_RD, &sc->mac_addr_str, 0,
16154 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16155 CTLFLAG_RD, &sc->pci_link_str, 0,
16156 "pci link status");
16157 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
16158 CTLFLAG_RW, &sc->debug, 0,
16159 "debug logging mode");
16160 #endif /* #if __FreeBSD_version >= 900000 */
16162 sc->trigger_grcdump = 0;
16163 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16164 CTLFLAG_RW, &sc->trigger_grcdump, 0,
16165 "trigger grcdump should be invoked"
16166 " before collecting grcdump");
16168 sc->grcdump_started = 0;
16169 sc->grcdump_done = 0;
16170 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16171 CTLFLAG_RD, &sc->grcdump_done, 0,
16172 "set by driver when grcdump is done");
16174 sc->rx_budget = bxe_rx_budget;
16175 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16176 CTLFLAG_RW, &sc->rx_budget, 0,
16177 "rx processing budget");
16179 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16180 CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
16181 bxe_sysctl_pauseparam, "IU",
16182 "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16185 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16186 CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
16187 bxe_sysctl_state, "IU", "dump driver state");
16189 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16190 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16191 bxe_eth_stats_arr[i].string,
16192 CTLTYPE_U64 | CTLFLAG_RD, sc, i,
16193 bxe_sysctl_eth_stat, "LU",
16194 bxe_eth_stats_arr[i].string);
16197 /* add a new parent node for all queues "dev.bxe.#.queue" */
16198 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16199 CTLFLAG_RD, NULL, "queue");
16200 queue_top_children = SYSCTL_CHILDREN(queue_top);
16202 for (i = 0; i < sc->num_queues; i++) {
16203 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16204 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16205 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16206 queue_num_buf, CTLFLAG_RD, NULL,
16208 queue_children = SYSCTL_CHILDREN(queue);
16210 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16211 q_stat = ((i << 16) | j);
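/* fp index in the high 16 bits, stat index in the low 16 bits;
 * decoded again in bxe_sysctl_eth_q_stat() */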
16212 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16213 bxe_eth_q_stats_arr[j].string,
16214 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
16215 bxe_sysctl_eth_q_stat, "LU",
16216 bxe_eth_q_stats_arr[j].string);
16222 bxe_alloc_buf_rings(struct bxe_softc *sc)
16224 #if __FreeBSD_version >= 901504
16227 struct bxe_fastpath *fp;
16229 for (i = 0; i < sc->num_queues; i++) {
16233 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16234 M_NOWAIT, &fp->tx_mtx);
16235 if (fp->tx_br == NULL)
16243 bxe_free_buf_rings(struct bxe_softc *sc)
16245 #if __FreeBSD_version >= 901504
16248 struct bxe_fastpath *fp;
16250 for (i = 0; i < sc->num_queues; i++) {
16255 buf_ring_free(fp->tx_br, M_DEVBUF);
16264 bxe_init_fp_mutexs(struct bxe_softc *sc)
16267 struct bxe_fastpath *fp;
16269 for (i = 0; i < sc->num_queues; i++) {
16273 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16274 "bxe%d_fp%d_tx_lock", sc->unit, i);
16275 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16277 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16278 "bxe%d_fp%d_rx_lock", sc->unit, i);
16279 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16284 bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16287 struct bxe_fastpath *fp;
16289 for (i = 0; i < sc->num_queues; i++) {
16293 if (mtx_initialized(&fp->tx_mtx)) {
16294 mtx_destroy(&fp->tx_mtx);
16297 if (mtx_initialized(&fp->rx_mtx)) {
16298 mtx_destroy(&fp->rx_mtx);
16305 * Device attach function.
16307 * Allocates device resources, performs secondary chip identification, and
16308 * initializes driver instance variables. This function is called from driver
16309 * load after a successful probe.
16312 * 0 = Success, >0 = Failure
16315 bxe_attach(device_t dev)
16317 struct bxe_softc *sc;
16319 sc = device_get_softc(dev);
16321 BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16323 sc->state = BXE_STATE_CLOSED;
16326 sc->unit = device_get_unit(dev);
16328 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16330 sc->pcie_bus = pci_get_bus(dev);
16331 sc->pcie_device = pci_get_slot(dev);
16332 sc->pcie_func = pci_get_function(dev);
16334 /* enable bus master capability */
16335 pci_enable_busmaster(dev);
16338 if (bxe_allocate_bars(sc) != 0) {
16342 /* initialize the mutexes */
16343 bxe_init_mutexes(sc);
16345 /* prepare the periodic callout */
16346 callout_init(&sc->periodic_callout, 0);
16348 /* prepare the chip taskqueue */
16349 sc->chip_tq_flags = CHIP_TQ_NONE;
16350 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16351 "bxe%d_chip_tq", sc->unit);
16352 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16353 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16354 taskqueue_thread_enqueue,
16356 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16357 "%s", sc->chip_tq_name);
16359 TIMEOUT_TASK_INIT(taskqueue_thread,
16360 &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc);
16363 /* get device info and set params */
16364 if (bxe_get_device_info(sc) != 0) {
16365 BLOGE(sc, "failed to get device info\n");
16366 bxe_deallocate_bars(sc);
16367 pci_disable_busmaster(dev);
16371 /* get final misc params */
16372 bxe_get_params(sc);
16374 /* set the default MTU (changed via ifconfig) */
16375 sc->mtu = ETHERMTU;
16377 bxe_set_modes_bitmap(sc);
16380 * If in AFEX mode and the function is configured for FCoE
16381 * then bail... no L2 allowed.
16384 /* get phy settings from shmem and 'and' against admin settings */
16385 bxe_get_phy_info(sc);
16387 /* initialize the FreeBSD ifnet interface */
16388 if (bxe_init_ifnet(sc) != 0) {
16389 bxe_release_mutexes(sc);
16390 bxe_deallocate_bars(sc);
16391 pci_disable_busmaster(dev);
16395 if (bxe_add_cdev(sc) != 0) {
16396 if (sc->ifnet != NULL) {
16397 ether_ifdetach(sc->ifnet);
16399 ifmedia_removeall(&sc->ifmedia);
16400 bxe_release_mutexes(sc);
16401 bxe_deallocate_bars(sc);
16402 pci_disable_busmaster(dev);
16406 /* allocate device interrupts */
16407 if (bxe_interrupt_alloc(sc) != 0) {
16409 if (sc->ifnet != NULL) {
16410 ether_ifdetach(sc->ifnet);
16412 ifmedia_removeall(&sc->ifmedia);
16413 bxe_release_mutexes(sc);
16414 bxe_deallocate_bars(sc);
16415 pci_disable_busmaster(dev);
16419 bxe_init_fp_mutexs(sc);
16421 if (bxe_alloc_buf_rings(sc) != 0) {
16422 bxe_free_buf_rings(sc);
16423 bxe_interrupt_free(sc);
16425 if (sc->ifnet != NULL) {
16426 ether_ifdetach(sc->ifnet);
16428 ifmedia_removeall(&sc->ifmedia);
16429 bxe_release_mutexes(sc);
16430 bxe_deallocate_bars(sc);
16431 pci_disable_busmaster(dev);
16436 if (bxe_alloc_ilt_mem(sc) != 0) {
16437 bxe_free_buf_rings(sc);
16438 bxe_interrupt_free(sc);
16440 if (sc->ifnet != NULL) {
16441 ether_ifdetach(sc->ifnet);
16443 ifmedia_removeall(&sc->ifmedia);
16444 bxe_release_mutexes(sc);
16445 bxe_deallocate_bars(sc);
16446 pci_disable_busmaster(dev);
16450 /* allocate the host hardware/software hsi structures */
16451 if (bxe_alloc_hsi_mem(sc) != 0) {
16452 bxe_free_ilt_mem(sc);
16453 bxe_free_buf_rings(sc);
16454 bxe_interrupt_free(sc);
16456 if (sc->ifnet != NULL) {
16457 ether_ifdetach(sc->ifnet);
16459 ifmedia_removeall(&sc->ifmedia);
16460 bxe_release_mutexes(sc);
16461 bxe_deallocate_bars(sc);
16462 pci_disable_busmaster(dev);
16466 /* need to reset chip if UNDI was active */
16467 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16470 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16471 DRV_MSG_SEQ_NUMBER_MASK);
16472 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16473 bxe_prev_unload(sc);
16478 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16480 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16481 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16482 SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16483 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16484 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16485 bxe_dcbx_init_params(sc);
16487 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16491 /* calculate qm_cid_count */
16492 sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16493 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16496 bxe_init_multi_cos(sc);
16498 bxe_add_sysctls(sc);
16504 * Device detach function.
16506 * Stops the controller, resets the controller, and releases resources.
16509 * 0 = Success, >0 = Failure
16512 bxe_detach(device_t dev)
16514 struct bxe_softc *sc;
16517 sc = device_get_softc(dev);
16519 BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16522 if (ifp != NULL && ifp->if_vlantrunk != NULL) {
16523 BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16529 /* stop the periodic callout */
16530 bxe_periodic_stop(sc);
16532 /* stop the chip taskqueue */
16533 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16535 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16536 taskqueue_free(sc->chip_tq);
16537 sc->chip_tq = NULL;
16538 taskqueue_drain_timeout(taskqueue_thread,
16539 &sc->sp_err_timeout_task);
16542 /* stop and reset the controller if it was open */
16543 if (sc->state != BXE_STATE_CLOSED) {
16545 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16546 sc->state = BXE_STATE_DISABLED;
16547 BXE_CORE_UNLOCK(sc);
16550 /* release the network interface */
16552 ether_ifdetach(ifp);
16554 ifmedia_removeall(&sc->ifmedia);
16556 /* XXX do the following based on driver state... */
16558 /* free the host hardware/software hsi structures */
16559 bxe_free_hsi_mem(sc);
16562 bxe_free_ilt_mem(sc);
16564 bxe_free_buf_rings(sc);
16566 /* release the interrupts */
16567 bxe_interrupt_free(sc);
16569 /* Release the mutexes */
16570 bxe_destroy_fp_mutexs(sc);
16571 bxe_release_mutexes(sc);
16574 /* Release the PCIe BAR mapped memory */
16575 bxe_deallocate_bars(sc);
16577 /* Release the FreeBSD interface. */
16578 if (sc->ifnet != NULL) {
16579 if_free(sc->ifnet);
16582 pci_disable_busmaster(dev);
16588 * Device shutdown function.
16590 * Stops and resets the controller.
16596 bxe_shutdown(device_t dev)
16598 struct bxe_softc *sc;
16600 sc = device_get_softc(dev);
16602 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16604 /* stop the periodic callout */
16605 bxe_periodic_stop(sc);
16607 if (sc->state != BXE_STATE_CLOSED) {
16609 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16610 BXE_CORE_UNLOCK(sc);
16617 bxe_igu_ack_sb(struct bxe_softc *sc,
16624 uint32_t igu_addr = sc->igu_base_addr;
16625 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16626 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16630 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16635 uint32_t data, ctl, cnt = 100;
16636 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16637 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16638 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16639 uint32_t sb_bit = 1 << (idu_sb_id%32);
16640 uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16641 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
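/*
 * The cleanup is issued as an IGU command via GRC: write the command
 * data word, then the control word (address/fid/type), then poll the
 * per-SB cleanup bit in igu_addr_ack until the hardware sets it.
 */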
16643 /* Not supported in BC mode */
16644 if (CHIP_INT_MODE_IS_BC(sc)) {
16648 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16649 IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16650 IGU_REGULAR_CLEANUP_SET |
16651 IGU_REGULAR_BCLEANUP);
16653 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16654 (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16655 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16657 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16658 data, igu_addr_data);
16659 REG_WR(sc, igu_addr_data, data);
16661 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16662 BUS_SPACE_BARRIER_WRITE);
16665 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16666 ctl, igu_addr_ctl);
16667 REG_WR(sc, igu_addr_ctl, ctl);
16669 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16670 BUS_SPACE_BARRIER_WRITE);
16673 /* wait for clean up to finish */
16674 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16678 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16679 BLOGD(sc, DBG_LOAD,
16680 "Unable to finish IGU cleanup: "
16681 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16682 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16687 bxe_igu_clear_sb(struct bxe_softc *sc,
16690 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16699 /*******************/
16700 /* ECORE CALLBACKS */
16701 /*******************/
16704 bxe_reset_common(struct bxe_softc *sc)
16706 uint32_t val = 0x1400;
16709 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16711 if (CHIP_IS_E3(sc)) {
16712 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16713 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16716 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16720 bxe_common_init_phy(struct bxe_softc *sc)
16722 uint32_t shmem_base[2];
16723 uint32_t shmem2_base[2];
16725 /* Avoid common init in case MFW supports LFA */
16726 if (SHMEM2_RD(sc, size) >
16727 (uint32_t)offsetof(struct shmem2_region,
16728 lfa_host_addr[SC_PORT(sc)])) {
16732 shmem_base[0] = sc->devinfo.shmem_base;
16733 shmem2_base[0] = sc->devinfo.shmem2_base;
16735 if (!CHIP_IS_E1x(sc)) {
16736 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
16737 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16740 bxe_acquire_phy_lock(sc);
16741 elink_common_init_phy(sc, shmem_base, shmem2_base,
16742 sc->devinfo.chip_id, 0);
16743 bxe_release_phy_lock(sc);
16747 bxe_pf_disable(struct bxe_softc *sc)
16749 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16751 val &= ~IGU_PF_CONF_FUNC_EN;
16753 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16754 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16755 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16759 bxe_init_pxp(struct bxe_softc *sc)
16762 int r_order, w_order;
16764 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16766 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16768 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16770 if (sc->mrrs == -1) {
16771 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16773 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16774 r_order = sc->mrrs;
16777 ecore_init_pxp_arb(sc, r_order, w_order);
16781 bxe_get_pretend_reg(struct bxe_softc *sc)
16783 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16784 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16785 return (base + (SC_ABS_FUNC(sc)) * stride);
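/*
 * Example (assuming the pretend registers F0..F7 are evenly spaced,
 * which the stride computation above relies on): absolute function 3
 * yields F0's address plus 3 times the (F1 - F0) stride, i.e. the F3
 * pretend register.
 */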
16789 * Called only on E1H or E2.
16790 * When pretending to be PF, the pretend value is the function number 0..7.
16791 * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
16795 bxe_pretend_func(struct bxe_softc *sc,
16796 uint16_t pretend_func_val)
16798 uint32_t pretend_reg;
16800 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16804 /* get my own pretend register */
16805 pretend_reg = bxe_get_pretend_reg(sc);
16806 REG_WR(sc, pretend_reg, pretend_func_val);
16807 REG_RD(sc, pretend_reg);
16812 bxe_iov_init_dmae(struct bxe_softc *sc)
16818 bxe_iov_init_dq(struct bxe_softc *sc)
16823 /* send a NIG loopback debug packet */
16825 bxe_lb_pckt(struct bxe_softc *sc)
16827 uint32_t wb_write[3];
16829 /* Ethernet source and destination addresses */
16830 wb_write[0] = 0x55555555;
16831 wb_write[1] = 0x55555555;
16832 wb_write[2] = 0x20; /* SOP */
16833 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16835 /* NON-IP protocol */
16836 wb_write[0] = 0x09000000;
16837 wb_write[1] = 0x55555555;
16838 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
16839 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16843 * Some of the internal memories are not directly readable from the driver.
16844 * To test them we send debug packets.
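/*
 * Sequence of the test below: disable the parser's neighbor-block
 * inputs, send one loopback packet and verify the NIG and PRS counters
 * see it, reset BRB/PRS, send ten more packets and re-check the
 * counters, then drain the NIG EOP FIFO before re-enabling the
 * neighbor blocks.
 */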
16847 bxe_int_mem_test(struct bxe_softc *sc)
16853 if (CHIP_REV_IS_FPGA(sc)) {
16855 } else if (CHIP_REV_IS_EMUL(sc)) {
16861 /* disable inputs of parser neighbor blocks */
16862 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16863 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16864 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16865 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16867 /* write 0 to parser credits for CFC search request */
16868 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16870 /* send Ethernet packet */
16873 /* TODO: should the NIG statistics be reset here? */
16874 /* Wait until NIG register shows 1 packet of size 0x10 */
16875 count = 1000 * factor;
16877 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16878 val = *BXE_SP(sc, wb_data[0]);
16888 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16892 /* wait until PRS register shows 1 packet */
16893 count = (1000 * factor);
16895 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16905 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16909 /* Reset and init BRB, PRS */
16910 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16912 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16914 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16915 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16917 /* Disable inputs of parser neighbor blocks */
16918 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16919 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16920 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16921 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16923 /* Write 0 to parser credits for CFC search request */
16924 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16926 /* send 10 Ethernet packets */
16927 for (i = 0; i < 10; i++) {
16931 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16932 count = (1000 * factor);
16934 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16935 val = *BXE_SP(sc, wb_data[0]);
16945 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16949 /* Wait until PRS register shows 2 packets */
16950 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16952 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16955 /* Write 1 to parser credits for CFC search request */
16956 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16958 /* Wait until PRS register shows 3 packets */
16959 DELAY(10000 * factor);
16961 /* Wait until NIG register shows 1 packet of size 0x10 */
16962 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16964 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16967 /* clear NIG EOP FIFO */
16968 for (i = 0; i < 11; i++) {
16969 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16972 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16974 BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16978 /* Reset and init BRB, PRS, NIG */
16979 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16981 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16983 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16984 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16985 if (!CNIC_SUPPORT(sc)) {
16987 REG_WR(sc, PRS_REG_NIC_MODE, 1);
16990 /* Enable inputs of parser neighbor blocks */
16991 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16992 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16993 REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16994 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
17000 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
17007 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
17008 SHARED_HW_CFG_FAN_FAILURE_MASK);
17010 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
17014 * The fan failure mechanism is usually related to the PHY type since
17015 * the power consumption of the board is affected by the PHY. Currently,
17016 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
17018 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
17019 for (port = PORT_0; port < PORT_MAX; port++) {
17020 is_required |= elink_fan_failure_det_req(sc,
17021 sc->devinfo.shmem_base,
17022 sc->devinfo.shmem2_base,
17027 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
17029 if (is_required == 0) {
17033 /* Fan failure is indicated by SPIO 5 */
17034 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
17036 /* set to active low mode */
17037 val = REG_RD(sc, MISC_REG_SPIO_INT);
17038 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
17039 REG_WR(sc, MISC_REG_SPIO_INT, val);
17041 /* enable interrupt to signal the IGU */
17042 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17043 val |= MISC_SPIO_SPIO5;
17044 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
17048 bxe_enable_blocks_attention(struct bxe_softc *sc)
17052 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17053 if (!CHIP_IS_E1x(sc)) {
17054 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
17056 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
17058 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17059 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17061 * mask read length error interrupts in brb for parser
17062 * (parsing unit and 'checksum and crc' unit)
17063 * these errors are legal (PU reads fixed length and CAC can cause
17064 * read length error on truncated packets)
17066 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
17067 REG_WR(sc, QM_REG_QM_INT_MASK, 0);
17068 REG_WR(sc, TM_REG_TM_INT_MASK, 0);
17069 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
17070 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
17071 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
17072 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
17073 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
17074 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
17075 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
17076 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
17077 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
17078 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
17079 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
17080 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
17081 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
17082 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
17083 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
17084 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
17086 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
17087 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
17088 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
17089 if (!CHIP_IS_E1x(sc)) {
17090 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
17091 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
17093 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
17095 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
17096 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
17097 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
17098 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
17100 if (!CHIP_IS_E1x(sc)) {
17101 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
17102 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
17105 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
17106 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
17107 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
17108 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
17112 * bxe_init_hw_common - initialize the HW at the COMMON phase.
17114 * @sc: driver handle
17117 bxe_init_hw_common(struct bxe_softc *sc)
17119 uint8_t abs_func_id;
17122 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17126 * take the RESET lock to protect undi_unload flow from accessing
17127 * registers while we are resetting the chip
17129 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17131 bxe_reset_common(sc);
17133 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17136 if (CHIP_IS_E3(sc)) {
17137 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17138 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17141 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17143 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17145 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17146 BLOGD(sc, DBG_LOAD, "after misc block init\n");
17148 if (!CHIP_IS_E1x(sc)) {
17150 * 4-port mode or 2-port mode we need to turn off master-enable for
17151 * everyone. After that we turn it back on for self. So, we disregard
17152 * multi-function, and always disable all functions on the given path,
17153 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
17155 for (abs_func_id = SC_PATH(sc);
17156 abs_func_id < (E2_FUNC_MAX * 2);
17157 abs_func_id += 2) {
17158 if (abs_func_id == SC_ABS_FUNC(sc)) {
17159 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17163 bxe_pretend_func(sc, abs_func_id);
17165 /* clear pf enable */
17166 bxe_pf_disable(sc);
17168 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17172 BLOGD(sc, DBG_LOAD, "after pf disable\n");
17174 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17176 if (CHIP_IS_E1(sc)) {
17178 * enable HW interrupt from PXP on USDM overflow
17179 * bit 16 on INT_MASK_0
17181 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17184 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17187 #ifdef __BIG_ENDIAN
17188 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17189 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17190 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17191 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17192 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17193 /* make sure this value is 0 */
17194 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17196 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17197 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17198 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17199 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17200 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17203 ecore_ilt_init_page_size(sc, INITOP_SET);
17205 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17206 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17209 /* let the HW do its magic... */
17212 /* finish PXP init */
17213 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17215 BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17219 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17221 BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17225 BLOGD(sc, DBG_LOAD, "after pxp init\n");
17228 * Timer bug workaround for E2 only. We need to set the entire ILT to have
17229 * entries with value "0" and valid bit on. This needs to be done by the
17230 * first PF that is loaded in a path (i.e. common phase)
17232 if (!CHIP_IS_E1x(sc)) {
17234 * In E2 there is a bug in the timers block that can cause function 6 / 7
17235 * (i.e. vnic3) to start even if it is marked as "scan-off".
17236 * This occurs when a different function (func2,3) is being marked
17237 * as "scan-off". Real-life scenario for example: if a driver is being
17238 * load-unloaded while func6,7 are down. This will cause the timer to access
17239 * the ilt, translate to a logical address and send a request to read/write.
17240 * Since the ilt for the function that is down is not valid, this will cause
17241 * a translation error which is unrecoverable.
17242 * The Workaround is intended to make sure that when this happens nothing
17243 * fatal will occur. The workaround:
17244 * 1. First PF driver which loads on a path will:
17245 * a. After taking the chip out of reset, by using pretend,
17246 * it will write "0" to the following registers of
17248 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17249 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17250 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17251 * And for itself it will write '1' to
17252 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17253 * dmae-operations (writing to pram for example.)
17254 * note: can be done for only function 6,7 but cleaner this
17256 * b. Write zero+valid to the entire ILT.
17257 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
17258 * VNIC3 (of that port). The range allocated will be the
17259 * entire ILT. This is needed to prevent ILT range error.
17260 * 2. Any PF driver load flow:
17261 * a. ILT update with the physical addresses of the allocated
17263 * b. Wait 20msec. - note that this timeout is needed to make
17264 * sure there are no requests in one of the PXP internal
17265 * queues with "old" ILT addresses.
17266 * c. PF enable in the PGLC.
17267 * d. Clear the was_error of the PF in the PGLC. (could have
17268 * occurred while driver was down)
17269 * e. PF enable in the CFC (WEAK + STRONG)
17270 * f. Timers scan enable
17271 * 3. PF driver unload flow:
17272 * a. Clear the Timers scan_en.
17273 * b. Polling for scan_on=0 for that PF.
17274 * c. Clear the PF enable bit in the PXP.
17275 * d. Clear the PF enable in the CFC (WEAK + STRONG)
17276 * e. Write zero+valid to all ILT entries (The valid bit must
17278 * f. If this is VNIC 3 of a port then also init
17279 * first_timers_ilt_entry to zero and last_timers_ilt_entry
17280 * to the last entry in the ILT.
17283 * Currently the PF error in the PGLC is non-recoverable.
17284 * In the future there will be a recovery routine for this error.
17285 * Currently attention is masked.
17286 * Having an MCP lock on the load/unload process does not guarantee that
17287 * there is no Timer disable during Func6/7 enable. This is because the
17288 * Timers scan is currently being cleared by the MCP on FLR.
17289 * Step 2.d can be done only for PF6/7 and the driver can also check if
17290 * there is an error before clearing it. But the flow above is simpler and
17292 * All ILT entries are written with zero+valid, and not just the PF6/7
17293 * ILT entries, since in the future the ILT entry allocation for
17294 * PFs might be dynamic.
17296 struct ilt_client_info ilt_cli;
17297 struct ecore_ilt ilt;
17299 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17300 memset(&ilt, 0, sizeof(struct ecore_ilt));
17302 /* initialize dummy TM client */
17304 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
17305 ilt_cli.client_num = ILT_CLIENT_TM;
17308 * Step 1: set zeroes to all ilt page entries with valid bit on
17309 * Step 2: set the timers first/last ilt entry to point
17310 * to the entire range to prevent ILT range error for 3rd/4th
17311 * vnic (this code assumes existence of the vnic)
17313 * both steps performed by call to ecore_ilt_client_init_op()
17314 * with dummy TM client
17316 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17317 * and its brother are split registers
17320 bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17321 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17322 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17324 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17325 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17326 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17329 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17330 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17332 if (!CHIP_IS_E1x(sc)) {
17333 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17334 (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17336 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17337 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17339 /* let the HW do its magic... */
17342 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17343 } while (factor-- && (val != 1));
17346 BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17351 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17353 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17355 bxe_iov_init_dmae(sc);
17357 /* clean the DMAE memory */
17358 sc->dmae_ready = 1;
17359 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17361 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17363 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17365 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17367 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17369 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17370 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17371 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17372 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17374 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17376 /* QM queues pointers table */
17377 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17379 /* soft reset pulse */
17380 REG_WR(sc, QM_REG_SOFT_RESET, 1);
17381 REG_WR(sc, QM_REG_SOFT_RESET, 0);
17383 if (CNIC_SUPPORT(sc))
17384 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17386 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17387 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17388 if (!CHIP_REV_IS_SLOW(sc)) {
17389 /* enable hw interrupt from doorbell Q */
17390 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17393 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17395 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17396 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17398 if (!CHIP_IS_E1(sc)) {
17399 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17402 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17403 if (IS_MF_AFEX(sc)) {
17405 * configure that AFEX and VLAN headers must be
17406 * received in AFEX mode
17408 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17409 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17410 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17411 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17412 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17415 * Bit-map indicating which L2 hdrs may appear
17416 * after the basic Ethernet header
17418 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17419 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17423 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17424 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17425 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17426 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17428 if (!CHIP_IS_E1x(sc)) {
17429 /* reset VFC memories */
17430 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17431 VFC_MEMORIES_RST_REG_CAM_RST |
17432 VFC_MEMORIES_RST_REG_RAM_RST);
17433 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17434 VFC_MEMORIES_RST_REG_CAM_RST |
17435 VFC_MEMORIES_RST_REG_RAM_RST);
17440 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17441 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17442 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17443 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17445 /* sync semi rtc */
17446 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17448 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17451 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17452 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17453 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17455 if (!CHIP_IS_E1x(sc)) {
17456 if (IS_MF_AFEX(sc)) {
17458 * configure that AFEX and VLAN headers must be
17459 * sent in AFEX mode
17461 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17462 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17463 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17464 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17465 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17467 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17468 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17472 REG_WR(sc, SRC_REG_SOFT_RST, 1);
17474 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17476 if (CNIC_SUPPORT(sc)) {
17477 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17478 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17479 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17480 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17481 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17482 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17483 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17484 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17485 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17486 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17488 REG_WR(sc, SRC_REG_SOFT_RST, 0);
17490 if (sizeof(union cdu_context) != 1024) {
17491 /* we currently assume that a context is 1024 bytes */
17492 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17493 (long)sizeof(union cdu_context));
17496 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17497 val = (4 << 24) + (0 << 12) + 1024;
17498 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17500 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17502 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17503 /* enable context validation interrupt from CFC */
17504 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17506 /* set the thresholds to prevent CFC/CDU race */
17507 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17508 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17510 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17511 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17514 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17515 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17517 /* Reset PCIE errors for debug */
17518 REG_WR(sc, 0x2814, 0xffffffff);
17519 REG_WR(sc, 0x3820, 0xffffffff);
17521 if (!CHIP_IS_E1x(sc)) {
17522 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17523 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17524 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17525 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17526 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17527 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17528 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17529 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17530 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17531 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17532 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17535 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17537 if (!CHIP_IS_E1(sc)) {
17538 /* in E3 this is done in the per-port section */
17539 if (!CHIP_IS_E3(sc))
17540 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17543 if (CHIP_IS_E1H(sc)) {
17544 /* not applicable for E2 (and above ...) */
17545 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17548 if (CHIP_REV_IS_SLOW(sc)) {
17552 /* finish CFC init */
17553 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17555 BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17558 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17560 BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17563 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17565 BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17568 REG_WR(sc, CFC_REG_DEBUG0, 0);
17570 if (CHIP_IS_E1(sc)) {
17571 /* read NIG statistic to see if this is our first up since powerup */
17572 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17573 val = *BXE_SP(sc, wb_data[0]);
17575 /* do internal memory self test */
17576 if ((val == 0) && bxe_int_mem_test(sc)) {
17577 BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17582 bxe_setup_fan_failure_detection(sc);
17584 /* clear PXP2 attentions */
17585 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17587 bxe_enable_blocks_attention(sc);
17589 if (!CHIP_REV_IS_SLOW(sc)) {
17590 ecore_enable_blocks_parity(sc);
17593 if (!BXE_NOMCP(sc)) {
17594 if (CHIP_IS_E1x(sc)) {
17595 bxe_common_init_phy(sc);
17603 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17605 * @sc: driver handle
17608 bxe_init_hw_common_chip(struct bxe_softc *sc)
17610 int rc = bxe_init_hw_common(sc);
17613 BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17617 /* In E2 2-PORT mode, same ext phy is used for the two paths */
17618 if (!BXE_NOMCP(sc)) {
17619 bxe_common_init_phy(sc);
17626 bxe_init_hw_port(struct bxe_softc *sc)
17628 int port = SC_PORT(sc);
17629 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17630 uint32_t low, high;
17633 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17635 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17637 ecore_init_block(sc, BLOCK_MISC, init_phase);
17638 ecore_init_block(sc, BLOCK_PXP, init_phase);
17639 ecore_init_block(sc, BLOCK_PXP2, init_phase);
17642 * Timers bug workaround: the pf_master bit in pglue is disabled at the
17643 * common phase, so we need to enable it here before any DMAE accesses are
17644 * attempted. Therefore we manually added the enable-master to the
17645 * port phase (it also happens in the function phase)
17647 if (!CHIP_IS_E1x(sc)) {
17648 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17651 ecore_init_block(sc, BLOCK_ATC, init_phase);
17652 ecore_init_block(sc, BLOCK_DMAE, init_phase);
17653 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17654 ecore_init_block(sc, BLOCK_QM, init_phase);
17656 ecore_init_block(sc, BLOCK_TCM, init_phase);
17657 ecore_init_block(sc, BLOCK_UCM, init_phase);
17658 ecore_init_block(sc, BLOCK_CCM, init_phase);
17659 ecore_init_block(sc, BLOCK_XCM, init_phase);
17661 /* QM cid (connection) count */
17662 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17664 if (CNIC_SUPPORT(sc)) {
17665 ecore_init_block(sc, BLOCK_TM, init_phase);
17666 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17667 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17670 ecore_init_block(sc, BLOCK_DORQ, init_phase);
17672 ecore_init_block(sc, BLOCK_BRB1, init_phase);
17674 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17676 low = (BXE_ONE_PORT(sc) ? 160 : 246);
17677 } else if (sc->mtu > 4096) {
17678 if (BXE_ONE_PORT(sc)) {
17682 /* (24*1024 + val*4)/256 */
17683 low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17686 low = (BXE_ONE_PORT(sc) ? 80 : 160);
17688 high = (low + 56); /* 14*1024/256 */
17689 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17690 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17693 if (CHIP_IS_MODE_4_PORT(sc)) {
17694 REG_WR(sc, SC_PORT(sc) ?
17695 BRB1_REG_MAC_GUARANTIED_1 :
17696 BRB1_REG_MAC_GUARANTIED_0, 40);
17699 ecore_init_block(sc, BLOCK_PRS, init_phase);
17700 if (CHIP_IS_E3B0(sc)) {
17701 if (IS_MF_AFEX(sc)) {
17702 /* configure headers for AFEX mode */
17703 REG_WR(sc, SC_PORT(sc) ?
17704 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17705 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17706 REG_WR(sc, SC_PORT(sc) ?
17707 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17708 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17709 REG_WR(sc, SC_PORT(sc) ?
17710 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17711 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17713 /* Ovlan exists only if we are in multi-function +
17714 * switch-dependent mode, in switch-independent there
17715 * are no ovlan headers
17717 REG_WR(sc, SC_PORT(sc) ?
17718 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17719 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17720 (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17724 ecore_init_block(sc, BLOCK_TSDM, init_phase);
17725 ecore_init_block(sc, BLOCK_CSDM, init_phase);
17726 ecore_init_block(sc, BLOCK_USDM, init_phase);
17727 ecore_init_block(sc, BLOCK_XSDM, init_phase);
17729 ecore_init_block(sc, BLOCK_TSEM, init_phase);
17730 ecore_init_block(sc, BLOCK_USEM, init_phase);
17731 ecore_init_block(sc, BLOCK_CSEM, init_phase);
17732 ecore_init_block(sc, BLOCK_XSEM, init_phase);
17734 ecore_init_block(sc, BLOCK_UPB, init_phase);
17735 ecore_init_block(sc, BLOCK_XPB, init_phase);
17737 ecore_init_block(sc, BLOCK_PBF, init_phase);
17739 if (CHIP_IS_E1x(sc)) {
17740 /* configure PBF to work without PAUSE mtu 9000 */
17741 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17743 /* update threshold */
17744 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17745 /* update init credit */
17746 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17748 /* probe changes */
17749 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17751 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17754 if (CNIC_SUPPORT(sc)) {
17755 ecore_init_block(sc, BLOCK_SRC, init_phase);
17758 ecore_init_block(sc, BLOCK_CDU, init_phase);
17759 ecore_init_block(sc, BLOCK_CFC, init_phase);
17761 if (CHIP_IS_E1(sc)) {
17762 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17763 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17765 ecore_init_block(sc, BLOCK_HC, init_phase);
17767 ecore_init_block(sc, BLOCK_IGU, init_phase);
17769 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17770 /* init aeu_mask_attn_func_0/1:
17771 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17772 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17773 * bits 4-7 are used for "per vn group attention" */
17774 val = IS_MF(sc) ? 0xF7 : 0x7;
17775 /* Enable DCBX attention for all but E1 */
17776 val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17777 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17779 ecore_init_block(sc, BLOCK_NIG, init_phase);
17781 if (!CHIP_IS_E1x(sc)) {
17782 /* Bit-map indicating which L2 hdrs may appear after the
17783 * basic Ethernet header
17785 if (IS_MF_AFEX(sc)) {
17786 REG_WR(sc, SC_PORT(sc) ?
17787 NIG_REG_P1_HDRS_AFTER_BASIC :
17788 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17790 REG_WR(sc, SC_PORT(sc) ?
17791 NIG_REG_P1_HDRS_AFTER_BASIC :
17792 NIG_REG_P0_HDRS_AFTER_BASIC,
17793 IS_MF_SD(sc) ? 7 : 6);
17796 if (CHIP_IS_E3(sc)) {
17797 REG_WR(sc, SC_PORT(sc) ?
17798 NIG_REG_LLH1_MF_MODE :
17799 NIG_REG_LLH_MF_MODE, IS_MF(sc));
17802 if (!CHIP_IS_E3(sc)) {
17803 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17806 if (!CHIP_IS_E1(sc)) {
17807 /* 0x2 disable mf_ov, 0x1 enable */
17808 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17809 (IS_MF_SD(sc) ? 0x1 : 0x2));
17811 if (!CHIP_IS_E1x(sc)) {
17813 switch (sc->devinfo.mf_info.mf_mode) {
17814 case MULTI_FUNCTION_SD:
17817 case MULTI_FUNCTION_SI:
17818 case MULTI_FUNCTION_AFEX:
17823 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17824 NIG_REG_LLH0_CLS_TYPE), val);
17826 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17827 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17828 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17831 /* If SPIO5 is set to generate interrupts, enable it for this port */
17832 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17833 if (val & MISC_SPIO_SPIO5) {
17834 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17835 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17836 val = REG_RD(sc, reg_addr);
17837 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17838 REG_WR(sc, reg_addr, val);
17845 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17848 uint32_t poll_count)
17850 uint32_t cur_cnt = poll_count;
17853 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17854 DELAY(FLR_WAIT_INTERVAL);
17861 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17866 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17869 BLOGE(sc, "%s usage count=%d\n", msg, val);
17876 /* Common routines with VF FLR cleanup */
17878 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17880 /* adjust polling timeout */
17881 if (CHIP_REV_IS_EMUL(sc)) {
17882 return (FLR_POLL_CNT * 2000);
17885 if (CHIP_REV_IS_FPGA(sc)) {
17886 return (FLR_POLL_CNT * 120);
17889 return (FLR_POLL_CNT);
17893 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17896 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17897 if (bxe_flr_clnup_poll_hw_counter(sc,
17898 CFC_REG_NUM_LCIDS_INSIDE_PF,
17899 "CFC PF usage counter timed out",
17904 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17905 if (bxe_flr_clnup_poll_hw_counter(sc,
17906 DORQ_REG_PF_USAGE_CNT,
17907 "DQ PF usage counter timed out",
17912 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17913 if (bxe_flr_clnup_poll_hw_counter(sc,
17914 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17915 "QM PF usage counter timed out",
17920 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17921 if (bxe_flr_clnup_poll_hw_counter(sc,
17922 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17923 "Timers VNIC usage counter timed out",
17928 if (bxe_flr_clnup_poll_hw_counter(sc,
17929 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17930 "Timers NUM_SCANS usage counter timed out",
17935 /* Wait DMAE PF usage counter to zero */
17936 if (bxe_flr_clnup_poll_hw_counter(sc,
17937 dmae_reg_go_c[INIT_DMAE_C(sc)],
17938 "DMAE dommand register timed out",
17946 #define OP_GEN_PARAM(param) \
17947 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17948 #define OP_GEN_TYPE(type) \
17949 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17950 #define OP_GEN_AGG_VECT(index) \
17951 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
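/*
 * Sketch of how these macros compose (values taken from
 * bxe_send_final_clnup() below): the final-cleanup command is
 *   OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX) |
 *   OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE) |
 *   OP_GEN_AGG_VECT(clnup_func) |
 *   (1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT)
 * written to XSDM_REG_OPERATION_GEN.
 */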
17954 bxe_send_final_clnup(struct bxe_softc *sc,
17955 uint8_t clnup_func,
17958 uint32_t op_gen_command = 0;
17959 uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17960 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17963 if (REG_RD(sc, comp_addr)) {
17964 BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17968 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17969 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17970 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17971 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17973 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17974 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17976 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17977 BLOGE(sc, "FW final cleanup did not succeed\n");
17978 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17979 (REG_RD(sc, comp_addr)));
17980 bxe_panic(sc, ("FLR cleanup failed\n"));
17984 /* Zero completion for next FLR */
17985 REG_WR(sc, comp_addr, 0);
17991 bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
17992 struct pbf_pN_buf_regs *regs,
17993 uint32_t poll_count)
17995 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17996 uint32_t cur_cnt = poll_count;
17998 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17999 crd = crd_start = REG_RD(sc, regs->crd);
18000 init_crd = REG_RD(sc, regs->init_crd);
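/*
 * The loop below polls until either the credit count returns to its
 * initial value or the number of credits freed since we started covers
 * everything that was outstanding. The (int32_t) casts keep the
 * freed-counter delta correct if the hardware counter wraps.
 */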
18002 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
18003 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
18004 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
18006 while ((crd != init_crd) &&
18007 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
18008 (init_crd - crd_start))) {
18010 DELAY(FLR_WAIT_INTERVAL);
18011 crd = REG_RD(sc, regs->crd);
18012 crd_freed = REG_RD(sc, regs->crd_freed);
18014 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
18015 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
18016 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
18021 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
18022 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
18026 bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
18027 struct pbf_pN_cmd_regs *regs,
18028 uint32_t poll_count)
18030 uint32_t occup, to_free, freed, freed_start;
18031 uint32_t cur_cnt = poll_count;
18033 occup = to_free = REG_RD(sc, regs->lines_occup);
18034 freed = freed_start = REG_RD(sc, regs->lines_freed);
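/*
 * Same idea as bxe_pbf_pN_buf_flushed(): wait until the freed-lines
 * delta covers the lines that were occupied when we started, using
 * wrap-safe signed subtraction.
 */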
18036 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
18037 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
18040 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
18042 DELAY(FLR_WAIT_INTERVAL);
18043 occup = REG_RD(sc, regs->lines_occup);
18044 freed = REG_RD(sc, regs->lines_freed);
18046 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
18047 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : c:%x\n", regs->pN, occup);
18048 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : c:%x\n", regs->pN, freed);
18053 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
18054 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
18058 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
18060 struct pbf_pN_cmd_regs cmd_regs[] = {
18061 {0, (CHIP_IS_E3B0(sc)) ?
18062 PBF_REG_TQ_OCCUPANCY_Q0 :
18063 PBF_REG_P0_TQ_OCCUPANCY,
18064 (CHIP_IS_E3B0(sc)) ?
18065 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
18066 PBF_REG_P0_TQ_LINES_FREED_CNT},
18067 {1, (CHIP_IS_E3B0(sc)) ?
18068 PBF_REG_TQ_OCCUPANCY_Q1 :
18069 PBF_REG_P1_TQ_OCCUPANCY,
18070 (CHIP_IS_E3B0(sc)) ?
18071 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
18072 PBF_REG_P1_TQ_LINES_FREED_CNT},
18073 {4, (CHIP_IS_E3B0(sc)) ?
18074 PBF_REG_TQ_OCCUPANCY_LB_Q :
18075 PBF_REG_P4_TQ_OCCUPANCY,
18076 (CHIP_IS_E3B0(sc)) ?
18077 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
18078 PBF_REG_P4_TQ_LINES_FREED_CNT}
18081 struct pbf_pN_buf_regs buf_regs[] = {
18082 {0, (CHIP_IS_E3B0(sc)) ?
18083 PBF_REG_INIT_CRD_Q0 :
18084 PBF_REG_P0_INIT_CRD ,
18085 (CHIP_IS_E3B0(sc)) ?
18086 PBF_REG_CREDIT_Q0 :
18088 (CHIP_IS_E3B0(sc)) ?
18089 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
18090 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
18091 {1, (CHIP_IS_E3B0(sc)) ?
18092 PBF_REG_INIT_CRD_Q1 :
18093 PBF_REG_P1_INIT_CRD,
18094 (CHIP_IS_E3B0(sc)) ?
18095 PBF_REG_CREDIT_Q1 :
18097 (CHIP_IS_E3B0(sc)) ?
18098 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
18099 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
18100 {4, (CHIP_IS_E3B0(sc)) ?
18101 PBF_REG_INIT_CRD_LB_Q :
18102 PBF_REG_P4_INIT_CRD,
18103 (CHIP_IS_E3B0(sc)) ?
18104 PBF_REG_CREDIT_LB_Q :
18106 (CHIP_IS_E3B0(sc)) ?
18107 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
18108 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
18113 /* Verify the command queues are flushed P0, P1, P4 */
18114 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18115 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18118 /* Verify the transmission buffers are flushed P0, P1, P4 */
18119 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18120 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18125 bxe_hw_enable_status(struct bxe_softc *sc)
18129 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18130 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18132 val = REG_RD(sc, PBF_REG_DISABLE_PF);
18133 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18135 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18136 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18138 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18139 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18141 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18142 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18144 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18145 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18147 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18148 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18150 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18151 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18155 bxe_pf_flr_clnup(struct bxe_softc *sc)
18157 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18159 BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18161 /* Re-enable PF target read access */
18162 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18164 /* Poll HW usage counters */
18165 BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18166 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18170 /* Zero the igu 'trailing edge' and 'leading edge' */
18172 /* Send the FW cleanup command */
18173 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18179 /* Verify TX hw is flushed */
18180 bxe_tx_hw_flushed(sc, poll_cnt);
18182 /* Wait 100ms (not adjusted according to platform) */
18185 /* Verify no pending pci transactions */
18186 if (bxe_is_pcie_pending(sc)) {
18187 BLOGE(sc, "PCIE Transactions still pending\n");
18191 bxe_hw_enable_status(sc);
18194 * Master enable - Due to WB DMAE writes performed before this
18195 * register is re-initialized as part of the regular function init
18197 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
static int
bxe_init_hw_func(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int init_phase = PHASE_PF0 + func;
    struct ecore_ilt *ilt = sc->ilt;
    uint16_t cdu_ilt_start;
    uint32_t addr, val;
    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
    int i, main_mem_width, rc;

    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);

    /* FLR cleanup */
    if (!CHIP_IS_E1x(sc)) {
        rc = bxe_pf_flr_clnup(sc);
        if (rc) {
            BLOGE(sc, "FLR cleanup failed!\n");
            // XXX bxe_fw_dump(sc);
            // XXX bxe_idle_chk(sc);
            return (rc);
        }
    }

    /* set MSI reconfigure capability */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
        val = REG_RD(sc, addr);
        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
        REG_WR(sc, addr, val);
    }

    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

    for (i = 0; i < L2_ILT_LINES(sc); i++) {
        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
        ilt->lines[cdu_ilt_start + i].page_mapping =
            sc->context[i].vcxt_dma.paddr;
        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
    }
    ecore_ilt_init_op(sc, INITOP_SET);

    /* Set NIC mode */
    REG_WR(sc, PRS_REG_NIC_MODE, 1);
    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");

    if (!CHIP_IS_E1x(sc)) {
        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;

        /* Turn on a single ISR mode in IGU if driver is going to use
         * INT#x or MSI
         */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
        }

        /*
         * Timers workaround bug: function init part.
         * Need to wait 20msec after initializing ILT,
         * needed to make sure there are no requests in
         * one of the PXP internal queues with "old" ILT addresses
         */
        DELAY(20000);

        /*
         * Master enable - Due to WB DMAE writes performed before this
         * register is re-initialized as part of the regular function
         * init
         */
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
        /* Enable the function in IGU */
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
    }

    sc->dmae_ready = 1;

    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_NIG, init_phase);
    ecore_init_block(sc, BLOCK_SRC, init_phase);
    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);
    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, QM_REG_PF_EN, 1);

    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
    }
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TM, init_phase);
    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    bxe_iov_init_dq(sc);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);
    ecore_init_block(sc, BLOCK_PRS, init_phase);
    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);
    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);
    ecore_init_block(sc, BLOCK_PBF, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PBF_REG_DISABLE_PF, 0);

    ecore_init_block(sc, BLOCK_CDU, init_phase);

    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);

    if (IS_MF(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
    }

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);

    /* HC init per function */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        if (CHIP_IS_E1H(sc)) {
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        ecore_init_block(sc, BLOCK_HC, init_phase);
    } else {
        int num_segs, sb_idx, prod_offset;

        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
        }

        ecore_init_block(sc, BLOCK_IGU, init_phase);

        if (!CHIP_IS_E1x(sc)) {
            int dsb_idx = 0;
            /*
             * Producer memory:
             * E2 mode: address 0-135 match to the mapping memory;
             * 136 - PF0 default prod; 137 - PF1 default prod;
             * 138 - PF2 default prod; 139 - PF3 default prod;
             * 140 - PF0 attn prod;    141 - PF1 attn prod;
             * 142 - PF2 attn prod;    143 - PF3 attn prod;
             * 144-147 reserved.
             *
             * E1.5 mode - In backward compatible mode;
             * for non default SB; each even line in the memory
             * holds the U producer and each odd line holds
             * the C producer. The first 128 producers are for
             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSB for each PF.
             * Each PF has five segments: (the order inside each
             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
             * 144-147 attn prods;
             */
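            /*
             * Worked example (illustrative; the exact segment counts
             * come from the IGU_*_NUM_SEGS constants): in normal
             * (non-BC) mode each NDSB owns num_segs consecutive
             * producer words, so the producers of status block
             * (igu_base_sb + sb_idx) start at word
             * (igu_base_sb + sb_idx) * num_segs, i.e. at byte offset
             * IGU_REG_PROD_CONS_MEMORY + prod_offset * 4 - exactly
             * the arithmetic used in the loops below.
             */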
            /* non-default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
                prod_offset = (sc->igu_base_sb + sb_idx) *
                    num_segs;

                for (i = 0; i < num_segs; i++) {
                    addr = IGU_REG_PROD_CONS_MEMORY +
                        (prod_offset + i) * 4;
                    REG_WR(sc, addr, 0);
                }
                /* send consumer update with value 0 */
                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
            }

            /* default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

            if (CHIP_IS_MODE_4_PORT(sc))
                dsb_idx = SC_FUNC(sc);
            else
                dsb_idx = SC_VN(sc);

            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
                           IGU_BC_BASE_DSB_PROD + dsb_idx :
                           IGU_NORM_BASE_DSB_PROD + dsb_idx);

            /*
             * igu prods come in chunks of E1HVN_MAX (4) -
             * does not matter what is the current chip mode
             */
            for (i = 0; i < (num_segs * E1HVN_MAX);
                 i += E1HVN_MAX) {
                addr = IGU_REG_PROD_CONS_MEMORY +
                    (prod_offset + i)*4;
                REG_WR(sc, addr, 0);
            }
            /* send consumer update with 0 */
            if (CHIP_INT_MODE_IS_BC(sc)) {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           CSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           XSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           TSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            } else {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            }
            bxe_igu_clear_sb(sc, sc->igu_dsb_id);

            /* !!! these should become driver const once
             *     rf-tool supports split-68 const
             */
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
        }
    }

    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2114, 0xffffffff);
    REG_WR(sc, 0x2120, 0xffffffff);

    if (CHIP_IS_E1x(sc)) {
        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
        main_mem_base = HC_REG_MAIN_MEMORY +
            SC_PORT(sc) * (main_mem_size * 4);
        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
        main_mem_width = 8;

        val = REG_RD(sc, main_mem_prty_clr);
        if (val != 0) {
            BLOGD(sc, DBG_LOAD,
                  "Parity errors in HC block during function init (0x%x)!\n",
                  val);
        }

        /* Clear "false" parity errors in MSI-X table */
        for (i = main_mem_base;
             i < main_mem_base + main_mem_size * 4;
             i += main_mem_width) {
            bxe_read_dmae(sc, i, main_mem_width / 4);
            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
                           i, main_mem_width / 4);
        }
        /* Clear HC parity attention */
        REG_RD(sc, main_mem_prty_clr);
    }

    /* Enable STORMs SP logging */
    REG_WR8(sc, BAR_USTRORM_INTMEM +
            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_TSTRORM_INTMEM +
            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_XSTRORM_INTMEM +
            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);

    elink_phy_probe(&sc->link_params);

    return (0);
}
static void
bxe_link_reset(struct bxe_softc *sc)
{
    if (!BXE_NOMCP(sc)) {
        bxe_acquire_phy_lock(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        bxe_release_phy_lock(sc);
    } else {
        if (!CHIP_REV_IS_SLOW(sc)) {
            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
        }
    }
}
static void
bxe_reset_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");

    /* reset physical Link */
    bxe_link_reset(sc);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    /* Do not rcv packets to BRB */
    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct rcv packets that are not for MCP to the BRB */
    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

    /* Configure AEU */
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

    DELAY(100000);

    /* Check for BRB port occupancy */
    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
    if (val) {
        BLOGD(sc, DBG_LOAD,
              "BRB1 is not empty, %d blocks are occupied\n", val);
    }

    /* TODO: Close Doorbell port? */
}
static void
bxe_ilt_wr(struct bxe_softc *sc,
           uint32_t index,
           bus_addr_t addr)
{
    int reg;
    uint32_t wb_write[2];

    if (CHIP_IS_E1(sc)) {
        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
    } else {
        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
    }

    wb_write[0] = ONCHIP_ADDR1(addr);
    wb_write[1] = ONCHIP_ADDR2(addr);
    REG_WR_DMAE(sc, reg, wb_write, 2);
}
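/*
 * Each on-chip address-table entry is 8 bytes (hence the index*8
 * scaling above); the ONCHIP_ADDR1()/ONCHIP_ADDR2() macros, defined in
 * the driver headers, split the 64-bit bus address of an ILT page into
 * the two 32-bit words the table entry expects. bxe_clear_func_ilt()
 * below simply rewrites every entry in a function's ILT window with
 * address zero.
 */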
static void
bxe_clear_func_ilt(struct bxe_softc *sc,
                   uint32_t func)
{
    uint32_t i, base = FUNC_ILT_BASE(func);

    for (i = base; i < base + ILT_PER_FUNC; i++) {
        bxe_ilt_wr(sc, i, 0);
    }
}
static void
bxe_reset_func(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int i;

    /* Disable the function in the FW */
    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

    /* FP SBs */
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
                SB_DISABLED);
    }

    /* SP SB */
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
            SB_DISABLED);

    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
    }

    /* Configure IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    } else {
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
    }

    if (CNIC_LOADED(sc)) {
        /* Disable Timer scan */
        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
        /*
         * Wait for at least 10ms and up to 2 seconds for the timers
         * scan to complete
         */
        for (i = 0; i < 200; i++) {
            DELAY(10000);
            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
                break;
        }
    }

    /* Clear ILT */
    bxe_clear_func_ilt(sc, func);

    /*
     * Timers workaround bug for E2: if this is vnic-3,
     * we need to set the entire ILT range for the TM (timers) client.
     */
    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
        struct ilt_client_info ilt_cli;
        /* use dummy TM client */
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
    }

    /* this assumes that reset_port() is called before reset_func() */
    if (!CHIP_IS_E1x(sc)) {
        bxe_pf_disable(sc);
    }

    sc->dmae_ready = 0;
}
static void
bxe_gunzip_init(struct bxe_softc *sc)
{
    return;
}

static void
bxe_gunzip_end(struct bxe_softc *sc)
{
    return;
}
static int
bxe_init_firmware(struct bxe_softc *sc)
{
    if (CHIP_IS_E1(sc)) {
        ecore_init_e1_firmware(sc);
        sc->iro_array = e1_iro_arr;
    } else if (CHIP_IS_E1H(sc)) {
        ecore_init_e1h_firmware(sc);
        sc->iro_array = e1h_iro_arr;
    } else if (!CHIP_IS_E1x(sc)) {
        ecore_init_e2_firmware(sc);
        sc->iro_array = e2_iro_arr;
    } else {
        BLOGE(sc, "Unsupported chip revision\n");
        return (-1);
    }

    return (0);
}
static void
bxe_release_firmware(struct bxe_softc *sc)
{
    /* Do nothing */
    return;
}

static int
ecore_gunzip(struct bxe_softc *sc,
             const uint8_t *zbuf,
             int len)
{
    /* XXX : Implement... */
    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
    return (FALSE);
}

static void
ecore_reg_wr_ind(struct bxe_softc *sc,
                 uint32_t addr,
                 uint32_t val)
{
    bxe_reg_wr_ind(sc, addr, val);
}

static void
ecore_write_dmae_phys_len(struct bxe_softc *sc,
                          bus_addr_t phys_addr,
                          uint32_t addr,
                          uint32_t len)
{
    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
}

void
ecore_storm_memset_struct(struct bxe_softc *sc,
                          uint32_t addr,
                          size_t size,
                          uint32_t *data)
{
    uint8_t i;

    /* copy the structure to STORM internal memory, one dword at a time */
    for (i = 0; i < size/4; i++) {
        REG_WR(sc, addr + (i * 4), data[i]);
    }
}
/*
 * character device - ioctl interface definitions
 */

#include "bxe_dump.h"
#include "bxe_ioctl.h"
#include <sys/conf.h>

static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
                      struct thread *td);

static struct cdevsw bxe_cdevsw = {
    .d_version = D_VERSION,
    .d_ioctl = bxe_eioctl,
    .d_name = "bxecnic",
};

#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
#define DUMP_ALL_PRESETS    0x1FFF
#define DUMP_MAX_PRESETS    13
#define IS_E1_REG(chips)    ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
#define IS_E1H_REG(chips)   ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
#define IS_E2_REG(chips)    ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
#define IS_E3A0_REG(chips)  ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
#define IS_E3B0_REG(chips)  ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)

#define IS_REG_IN_PRESET(presets, idx)  \
    ((presets & (1 << (idx-1))) == (1 << (idx-1)))
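/*
 * Note: presets are numbered starting at 1, so IS_REG_IN_PRESET(presets, 3),
 * for example, tests bit 2 (1 << (3-1)) of the register's preset mask.
 */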
static int
bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
{
    if (CHIP_IS_E1(sc))
        return dump_num_registers[0][preset-1];
    else if (CHIP_IS_E1H(sc))
        return dump_num_registers[1][preset-1];
    else if (CHIP_IS_E2(sc))
        return dump_num_registers[2][preset-1];
    else if (CHIP_IS_E3A0(sc))
        return dump_num_registers[3][preset-1];
    else if (CHIP_IS_E3B0(sc))
        return dump_num_registers[4][preset-1];
    else
        return 0;
}

static int
bxe_get_total_regs_len32(struct bxe_softc *sc)
{
    uint32_t preset_idx;
    int regdump_len32 = 0;

    /* Calculate the total preset regs length */
    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
    }

    return regdump_len32;
}
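/*
 * The byte size of a full dump, as reported by the BXE_GRC_DUMP_SIZE
 * ioctl and allocated in bxe_grc_dump() below, follows directly:
 *     bxe_get_total_regs_len32(sc) * sizeof(uint32_t) +
 *         sizeof(struct dump_header)
 */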
static const uint32_t *
__bxe_get_page_addr_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_vals_e2;
    else if (CHIP_IS_E3(sc))
        return page_vals_e3;
    else
        return NULL;
}

static uint32_t
__bxe_get_page_reg_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_MODE_VALUES_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_MODE_VALUES_E3;
    else
        return 0;
}

static const uint32_t *
__bxe_get_page_write_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_write_regs_e2;
    else if (CHIP_IS_E3(sc))
        return page_write_regs_e3;
    else
        return NULL;
}

static uint32_t
__bxe_get_page_write_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_WRITE_REGS_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_WRITE_REGS_E3;
    else
        return 0;
}

static const struct reg_addr *
__bxe_get_page_read_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_read_regs_e2;
    else if (CHIP_IS_E3(sc))
        return page_read_regs_e3;
    else
        return NULL;
}

static uint32_t
__bxe_get_page_read_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_READ_REGS_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_READ_REGS_E3;
    else
        return 0;
}
static bool
bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
{
    if (CHIP_IS_E1(sc))
        return IS_E1_REG(reg_info->chips);
    else if (CHIP_IS_E1H(sc))
        return IS_E1H_REG(reg_info->chips);
    else if (CHIP_IS_E2(sc))
        return IS_E2_REG(reg_info->chips);
    else if (CHIP_IS_E3A0(sc))
        return IS_E3A0_REG(reg_info->chips);
    else if (CHIP_IS_E3B0(sc))
        return IS_E3B0_REG(reg_info->chips);
    else
        return 0;
}

static bool
bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
{
    if (CHIP_IS_E1(sc))
        return IS_E1_REG(wreg_info->chips);
    else if (CHIP_IS_E1H(sc))
        return IS_E1H_REG(wreg_info->chips);
    else if (CHIP_IS_E2(sc))
        return IS_E2_REG(wreg_info->chips);
    else if (CHIP_IS_E3A0(sc))
        return IS_E3A0_REG(wreg_info->chips);
    else if (CHIP_IS_E3B0(sc))
        return IS_E3B0_REG(wreg_info->chips);
    else
        return 0;
}
/**
 * bxe_read_pages_regs - read "paged" registers
 *
 * @sc  device handle
 * @p   output buffer
 *
 * Reads "paged" memories: memories that may only be read by first writing to a
 * specific address ("write address") and then reading from a specific address
 * ("read address"). There may be more than one write address per "page" and
 * more than one read address per write address.
 */
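/*
 * A sketch of the access pattern this implements, for one page value and
 * one write/read register pair (register names below are illustrative,
 * not real definitions):
 *
 *     REG_WR(sc, PAGE_WRITE_ADDR, page_val);   // select the page
 *     val = REG_RD(sc, PAGE_READ_ADDR);        // then read through it
 *
 * The chip-specific page values and write/read address tables come from
 * the __bxe_get_page_*() helpers above.
 */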
static void
bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
    uint32_t i, j, k, n;

    /* addresses of the paged registers */
    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
    /* number of paged registers */
    int num_pages = __bxe_get_page_reg_num(sc);
    /* write addresses */
    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
    /* number of write addresses */
    int write_num = __bxe_get_page_write_num(sc);
    /* read addresses info */
    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
    /* number of read addresses */
    int read_num = __bxe_get_page_read_num(sc);
    uint32_t addr, size;

    for (i = 0; i < num_pages; i++) {
        for (j = 0; j < write_num; j++) {
            REG_WR(sc, write_addr[j], page_addr[i]);

            for (k = 0; k < read_num; k++) {
                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
                    size = read_addr[k].size;
                    for (n = 0; n < size; n++) {
                        addr = read_addr[k].addr + n*4;
                        *p++ = REG_RD(sc, addr);
                    }
                }
            }
        }
    }
}
static int
bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
    uint32_t i, j, addr;
    const struct wreg_addr *wreg_addr_p = NULL;

    if (CHIP_IS_E1(sc))
        wreg_addr_p = &wreg_addr_e1;
    else if (CHIP_IS_E1H(sc))
        wreg_addr_p = &wreg_addr_e1h;
    else if (CHIP_IS_E2(sc))
        wreg_addr_p = &wreg_addr_e2;
    else if (CHIP_IS_E3A0(sc))
        wreg_addr_p = &wreg_addr_e3;
    else if (CHIP_IS_E3B0(sc))
        wreg_addr_p = &wreg_addr_e3b0;
    else
        return (-1);

    /* Read the idle_chk registers */
    for (i = 0; i < IDLE_REGS_COUNT; i++) {
        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
            for (j = 0; j < idle_reg_addrs[i].size; j++)
                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
        }
    }

    /* Read the regular registers */
    for (i = 0; i < REGS_COUNT; i++) {
        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
            for (j = 0; j < reg_addrs[i].size; j++)
                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
        }
    }

    /* Read the CAM registers */
    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
        for (i = 0; i < wreg_addr_p->size; i++) {
            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);

            /* In case of wreg_addr register, read additional
               registers from read_regs array
             */
            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
                addr = *(wreg_addr_p->read_regs);
                *p++ = REG_RD(sc, addr + j*4);
            }
        }
    }

    /* Paged registers are supported in E2 & E3 only */
    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
        /* Read "paged" registers */
        bxe_read_pages_regs(sc, p, preset);
    }

    return 0;
}
static int
bxe_grc_dump(struct bxe_softc *sc)
{
    int rval = 0;
    uint32_t preset_idx;
    uint8_t *buf;
    uint32_t size;
    struct dump_header *d_hdr;
    uint32_t i;
    uint32_t reg_val;
    uint32_t reg_addr;
    uint32_t cmd_offset;
    struct ecore_ilt *ilt = SC_ILT(sc);
    struct bxe_fastpath *fp;
    struct ilt_client_info *ilt_cli;
    int grc_dump_size;

    if (sc->grcdump_done || sc->grcdump_started)
        return (rval);

    sc->grcdump_started = 1;
    BLOGI(sc, "Started collecting grcdump\n");

    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                    sizeof(struct dump_header);

    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);

    if (sc->grc_dump == NULL) {
        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
        return (ENOMEM);
    }

    /* Disable parity attentions as long as following dump may
     * cause false alarms by reading never written registers. We
     * will re-enable parity attentions right after the dump.
     */

    /* Disable parity on path 0 */
    bxe_pretend_func(sc, 0);
    ecore_disable_blocks_parity(sc);

    /* Disable parity on path 1 */
    bxe_pretend_func(sc, 1);
    ecore_disable_blocks_parity(sc);

    /* Return to current function */
    bxe_pretend_func(sc, SC_ABS_FUNC(sc));

    buf = sc->grc_dump;
    d_hdr = sc->grc_dump;

    d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
    d_hdr->version = BNX2X_DUMP_VERSION;
    d_hdr->preset = DUMP_ALL_PRESETS;

    if (CHIP_IS_E1(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E1;
    } else if (CHIP_IS_E1H(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
    } else if (CHIP_IS_E2(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    } else if (CHIP_IS_E3A0(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    } else if (CHIP_IS_E3B0(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    }

    buf += sizeof(struct dump_header);

    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {

        /* Skip presets with IOR */
        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
            (preset_idx == 11))
            continue;

        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);

        if (rval)
            break;

        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));

        buf += size;
    }

    bxe_pretend_func(sc, 0);
    ecore_clear_blocks_parity(sc);
    ecore_enable_blocks_parity(sc);

    bxe_pretend_func(sc, 1);
    ecore_clear_blocks_parity(sc);
    ecore_enable_blocks_parity(sc);

    /* Return to current function */
    bxe_pretend_func(sc, SC_ABS_FUNC(sc));

    if (sc->state == BXE_STATE_OPEN) {
        if (sc->fw_stats_req != NULL) {
            BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->fw_stats_req_mapping,
                  (uintmax_t)sc->fw_stats_data_mapping,
                  sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
        }
        if (sc->def_sb != NULL) {
            BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
                  (void *)sc->def_sb_dma.paddr, sc->def_sb,
                  sizeof(struct host_sp_status_block));
        }
        if (sc->eq_dma.vaddr != NULL) {
            BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
        }
        if (sc->sp_dma.vaddr != NULL) {
            BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
                  (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
                  sizeof(struct bxe_slowpath));
        }
        if (sc->spq_dma.vaddr != NULL) {
            BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
        }
        if (sc->gz_buf_dma.vaddr != NULL) {
            BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
                  FW_BUF_SIZE);
        }
        for (i = 0; i < sc->num_queues; i++) {
            fp = &sc->fp[i];
            if (fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
                fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
                fp->rx_sge_dma.vaddr != NULL) {

                BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
                      (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
                      sizeof(union bxe_host_hc_status_block));
                BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
                      (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
                BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
                      (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
                BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
                      (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
                      (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
                BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
                      (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
            }
        }

        ilt_cli = &ilt->clients[1];
        if (ilt->lines != NULL) {
            for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
                BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
                      (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
                      ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
            }
        }

        cmd_offset = DMAE_REG_CMD_MEM;
        for (i = 0; i < 224; i++) {
            reg_addr = (cmd_offset + (i * 4));
            reg_val = REG_RD(sc, reg_addr);
            BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n", i,
                  reg_addr, reg_val);
        }
    }

    BLOGI(sc, "Collection of grcdump done\n");
    sc->grcdump_done = 1;
    return (rval);
}
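/*
 * Resulting buffer layout, as produced above: a struct dump_header
 * followed by the register block of every preset from 1 to
 * DUMP_MAX_PRESETS except the IOR presets (2, 5, 8 and 11), each block
 * bxe_get_preset_regs_len() dwords long.
 */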
static int
bxe_add_cdev(struct bxe_softc *sc)
{
    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);

    if (sc->eeprom == NULL) {
        BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
        return (-1);
    }

    sc->ioctl_dev = make_dev(&bxe_cdevsw,
                             sc->ifnet->if_dunit,
                             UID_ROOT,
                             GID_WHEEL,
                             0600,
                             "%s",
                             if_name(sc->ifnet));

    if (sc->ioctl_dev == NULL) {
        free(sc->eeprom, M_DEVBUF);
        sc->eeprom = NULL;
        return (-1);
    }

    sc->ioctl_dev->si_drv1 = sc;

    return (0);
}
static void
bxe_del_cdev(struct bxe_softc *sc)
{
    if (sc->ioctl_dev != NULL)
        destroy_dev(sc->ioctl_dev);

    if (sc->eeprom != NULL) {
        free(sc->eeprom, M_DEVBUF);
        sc->eeprom = NULL;
    }
    sc->ioctl_dev = NULL;

    return;
}
static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
{
    if ((sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) == 0)
        return (false);

    return (true);
}
static int
bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
{
    int rval = 0;

    if (!bxe_is_nvram_accessible(sc)) {
        BLOGW(sc, "Cannot access eeprom when interface is down\n");
        return (-EAGAIN);
    }
    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);

    return (rval);
}

static int
bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
{
    int rval = 0;

    if (!bxe_is_nvram_accessible(sc)) {
        BLOGW(sc, "Cannot access eeprom when interface is down\n");
        return (-EAGAIN);
    }
    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);

    return (rval);
}
static int
bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
{
    int rval = 0;

    switch (eeprom->eeprom_cmd) {

    case BXE_EEPROM_CMD_SET_EEPROM:

        rval = copyin(eeprom->eeprom_data, sc->eeprom,
                      eeprom->eeprom_data_len);

        if (rval)
            break;

        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
                             eeprom->eeprom_data_len);
        break;

    case BXE_EEPROM_CMD_GET_EEPROM:

        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
                             eeprom->eeprom_data_len);

        if (rval) {
            break;
        }

        rval = copyout(sc->eeprom, eeprom->eeprom_data,
                       eeprom->eeprom_data_len);
        break;

    default:
        rval = EINVAL;
        break;
    }

    if (rval) {
        BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
    }

    return (rval);
}
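/*
 * Hedged usage sketch (illustrative only, not part of the driver): a
 * userland tool would fill in a bxe_eeprom_t and issue the BXE_EEPROM
 * ioctl (see the bxe_eioctl() switch below) against the character
 * device created in bxe_add_cdev(). Field names follow bxe_ioctl.h;
 * the BXE_EEPROM command name and device-node path are assumptions.
 */
#if 0
static int
example_read_eeprom(int fd, void *buf, uint32_t len)
{
    bxe_eeprom_t e;

    memset(&e, 0, sizeof(e));
    e.eeprom_cmd      = BXE_EEPROM_CMD_GET_EEPROM;
    e.eeprom_offset   = 0;      /* NVRAM offset to start reading at */
    e.eeprom_data     = buf;    /* user buffer to copy the data out to */
    e.eeprom_data_len = len;

    return (ioctl(fd, BXE_EEPROM, &e)); /* fd: open()ed device node */
}
#endif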
static void
bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
{
    uint32_t ext_phy_config;
    int port = SC_PORT(sc);
    int cfg_idx = bxe_get_link_cfg_idx(sc);

    dev_p->supported = sc->port.supported[cfg_idx] |
                       (sc->port.supported[cfg_idx ^ 1] &
                        (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
    dev_p->advertising = sc->port.advertising[cfg_idx];
    if (sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
        ELINK_ETH_PHY_SFP_1G_FIBER) {
        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
    }
    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
        !(sc->flags & BXE_MF_FUNC_DIS)) {
        dev_p->duplex = sc->link_vars.duplex;
        if (IS_MF(sc) && !BXE_NOMCP(sc))
            dev_p->speed = bxe_get_mf_speed(sc);
        else
            dev_p->speed = sc->link_vars.line_speed;
    } else {
        dev_p->duplex = DUPLEX_UNKNOWN;
        dev_p->speed = SPEED_UNKNOWN;
    }

    dev_p->port = bxe_media_detect(sc);

    ext_phy_config = SHMEM_RD(sc,
                              dev_info.port_hw_config[port].external_phy_config);
    if ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
        dev_p->phy_address = sc->port.phy_addr;
    else if (((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
             ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
    else
        dev_p->phy_address = 0;

    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
        dev_p->autoneg = AUTONEG_ENABLE;
    else
        dev_p->autoneg = AUTONEG_DISABLE;

    return;
}
static int
bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
           struct thread *td)
{
    struct bxe_softc *sc;
    int rval = 0;
    bxe_grcdump_t *dump = NULL;
    int grc_dump_size;
    bxe_drvinfo_t *drv_infop = NULL;
    bxe_dev_setting_t *dev_p;
    bxe_dev_setting_t dev_set;
    bxe_get_regs_t *reg_p;
    bxe_reg_rdw_t *reg_rdw_p;
    bxe_pcicfg_rdw_t *cfg_rdw_p;
    bxe_perm_mac_addr_t *mac_addr_p;

    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
        return ENXIO;

    dump = (bxe_grcdump_t *)data;

    switch(cmd) {

    case BXE_GRC_DUMP_SIZE:
        dump->pci_func = sc->pcie_func;
        dump->grcdump_size =
            (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
            sizeof(struct dump_header);
        break;

    case BXE_GRC_DUMP:

        grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                        sizeof(struct dump_header);
        if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
            (dump->grcdump_size < grc_dump_size)) {
            rval = EINVAL;
            break;
        }

        if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
            (!sc->grcdump_started)) {
            rval = bxe_grc_dump(sc);
        }

        if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
            (sc->grc_dump != NULL)) {
            dump->grcdump_dwords = grc_dump_size >> 2;
            rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
            free(sc->grc_dump, M_DEVBUF);
            sc->grc_dump = NULL;
            sc->grcdump_started = 0;
            sc->grcdump_done = 0;
        }

        break;

    case BXE_DRV_INFO:
        drv_infop = (bxe_drvinfo_t *)data;
        snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
        snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
                 BXE_DRIVER_VERSION);
        snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
                 sc->devinfo.bc_ver_str);
        snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
                 "%s", sc->fw_ver_str);
        drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
        drv_infop->reg_dump_len =
            (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
            sizeof(struct dump_header);
        snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
                 sc->pcie_bus, sc->pcie_device, sc->pcie_func);
        break;

    case BXE_DEV_SETTING:
        dev_p = (bxe_dev_setting_t *)data;
        bxe_get_settings(sc, &dev_set);
        dev_p->supported = dev_set.supported;
        dev_p->advertising = dev_set.advertising;
        dev_p->speed = dev_set.speed;
        dev_p->duplex = dev_set.duplex;
        dev_p->port = dev_set.port;
        dev_p->phy_address = dev_set.phy_address;
        dev_p->autoneg = dev_set.autoneg;
        break;

    case BXE_GET_REGS:

        reg_p = (bxe_get_regs_t *)data;
        grc_dump_size = reg_p->reg_buf_len;

        if((!sc->grcdump_done) && (!sc->grcdump_started)) {
            bxe_grc_dump(sc);
        }
        if((sc->grcdump_done) && (sc->grcdump_started) &&
            (sc->grc_dump != NULL)) {
            rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
            free(sc->grc_dump, M_DEVBUF);
            sc->grc_dump = NULL;
            sc->grcdump_started = 0;
            sc->grcdump_done = 0;
        }
        break;

    case BXE_RDW_REG:
        reg_rdw_p = (bxe_reg_rdw_t *)data;
        if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
            (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
            reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);

        if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
            (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
            REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);

        break;

    case BXE_RDW_PCICFG:
        cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
        if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {

            cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
                                                 cfg_rdw_p->cfg_width);

        } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
            pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
                             cfg_rdw_p->cfg_width);
        } else {
            BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
        }
        break;

    case BXE_MAC_ADDR:
        mac_addr_p = (bxe_perm_mac_addr_t *)data;
        snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
                 sc->mac_addr_str);
        break;

    case BXE_EEPROM:
        rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);