/*
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.79"

#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"
/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif
#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002
/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 VF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 VF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 VF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 VF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 2x20GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 VF 10GbE"
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
};
/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t func;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");
/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
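/*
 * Illustrative usage (not part of the original source): since these knobs
 * are registered with TUNABLE_* and CTLFLAG_RDTUN/CTLFLAG_TUN, they are
 * typically set as loader tunables before the module loads, e.g. in
 * /boot/loader.conf:
 *
 *   hw.bxe.interrupt_mode=2    # prefer MSI-X
 *   hw.bxe.queue_count=4       # four RSS queues
 *   hw.bxe.debug=0             # debug logging mask
 *
 * The values shown are examples only; the defaults above apply when a
 * tunable is left unset.
 */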
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
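/*
 * Illustrative example (not part of the original source): if, say,
 * total_bytes_received_hi were at byte offset 16 within struct
 * bxe_eth_stats, then STATS_OFFSET32(total_bytes_received_hi) would
 * evaluate to 16 / 4 = 4, i.e. the field's index when the stats block is
 * viewed as an array of 32-bit words. The *_hi/*_lo pairs in the tables
 * below are two consecutive dwords forming one 64-bit counter (size 8).
 */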
static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes"},

    { STATS_OFFSET32(recoverable_error),
      4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error),
      4, STATS_FLAGS_FUNC, "unrecoverable_errors" },

    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
      4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
      4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
      4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
      4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
      4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},

    { STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"},
    { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"},

    { STATS_OFFSET32(tx_chain_lost_mbuf),
      4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
      4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
      4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}
};
static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
      8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
      8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
      8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
      8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso"},

    { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, "tx_unsupported_tso_request_ipv6"},
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, "tx_unsupported_tso_request_not_tcp"},

    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa"}
};
#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
static void bxe_cmng_fns_init(struct bxe_softc *sc,
static int bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
static void bxe_int_disable(struct bxe_softc *sc);
static int bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
static int bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int bxe_init_locked(struct bxe_softc *sc);
static int bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);

static int bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
static int bxe_grc_dump(struct bxe_softc *sc);
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
static uint32_t
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t complement)
{
    uint32_t byte = 0;
    uint32_t bit = 0;
    uint8_t msb = 0;
    uint32_t temp = 0;
    uint32_t shft = 0;
    uint8_t current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }
    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }
    /* the last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* mirror the bits */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}
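/*
 * Illustrative usage (not part of the original source; the seed and
 * complement arguments follow the reconstructed signature above):
 * computing the CRC of a 16-byte buffer (length is a multiple of 8)
 * with an all-ones seed. A NULL buffer or a non-8-aligned length takes
 * the early-out above and simply returns the seed.
 *
 *   uint8_t buf[16] = { 0 };
 *   uint32_t crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 0);
 */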
static inline uint8_t
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

static inline void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

static inline void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

static inline long
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

static inline long
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

static inline int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
 * failures back to the caller.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
    }

    BLOGD(dma->sc, DBG_LOAD,
          "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
          dma->msg, dma->vaddr, (void *)dma->paddr,
          dma->nseg, dma->size);
}
/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * are allowed; any resources acquired are released if we can't acquire
 * everything.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t size,
              struct bxe_dma *dma,
              const char *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}
void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
{
    if (dma->size > 0) {
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
 */
void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t addr,
               uint32_t val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
{
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    default:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    }
}
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource lock timeout!\n");
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}
static void bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}
/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
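/*
 * Illustrative note (not part of the original source): the safe sequence
 * described above is per-pf misc lock first, then the per-port lock,
 * released in reverse order. bxe_acquire_nvram_lock() below follows it by
 * taking HW_LOCK_RESOURCE_NVRAM before touching the per-port SW arbiter
 * (MCP_REG_MCPR_NVM_SW_ARB), and bxe_release_nvram_lock() mirrors that.
 */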
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }
        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface\n");
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }
        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}
static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *ret_val,
                     uint32_t cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes;
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired\n");
    }

    return (rc);
}
static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t offset,
               uint8_t *ret_buf,
               int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        ret_buf += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t val,
                      uint32_t cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired\n");
    }

    return (rc);
}
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
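/*
 * Example (illustrative, not part of the original source):
 * BYTE_OFFSET(0x7) == 8 * (0x7 & 0x03) == 24, the left-shift needed to
 * position a byte within its containing 32-bit word, as used by
 * bxe_nvram_write1() below for single-byte writes.
 */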
static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t offset,
                 uint8_t *data_buf,
                 int buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes;
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t offset,
                uint8_t *data_buf,
                int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        data_buf += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_command *dmae,
              int idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}
uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t comp_type)
{
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
}

uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_COMMAND_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t src_type,
                uint8_t dst_type,
                uint8_t with_comp,
                uint8_t comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}
static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_command *dmae,
                        uint8_t src_type,
                        uint8_t dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}
/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_command *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(500);
    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t src_addr,
              uint32_t len32)
{
    struct dmae_command dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}
void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,
               uint32_t dst_addr,
               uint32_t len32)
{
    struct dmae_command dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}
void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,
                        uint32_t addr,
                        uint32_t len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}
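/*
 * Illustrative example (not part of the original source): with a
 * hypothetical dmae_wr_max of 0x2000 dwords, a 0x5000-dword write is
 * issued as two 0x2000-dword DMAE commands followed by one final
 * 0x1000-dword command, the source DMA address and destination GRC
 * address both advancing by dmae_wr_max*4 bytes each pass.
 */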
void
bxe_set_ctx_validation(struct bxe_softc *sc,
                       struct eth_context *cxt,
                       uint32_t cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t port,
                            uint8_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}
static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t port,
                            uint16_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}
void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t fw_sb_id,
                             uint8_t sb_index,
                             uint8_t disable,
                             uint16_t usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}
void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t reg_addr,
                   uint32_t val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t *wb_write,
                      uint16_t len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *wb_write,
                     uint16_t len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}

void
elink_cb_event_log(struct bxe_softc *sc,
                   const elink_log_id_t elink_log_id,
                   ...)
{
    /* XXX */
#if 0
    va_list ap;
    va_start(ap, elink_log_id);
    _XXX_(sc, lm_log_id, ap);
    va_end(ap);
#endif

    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
}
static int
bxe_set_spio(struct bxe_softc *sc,
             int spio,
             uint32_t mode)
{
    uint32_t spio_reg;

    /* Only 2 SPIOs are configurable */
    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
        BLOGE(sc, "Invalid SPIO 0x%x\n", spio);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    /* read SPIO and mask except the float bits */
    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

    switch (mode) {
    case MISC_SPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
        /* clear FLOAT and set CLR */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_CLR_POS);
        break;

    case MISC_SPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
        /* clear FLOAT and set SET */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_SET_POS);
        break;

    case MISC_SPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
        /* set FLOAT */
        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_SPIO, spio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    return (0);
}
int
bxe_gpio_read(struct bxe_softc *sc,
              int gpio_num,
              uint8_t port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
        return (-1);
    }

    /* read GPIO value */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);

    /* get the requested pin value */
    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
}
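/*
 * Illustrative note (not part of the original source): with both
 * NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE nonzero, gpio_port above
 * becomes (1 ^ port), so a request for port 0 reads the port-1 GPIO bank
 * (the pin index is biased by MISC_REGISTERS_GPIO_PORT_SHIFT); otherwise
 * gpio_port equals port and no swap is applied.
 */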
int
bxe_gpio_write(struct bxe_softc *sc,
               int gpio_num,
               uint32_t mode,
               uint8_t port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
        break;

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> input\n",
              gpio_num, gpio_shift);
        /* set FLOAT */
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
int
bxe_gpio_mult_write(struct bxe_softc *sc,
                    uint8_t pins,
                    uint32_t mode)
{
    uint32_t gpio_reg;

    /* any port swapping should be handled by caller */

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

    switch (mode) {
    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
        /* set CLR */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
        /* set SET */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
        break;

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
        /* set FLOAT */
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
        break;

    default:
        BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode);
        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
        return (-1);
    }

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
int
bxe_gpio_int_write(struct bxe_softc *sc,
                   int gpio_num,
                   uint32_t mode,
                   uint8_t port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
        return (-1);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO int */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
        BLOGD(sc, DBG_PHY,
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
        BLOGD(sc, DBG_PHY,
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
uint32_t
elink_cb_gpio_read(struct bxe_softc *sc,
                   uint16_t gpio_num,
                   uint8_t port)
{
    return (bxe_gpio_read(sc, gpio_num, port));
}

uint8_t
elink_cb_gpio_write(struct bxe_softc *sc,
                    uint16_t gpio_num,
                    uint8_t mode, /* 0=low 1=high */
                    uint8_t port)
{
    return (bxe_gpio_write(sc, gpio_num, mode, port));
}

uint8_t
elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t pins,
                         uint8_t mode) /* 0=low 1=high */
{
    return (bxe_gpio_mult_write(sc, pins, mode));
}

uint8_t
elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint16_t gpio_num,
                        uint8_t mode, /* 0=low 1=high */
                        uint8_t port)
{
    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
}

void
elink_cb_notify_link_changed(struct bxe_softc *sc)
{
    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
}
/* send the MCP a request, block until there is a reply */
uint32_t
elink_cb_fw_command(struct bxe_softc *sc,
                    uint32_t command,
                    uint32_t param)
{
    int mb_idx = SC_FW_MB_IDX(sc);
    uint32_t seq;
    uint32_t rc = 0;
    uint32_t cnt = 1;
    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    BXE_FWMB_LOCK(sc);

    seq = ++sc->fw_seq;
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

    BLOGD(sc, DBG_PHY,
          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

    BLOGD(sc, DBG_PHY,
          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          cnt*delay, rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;
    } else {
        /* Ruh-roh! */
        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);
        rc = 0;
    }

    BXE_FWMB_UNLOCK(sc);
    return (rc);
}

uint32_t
bxe_fw_command(struct bxe_softc *sc,
               uint32_t command,
               uint32_t param)
{
    return (elink_cb_fw_command(sc, command, param));
}
static void
__storm_memset_dma_mapping(struct bxe_softc *sc,
                           uint32_t addr,
                           bus_addr_t mapping)
{
    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));
}

static void
storm_memset_spq_addr(struct bxe_softc *sc,
                      bus_addr_t mapping,
                      uint16_t abs_fid)
{
    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
    __storm_memset_dma_mapping(sc, addr, mapping);
}

static void
storm_memset_vf_to_pf(struct bxe_softc *sc,
                      uint16_t abs_fid,
                      uint16_t pf_id)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
}

static void
storm_memset_func_en(struct bxe_softc *sc,
                     uint16_t abs_fid,
                     uint8_t enable)
{
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
}

static void
storm_memset_eq_data(struct bxe_softc *sc,
                     struct event_ring_data *eq_data,
                     uint16_t pfid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
}

static void
storm_memset_eq_prod(struct bxe_softc *sc,
                     uint16_t eq_prod,
                     uint16_t pfid)
{
    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
    REG_WR16(sc, addr, eq_prod);
}
2360 * Post a slowpath command.
2362 * A slowpath command is used to propagate a configuration change through
2363 * the controller in a controlled manner, allowing each STORM processor and
2364 * other H/W blocks to phase in the change. The commands sent on the
2365 * slowpath are referred to as ramrods. Depending on the ramrod used the
2366 * completion of the ramrod will occur in different ways. Here's a
2367 * breakdown of ramrods and how they complete:
2369 * RAMROD_CMD_ID_ETH_PORT_SETUP
2370 * Used to setup the leading connection on a port. Completes on the
2371 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
2373 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2374 * Used to setup an additional connection on a port. Completes on the
2375 * RCQ of the multi-queue/RSS connection being initialized.
2377 * RAMROD_CMD_ID_ETH_STAT_QUERY
2378 * Used to force the storm processors to update the statistics database
2379 * in host memory. This ramrod is sent on the leading connection CID and
2380 * completes as an index increment of the CSTORM on the default status block.
2383 * RAMROD_CMD_ID_ETH_UPDATE
2384 * Used to update the state of the leading connection, usually to update
2385 * the RSS indirection table. Completes on the RCQ of the leading
2386 * connection. (Not currently used under FreeBSD until OS support becomes available.)
2389 * RAMROD_CMD_ID_ETH_HALT
2390 * Used when tearing down a connection prior to driver unload. Completes
2391 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2392 * use this on the leading connection.
2394 * RAMROD_CMD_ID_ETH_SET_MAC
2395 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
2396 * the RCQ of the leading connection.
2398 * RAMROD_CMD_ID_ETH_CFC_DEL
2399 * Used when tearing down a connection prior to driver unload. Completes
2400 * on the RCQ of the leading connection (since the current connection
2401 * has been completely removed from controller memory).
2403 * RAMROD_CMD_ID_ETH_PORT_DEL
2404 * Used to tear down the leading connection prior to driver unload,
2405 * typically fp[0]. Completes as an index increment of the CSTORM on the
2406 * default status block.
2408 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2409 * Used for connection offload. Completes on the RCQ of the multi-queue
2410 * RSS connection that is being offloaded. (Not currently used under FreeBSD.)
2413 * There can only be one command pending per function.
2416 * 0 = Success, !0 = Failure.
2419 /* must be called under the spq lock */
2421 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2423 struct eth_spe *next_spe = sc->spq_prod_bd;
2425 if (sc->spq_prod_bd == sc->spq_last_bd) {
2426 /* wrap back to the first eth_spq */
2427 sc->spq_prod_bd = sc->spq;
2428 sc->spq_prod_idx = 0;
2437 /* must be called under the spq lock */
2439 void bxe_sp_prod_update(struct bxe_softc *sc)
2441 int func = SC_FUNC(sc);
2444 * Make sure that BD data is updated before writing the producer.
2445 * BD data is written to the memory, the producer is read from the
2446 * memory, thus we need a full memory barrier to ensure the ordering.
2450 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2453 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2454 BUS_SPACE_BARRIER_WRITE);
2458 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2460 * @cmd: command to check
2461 * @cmd_type: command type
2464 int bxe_is_contextless_ramrod(int cmd,
2467 if ((cmd_type == NONE_CONNECTION_TYPE) ||
2468 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2469 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2470 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2471 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2472 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2473 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2481 * bxe_sp_post - place a single command on an SP ring
2483 * @sc: driver handle
2484 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2485 * @cid: SW CID the command is related to
2486 * @data_hi: command private data address (high 32 bits)
2487 * @data_lo: command private data address (low 32 bits)
2488 * @cmd_type: command type (e.g. NONE, ETH)
2490 * SP data is handled as if it's always an address pair, thus data fields are
2491 * not swapped to little endian in upper functions. Instead this function swaps
2492 * data as if it's two uint32 fields.
2495 bxe_sp_post(struct bxe_softc *sc,
2502 struct eth_spe *spe;
2506 common = bxe_is_contextless_ramrod(command, cmd_type);
2511 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2512 BLOGE(sc, "EQ ring is full!\n");
2517 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2518 BLOGE(sc, "SPQ ring is full!\n");
2524 spe = bxe_sp_get_next(sc);
2526 /* CID needs the port number to be encoded in it */
2527 spe->hdr.conn_and_cmd_data =
2528 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));
2530 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
2532 /* TBD: Check if it works for VFs */
2533 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
2534 SPE_HDR_FUNCTION_ID);
2536 spe->hdr.type = htole16(type);
2538 spe->data.update_data_addr.hi = htole32(data_hi);
2539 spe->data.update_data_addr.lo = htole32(data_lo);
2542 * It's ok if the actual decrement is issued towards the memory
2543 * somewhere between the lock and unlock. Thus no more explicit
2544 * memory barrier is needed.
2547 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2549 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2552 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2553 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2554 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2556 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2558 (uint32_t)U64_HI(sc->spq_dma.paddr),
2559 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2566 atomic_load_acq_long(&sc->cq_spq_left),
2567 atomic_load_acq_long(&sc->eq_spq_left));
2569 bxe_sp_prod_update(sc);
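/*
 * Editor's sketch (illustrative only, not part of the driver): posting a
 * contextless ramrod through bxe_sp_post(). The command ID chosen here is
 * assumed for illustration; because it uses NONE_CONNECTION_TYPE its
 * completion arrives on the EQ rather than on a connection RCQ.
 */
#if 0
static int
bxe_sp_post_usage_sketch(struct bxe_softc *sc, bus_addr_t rdata_paddr)
{
    /* cid is ignored for NONE_CONNECTION_TYPE commands */
    return (bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
                        U64_HI(rdata_paddr), U64_LO(rdata_paddr),
                        NONE_CONNECTION_TYPE));
}
#endif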
2576 * bxe_debug_print_ind_table - prints the indirection table configuration.
2578 * @sc: driver handle
2579 * @p: pointer to rss configuration
2583 bxe_debug_print_ind_table(struct bxe_softc *sc,
2584 struct ecore_config_rss_params *p)
2588 BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n");
2589 BLOGD(sc, DBG_LOAD, " 0x0000: ");
2590 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
2591 BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]);
2593 /* Print 4 bytes in a line */
2594 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
2595 (((i + 1) & 0x3) == 0)) {
2596 BLOGD(sc, DBG_LOAD, "\n");
2597 BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1);
2601 BLOGD(sc, DBG_LOAD, "\n");
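/*
 * For illustration (hypothetical values): with two RSS queues the dump
 * above would read roughly:
 *
 *   Setting indirection table to:
 *    0x0000: 0x00 0x01 0x00 0x01
 *   0x0004: 0x00 0x01 0x00 0x01
 *   ...
 */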
2606 * FreeBSD Device probe function.
2608 * Compares the device found to the driver's list of supported devices and
2609 * reports back to the BSD loader whether this is the right driver for the device.
2610 * This is the driver entry function called from the "kldload" command.
2613 * BUS_PROBE_DEFAULT on success, positive value on failure.
2616 bxe_probe(device_t dev)
2618 struct bxe_softc *sc;
2619 struct bxe_device_type *t;
2621 uint16_t did, sdid, svid, vid;
2623 /* Find our device structure */
2624 sc = device_get_softc(dev);
2628 /* Get the data for the device to be probed. */
2629 vid = pci_get_vendor(dev);
2630 did = pci_get_device(dev);
2631 svid = pci_get_subvendor(dev);
2632 sdid = pci_get_subdevice(dev);
2635 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2636 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2638 /* Look through the list of known devices for a match. */
2639 while (t->bxe_name != NULL) {
2640 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2641 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2642 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2643 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2644 if (descbuf == NULL)
2647 /* Print out the device identity. */
2648 snprintf(descbuf, BXE_DEVDESC_MAX,
2649 "%s (%c%d) BXE v:%s\n", t->bxe_name,
2650 (((pci_read_config(dev, PCIR_REVID, 4) &
2652 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2653 BXE_DRIVER_VERSION);
2655 device_set_desc_copy(dev, descbuf);
2656 free(descbuf, M_TEMP);
2657 return (BUS_PROBE_DEFAULT);
2666 bxe_init_mutexes(struct bxe_softc *sc)
2668 #ifdef BXE_CORE_LOCK_SX
2669 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2670 "bxe%d_core_lock", sc->unit);
2671 sx_init(&sc->core_sx, sc->core_sx_name);
2673 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2674 "bxe%d_core_lock", sc->unit);
2675 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2678 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2679 "bxe%d_sp_lock", sc->unit);
2680 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2682 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2683 "bxe%d_dmae_lock", sc->unit);
2684 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2686 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2687 "bxe%d_phy_lock", sc->unit);
2688 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2690 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2691 "bxe%d_fwmb_lock", sc->unit);
2692 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2694 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2695 "bxe%d_print_lock", sc->unit);
2696 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2698 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2699 "bxe%d_stats_lock", sc->unit);
2700 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2702 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2703 "bxe%d_mcast_lock", sc->unit);
2704 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2708 bxe_release_mutexes(struct bxe_softc *sc)
2710 #ifdef BXE_CORE_LOCK_SX
2711 sx_destroy(&sc->core_sx);
2713 if (mtx_initialized(&sc->core_mtx)) {
2714 mtx_destroy(&sc->core_mtx);
2718 if (mtx_initialized(&sc->sp_mtx)) {
2719 mtx_destroy(&sc->sp_mtx);
2722 if (mtx_initialized(&sc->dmae_mtx)) {
2723 mtx_destroy(&sc->dmae_mtx);
2726 if (mtx_initialized(&sc->port.phy_mtx)) {
2727 mtx_destroy(&sc->port.phy_mtx);
2730 if (mtx_initialized(&sc->fwmb_mtx)) {
2731 mtx_destroy(&sc->fwmb_mtx);
2734 if (mtx_initialized(&sc->print_mtx)) {
2735 mtx_destroy(&sc->print_mtx);
2738 if (mtx_initialized(&sc->stats_mtx)) {
2739 mtx_destroy(&sc->stats_mtx);
2742 if (mtx_initialized(&sc->mcast_mtx)) {
2743 mtx_destroy(&sc->mcast_mtx);
2748 bxe_tx_disable(struct bxe_softc* sc)
2750 struct ifnet *ifp = sc->ifnet;
2752 /* tell the stack the driver is stopped and TX queue is full */
2754 ifp->if_drv_flags = 0;
2759 bxe_drv_pulse(struct bxe_softc *sc)
2761 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2762 sc->fw_drv_pulse_wr_seq);
2765 static inline uint16_t
2766 bxe_tx_avail(struct bxe_softc *sc,
2767 struct bxe_fastpath *fp)
2773 prod = fp->tx_bd_prod;
2774 cons = fp->tx_bd_cons;
2776 used = SUB_S16(prod, cons);
2779 KASSERT((used >= 0), ("used tx bds < 0"));
2780 KASSERT((used <= sc->tx_ring_size), ("used tx bds > tx_ring_size"));
2781 KASSERT(((sc->tx_ring_size - used) <= MAX_TX_AVAIL),
2782 ("invalid number of tx bds used"));
2785 return (int16_t)(sc->tx_ring_size) - used;
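/*
 * Worked example (editor's note): SUB_S16() keeps the arithmetic correct
 * across 16-bit wrap. With prod = 0x0005 (wrapped) and cons = 0xfffe,
 * used is 7, so a ring size of 0x1000 leaves 0x0ff9 BDs available.
 */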
2789 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2793 mb(); /* status block fields can change */
2794 hw_cons = le16toh(*fp->tx_cons_sb);
2795 return (hw_cons != fp->tx_pkt_cons);
2798 static inline uint8_t
2799 bxe_has_tx_work(struct bxe_fastpath *fp)
2801 /* expand this for multi-cos if ever supported */
2802 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2806 bxe_has_rx_work(struct bxe_fastpath *fp)
2808 uint16_t rx_cq_cons_sb;
2810 mb(); /* status block fields can change */
2811 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2812 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2814 return (fp->rx_cq_cons != rx_cq_cons_sb);
2818 bxe_sp_event(struct bxe_softc *sc,
2819 struct bxe_fastpath *fp,
2820 union eth_rx_cqe *rr_cqe)
2822 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2823 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2824 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2825 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2827 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2828 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2832 * If cid is within VF range, replace the slowpath object with the
2833 * one corresponding to this VF
2835 if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) {
2836 bxe_iov_set_queue_sp_obj(sc, cid, &q_obj);
2841 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2842 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2843 drv_cmd = ECORE_Q_CMD_UPDATE;
2846 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2847 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2848 drv_cmd = ECORE_Q_CMD_SETUP;
2851 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2852 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2853 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2856 case (RAMROD_CMD_ID_ETH_HALT):
2857 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2858 drv_cmd = ECORE_Q_CMD_HALT;
2861 case (RAMROD_CMD_ID_ETH_TERMINATE):
2862 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2863 drv_cmd = ECORE_Q_CMD_TERMINATE;
2866 case (RAMROD_CMD_ID_ETH_EMPTY):
2867 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2868 drv_cmd = ECORE_Q_CMD_EMPTY;
2872 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2873 command, fp->index);
2877 if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2878 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2880 * q_obj->complete_cmd() failure means that this was
2881 * an unexpected completion.
2883 * In this case we don't want to increase the sc->spq_left
2884 * because apparently we haven't sent this command the first
2887 // bxe_panic(sc, ("Unexpected SP completion\n"));
2892 /* SRIOV: reschedule any 'in_progress' operations */
2893 bxe_iov_sp_event(sc, cid, TRUE);
2896 atomic_add_acq_long(&sc->cq_spq_left, 1);
2898 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2899 atomic_load_acq_long(&sc->cq_spq_left));
2902 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
2903 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) {
2905 * If the Queue update ramrod is completed for the last Queue in the AFEX VIF
2906 * set flow, then ACK the MCP at the end. Mark the pending ACK-to-MCP bit to
2907 * prevent the case where both bits are cleared. At the end of load/unload the
2908 * driver checks that sp_state is cleared, and this order prevents mis-ordering.
2911 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state);
2913 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state);
2915 /* schedule the sp task as MCP ack is required */
2916 bxe_schedule_sp_task(sc);
2922 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2923 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2924 * the current aggregation queue as in-progress.
2927 bxe_tpa_start(struct bxe_softc *sc,
2928 struct bxe_fastpath *fp,
2932 struct eth_fast_path_rx_cqe *cqe)
2934 struct bxe_sw_rx_bd tmp_bd;
2935 struct bxe_sw_rx_bd *rx_buf;
2936 struct eth_rx_bd *rx_bd;
2938 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2941 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2942 "cons=%d prod=%d\n",
2943 fp->index, queue, cons, prod);
2945 max_agg_queues = MAX_AGG_QS(sc);
2947 KASSERT((queue < max_agg_queues),
2948 ("fp[%02d] invalid aggr queue (%d >= %d)!",
2949 fp->index, queue, max_agg_queues));
2951 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2952 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2955 /* copy the existing mbuf and mapping from the TPA pool */
2956 tmp_bd = tpa_info->bd;
2958 if (tmp_bd.m == NULL) {
2959 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n",
2961 /* XXX Error handling? */
2965 /* change the TPA queue to the start state */
2966 tpa_info->state = BXE_TPA_STATE_START;
2967 tpa_info->placement_offset = cqe->placement_offset;
2968 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2969 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2970 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2972 fp->rx_tpa_queue_used |= (1 << queue);
2975 * If all the buffer descriptors are filled with mbufs then fill in
2976 * the current consumer index with a new BD. Else if a maximum Rx
2977 * buffer limit is imposed then fill in the next producer index.
2979 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2982 /* move the received mbuf and mapping to TPA pool */
2983 tpa_info->bd = fp->rx_mbuf_chain[cons];
2985 /* release any existing RX BD mbuf mappings */
2986 if (cons != index) {
2987 rx_buf = &fp->rx_mbuf_chain[cons];
2989 if (rx_buf->m_map != NULL) {
2990 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2991 BUS_DMASYNC_POSTREAD);
2992 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2996 * We get here when the maximum number of rx buffers is less than
2997 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2998 * it out here without concern of a memory leak.
3000 fp->rx_mbuf_chain[cons].m = NULL;
3003 /* update the Rx SW BD with the mbuf info from the TPA pool */
3004 fp->rx_mbuf_chain[index] = tmp_bd;
3006 /* update the Rx BD with the empty mbuf phys address from the TPA pool */
3007 rx_bd = &fp->rx_chain[index];
3008 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
3009 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
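/*
 * To summarize the swap performed above: the mbuf that was parked in the
 * TPA pool refills the rx chain (at 'cons', or at the next producer when a
 * maximum Rx buffer limit is in effect), while the freshly received mbuf
 * moves into the pool to accumulate fragments until the TPA STOP CQE arrives.
 */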
3013 * When a TPA aggregation is completed, loop through the individual mbufs
3014 * of the aggregation, combining them into a single mbuf which will be sent
3015 * up the stack. Refill all freed SGEs with mbufs as we go along.
3018 bxe_fill_frag_mbuf(struct bxe_softc *sc,
3019 struct bxe_fastpath *fp,
3020 struct bxe_sw_tpa_info *tpa_info,
3024 struct eth_end_agg_rx_cqe *cqe,
3027 struct mbuf *m_frag;
3028 uint32_t frag_len, frag_size, i;
3033 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
3036 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
3037 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
3039 /* make sure the aggregated frame is not too big to handle */
3040 if (pages > 8 * PAGES_PER_SGE) {
3041 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
3042 "pkt_len=%d len_on_bd=%d frag_size=%d\n",
3043 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
3044 tpa_info->len_on_bd, frag_size);
3045 bxe_panic(sc, ("sge page count error\n"));
3050 * Scan through the scatter gather list pulling individual mbufs into a
3051 * single mbuf for the host stack.
3053 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
3054 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
3057 * Firmware gives the indices of the SGE as if the ring is an array
3058 * (meaning that the "next" element will consume 2 indices).
3060 frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
3062 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
3063 "sge_idx=%d frag_size=%d frag_len=%d\n",
3064 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
3066 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3068 /* allocate a new mbuf for the SGE */
3069 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3071 /* Leave all remaining SGEs in the ring! */
3075 /* update the fragment length */
3076 m_frag->m_len = frag_len;
3078 /* concatenate the fragment to the head mbuf */
3080 fp->eth_q_stats.mbuf_alloc_sge--;
3082 /* update the TPA mbuf size and remaining fragment size */
3083 m->m_pkthdr.len += frag_len;
3084 frag_size -= frag_len;
3088 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
3089 fp->index, queue, frag_size);
3095 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
3099 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
3100 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
3102 for (j = 0; j < 2; j++) {
3103 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
3110 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
3112 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
3113 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
3116 * Clear the two last indices in each page. These are the indices that
3117 * correspond to the "next" element and hence will never be indicated;
3118 * they should be removed from the calculations.
3120 bxe_clear_sge_mask_next_elems(fp);
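/*
 * Worked example (editor's note): the mask is a vector of 64-bit elements
 * with one bit per SGE. With 64-bit mask elements, clearing SGE index 130
 * clears bit 2 (130 & 63) of element 2 (130 >> 6) in fp->sge_mask.
 */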
3124 bxe_update_last_max_sge(struct bxe_fastpath *fp,
3127 uint16_t last_max = fp->last_max_sge;
3129 if (SUB_S16(idx, last_max) > 0) {
3130 fp->last_max_sge = idx;
3135 bxe_update_sge_prod(struct bxe_softc *sc,
3136 struct bxe_fastpath *fp,
3138 union eth_sgl_or_raw_data *cqe)
3140 uint16_t last_max, last_elem, first_elem;
3148 /* first mark all used pages */
3149 for (i = 0; i < sge_len; i++) {
3150 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
3151 RX_SGE(le16toh(cqe->sgl[i])));
3155 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3156 fp->index, sge_len - 1,
3157 le16toh(cqe->sgl[sge_len - 1]));
3159 /* assume that the last SGE index is the biggest */
3160 bxe_update_last_max_sge(fp,
3161 le16toh(cqe->sgl[sge_len - 1]));
3163 last_max = RX_SGE(fp->last_max_sge);
3164 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3165 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3167 /* if ring is not full */
3168 if (last_elem + 1 != first_elem) {
3172 /* now update the prod */
3173 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3174 if (__predict_true(fp->sge_mask[i])) {
3178 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3179 delta += BIT_VEC64_ELEM_SZ;
3183 fp->rx_sge_prod += delta;
3184 /* clear page-end entries */
3185 bxe_clear_sge_mask_next_elems(fp);
3189 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3190 fp->index, fp->last_max_sge, fp->rx_sge_prod);
3194 * The aggregation on the current TPA queue has completed. Pull the individual
3195 * mbuf fragments together into a single mbuf, perform all necessary checksum
3196 * calculations, and send the resulting mbuf to the stack.
3199 bxe_tpa_stop(struct bxe_softc *sc,
3200 struct bxe_fastpath *fp,
3201 struct bxe_sw_tpa_info *tpa_info,
3204 struct eth_end_agg_rx_cqe *cqe,
3207 struct ifnet *ifp = sc->ifnet;
3212 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3213 fp->index, queue, tpa_info->placement_offset,
3214 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3218 /* allocate a replacement before modifying existing mbuf */
3219 rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3221 /* drop the frame and log an error */
3222 fp->eth_q_stats.rx_soft_errors++;
3223 goto bxe_tpa_stop_exit;
3226 /* we have a replacement, fixup the current mbuf */
3227 m_adj(m, tpa_info->placement_offset);
3228 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3230 /* mark the checksums valid (taken care of by the firmware) */
3231 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3232 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3233 m->m_pkthdr.csum_data = 0xffff;
3234 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3239 /* aggregate all of the SGEs into a single mbuf */
3240 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3242 /* drop the packet and log an error */
3243 fp->eth_q_stats.rx_soft_errors++;
3246 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
3247 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3248 m->m_flags |= M_VLANTAG;
3251 /* assign the packet to this interface */
3252 m->m_pkthdr.rcvif = ifp;
3254 #if __FreeBSD_version >= 800000
3255 /* specify what RSS queue was used for this flow */
3256 m->m_pkthdr.flowid = fp->index;
3257 m->m_flags |= M_FLOWID;
3261 fp->eth_q_stats.rx_tpa_pkts++;
3263 /* pass the frame to the stack */
3264 (*ifp->if_input)(ifp, m);
3267 /* we passed an mbuf up the stack or dropped the frame */
3268 fp->eth_q_stats.mbuf_alloc_tpa--;
3272 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3273 fp->rx_tpa_queue_used &= ~(1 << queue);
3278 struct bxe_fastpath *fp,
3282 struct eth_fast_path_rx_cqe *cqe_fp)
3284 struct mbuf *m_frag;
3285 uint16_t frags, frag_len;
3286 uint16_t sge_idx = 0;
3291 /* adjust the mbuf */
3294 frag_size = len - lenonbd;
3295 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3297 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3298 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3300 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3301 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3302 m_frag->m_len = frag_len;
3304 /* allocate a new mbuf for the SGE */
3305 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3307 /* Leave all remaining SGEs in the ring! */
3310 fp->eth_q_stats.mbuf_alloc_sge--;
3312 /* concatenate the fragment to the head mbuf */
3315 frag_size -= frag_len;
3318 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3324 bxe_rxeof(struct bxe_softc *sc,
3325 struct bxe_fastpath *fp)
3327 struct ifnet *ifp = sc->ifnet;
3328 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3329 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3335 /* CQ "next element" is of the size of the regular element */
3336 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3337 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3341 bd_cons = fp->rx_bd_cons;
3342 bd_prod = fp->rx_bd_prod;
3343 bd_prod_fw = bd_prod;
3344 sw_cq_cons = fp->rx_cq_cons;
3345 sw_cq_prod = fp->rx_cq_prod;
3348 * Memory barrier necessary as speculative reads of the rx
3349 * buffer can be ahead of the index in the status block
3354 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3355 fp->index, hw_cq_cons, sw_cq_cons);
3357 while (sw_cq_cons != hw_cq_cons) {
3358 struct bxe_sw_rx_bd *rx_buf = NULL;
3359 union eth_rx_cqe *cqe;
3360 struct eth_fast_path_rx_cqe *cqe_fp;
3361 uint8_t cqe_fp_flags;
3362 enum eth_rx_cqe_type cqe_fp_type;
3363 uint16_t len, lenonbd, pad;
3364 struct mbuf *m = NULL;
3366 comp_ring_cons = RCQ(sw_cq_cons);
3367 bd_prod = RX_BD(bd_prod);
3368 bd_cons = RX_BD(bd_cons);
3370 cqe = &fp->rcq_chain[comp_ring_cons];
3371 cqe_fp = &cqe->fast_path_cqe;
3372 cqe_fp_flags = cqe_fp->type_error_flags;
3373 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3376 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3377 "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3378 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3384 CQE_TYPE(cqe_fp_flags),
3386 cqe_fp->status_flags,
3387 le32toh(cqe_fp->rss_hash_result),
3388 le16toh(cqe_fp->vlan_tag),
3389 le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3390 le16toh(cqe_fp->len_on_bd));
3392 /* is this a slowpath msg? */
3393 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3394 bxe_sp_event(sc, fp, cqe);
3398 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3400 if (!CQE_TYPE_FAST(cqe_fp_type)) {
3401 struct bxe_sw_tpa_info *tpa_info;
3402 uint16_t frag_size, pages;
3407 if (!fp->tpa_enable &&
3408 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) {
3409 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n",
3410 CQE_TYPE(cqe_fp_type));
3414 if (CQE_TYPE_START(cqe_fp_type)) {
3415 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3416 bd_cons, bd_prod, cqe_fp);
3417 m = NULL; /* packet not ready yet */
3421 KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3422 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3424 queue = cqe->end_agg_cqe.queue_index;
3425 tpa_info = &fp->rx_tpa_info[queue];
3427 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3430 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3431 tpa_info->len_on_bd);
3432 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3434 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3435 &cqe->end_agg_cqe, comp_ring_cons);
3437 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3444 /* is this an error packet? */
3445 if (__predict_false(cqe_fp_flags &
3446 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3447 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3448 fp->eth_q_stats.rx_soft_errors++;
3452 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3453 lenonbd = le16toh(cqe_fp->len_on_bd);
3454 pad = cqe_fp->placement_offset;
3458 if (__predict_false(m == NULL)) {
3459 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3460 bd_cons, fp->index);
3464 /* XXX double copy if packet length under a threshold */
3467 * If all the buffer descriptors are filled with mbufs then fill in
3468 * the current consumer index with a new BD. Else if a maximum Rx
3469 * buffer limit is imposed then fill in the next producer index.
3471 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3472 (sc->max_rx_bufs != RX_BD_USABLE) ?
3476 /* we simply reuse the received mbuf and don't post it to the stack */
3479 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3481 fp->eth_q_stats.rx_soft_errors++;
3483 if (sc->max_rx_bufs != RX_BD_USABLE) {
3484 /* copy this consumer index to the producer index */
3485 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3486 sizeof(struct bxe_sw_rx_bd));
3487 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3493 /* current mbuf was detached from the bd */
3494 fp->eth_q_stats.mbuf_alloc_rx--;
3496 /* we allocated a replacement mbuf, fixup the current one */
3498 m->m_pkthdr.len = m->m_len = len;
3500 if ((len > 60) && (len > lenonbd)) {
3501 fp->eth_q_stats.rx_bxe_service_rxsgl++;
3502 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3505 fp->eth_q_stats.rx_jumbo_sge_pkts++;
3506 } else if (lenonbd < len) {
3507 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3510 /* assign the packet to this interface */
3511 m->m_pkthdr.rcvif = ifp;
3513 /* assume no hardware checksum has completed */
3514 m->m_pkthdr.csum_flags = 0;
3516 /* validate checksum if offload enabled */
3517 if (ifp->if_capenable & IFCAP_RXCSUM) {
3518 /* check for a valid IP frame */
3519 if (!(cqe->fast_path_cqe.status_flags &
3520 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3521 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3522 if (__predict_false(cqe_fp_flags &
3523 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3524 fp->eth_q_stats.rx_hw_csum_errors++;
3526 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3527 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3531 /* check for a valid TCP/UDP frame */
3532 if (!(cqe->fast_path_cqe.status_flags &
3533 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3534 if (__predict_false(cqe_fp_flags &
3535 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3536 fp->eth_q_stats.rx_hw_csum_errors++;
3538 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3539 m->m_pkthdr.csum_data = 0xFFFF;
3540 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3546 /* if there is a VLAN tag then flag that info */
3547 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) {
3548 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3549 m->m_flags |= M_VLANTAG;
3552 #if __FreeBSD_version >= 800000
3553 /* specify what RSS queue was used for this flow */
3554 m->m_pkthdr.flowid = fp->index;
3555 m->m_flags |= M_FLOWID;
3560 bd_cons = RX_BD_NEXT(bd_cons);
3561 bd_prod = RX_BD_NEXT(bd_prod);
3562 bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3564 /* pass the frame to the stack */
3565 if (__predict_true(m != NULL)) {
3568 (*ifp->if_input)(ifp, m);
3573 sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3574 sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3576 /* limit spinning on the queue */
3580 if (rx_pkts == sc->rx_budget) {
3581 fp->eth_q_stats.rx_budget_reached++;
3584 } /* while work to do */
3586 fp->rx_bd_cons = bd_cons;
3587 fp->rx_bd_prod = bd_prod_fw;
3588 fp->rx_cq_cons = sw_cq_cons;
3589 fp->rx_cq_prod = sw_cq_prod;
3591 /* Update producers */
3592 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3594 fp->eth_q_stats.rx_pkts += rx_pkts;
3595 fp->eth_q_stats.rx_calls++;
3597 BXE_FP_RX_UNLOCK(fp);
3599 return (sw_cq_cons != hw_cq_cons);
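/*
 * Editor's sketch (illustrative only, not part of the driver): the nonzero
 * return indicates completions were left behind (e.g. the rx_budget cutoff
 * was hit), so a poller can loop until the queue drains.
 */
#if 0
static void
bxe_rxeof_poll_sketch(struct bxe_softc *sc, struct bxe_fastpath *fp)
{
    while (bxe_rxeof(sc, fp)) {
        /* budget exhausted with work remaining; service the queue again */
    }
}
#endif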
3603 bxe_free_tx_pkt(struct bxe_softc *sc,
3604 struct bxe_fastpath *fp,
3607 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3608 struct eth_tx_start_bd *tx_start_bd;
3609 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3613 /* unmap the mbuf from non-paged memory */
3614 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3616 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3617 nbd = le16toh(tx_start_bd->nbd) - 1;
3620 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) {
3621 bxe_panic(sc, ("BAD nbd!\n"));
3625 new_cons = (tx_buf->first_bd + nbd);
3628 struct eth_tx_bd *tx_data_bd;
3631 * The following code doesn't do anything but is left here
3632 * for clarity about which BDs the new value of new_cons has skipped.
3635 /* get the next bd */
3636 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3638 /* skip the parse bd */
3640 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3642 /* skip the TSO split header bd since they have no mapping */
3643 if (tx_buf->flags & BXE_TSO_SPLIT_BD) {
3645 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3648 /* now free frags */
3650 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd;
3652 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3658 if (__predict_true(tx_buf->m != NULL)) {
3660 fp->eth_q_stats.mbuf_alloc_tx--;
3662 fp->eth_q_stats.tx_chain_lost_mbuf++;
3666 tx_buf->first_bd = 0;
3671 /* transmit timeout watchdog */
3673 bxe_watchdog(struct bxe_softc *sc,
3674 struct bxe_fastpath *fp)
3678 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3679 BXE_FP_TX_UNLOCK(fp);
3683 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3685 BXE_FP_TX_UNLOCK(fp);
3687 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3688 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3693 /* processes transmit completions */
3695 bxe_txeof(struct bxe_softc *sc,
3696 struct bxe_fastpath *fp)
3698 struct ifnet *ifp = sc->ifnet;
3699 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3700 uint16_t tx_bd_avail;
3702 BXE_FP_TX_LOCK_ASSERT(fp);
3704 bd_cons = fp->tx_bd_cons;
3705 hw_cons = le16toh(*fp->tx_cons_sb);
3706 sw_cons = fp->tx_pkt_cons;
3708 while (sw_cons != hw_cons) {
3709 pkt_cons = TX_BD(sw_cons);
3712 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3713 fp->index, hw_cons, sw_cons, pkt_cons);
3715 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3720 fp->tx_pkt_cons = sw_cons;
3721 fp->tx_bd_cons = bd_cons;
3724 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3725 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3729 tx_bd_avail = bxe_tx_avail(sc, fp);
3731 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3732 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3734 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3737 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3738 /* reset the watchdog timer if there are pending transmits */
3739 fp->watchdog_timer = BXE_TX_TIMEOUT;
3742 /* clear watchdog when there are no pending transmits */
3743 fp->watchdog_timer = 0;
3749 bxe_drain_tx_queues(struct bxe_softc *sc)
3751 struct bxe_fastpath *fp;
3754 /* wait until all TX fastpath tasks have completed */
3755 for (i = 0; i < sc->num_queues; i++) {
3760 while (bxe_has_tx_work(fp)) {
3764 BXE_FP_TX_UNLOCK(fp);
3767 BLOGE(sc, "Timeout waiting for fp[%d] "
3768 "transmits to complete!\n", i);
3769 bxe_panic(sc, ("tx drain failure\n"));
3783 bxe_del_all_macs(struct bxe_softc *sc,
3784 struct ecore_vlan_mac_obj *mac_obj,
3786 uint8_t wait_for_comp)
3788 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3791 /* wait for completion of requested */
3792 if (wait_for_comp) {
3793 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3796 /* Set the mac type of addresses we want to clear */
3797 bxe_set_bit(mac_type, &vlan_mac_flags);
3799 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3801 BLOGE(sc, "Failed to delete MACs (%d)\n", rc);
3808 bxe_fill_accept_flags(struct bxe_softc *sc,
3810 unsigned long *rx_accept_flags,
3811 unsigned long *tx_accept_flags)
3813 /* Clear the flags first */
3814 *rx_accept_flags = 0;
3815 *tx_accept_flags = 0;
3818 case BXE_RX_MODE_NONE:
3820 * 'drop all' supersedes any accept flags that may have been
3821 * passed to the function.
3825 case BXE_RX_MODE_NORMAL:
3826 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3827 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3828 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3830 /* internal switching mode */
3831 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3832 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3833 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3837 case BXE_RX_MODE_ALLMULTI:
3838 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3839 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3840 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3842 /* internal switching mode */
3843 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3844 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3845 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3849 case BXE_RX_MODE_PROMISC:
3851 * According to the definition of SI mode, an interface in promiscuous
3852 * mode should receive matched and unmatched (in resolution of port) packets.
3855 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3856 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3857 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3858 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3860 /* internal switching mode */
3861 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3862 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3865 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3867 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3873 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode);
3877 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3878 if (rx_mode != BXE_RX_MODE_NONE) {
3879 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3880 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3887 bxe_set_q_rx_mode(struct bxe_softc *sc,
3889 unsigned long rx_mode_flags,
3890 unsigned long rx_accept_flags,
3891 unsigned long tx_accept_flags,
3892 unsigned long ramrod_flags)
3894 struct ecore_rx_mode_ramrod_params ramrod_param;
3897 memset(&ramrod_param, 0, sizeof(ramrod_param));
3899 /* Prepare ramrod parameters */
3900 ramrod_param.cid = 0;
3901 ramrod_param.cl_id = cl_id;
3902 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3903 ramrod_param.func_id = SC_FUNC(sc);
3905 ramrod_param.pstate = &sc->sp_state;
3906 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3908 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3909 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3911 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3913 ramrod_param.ramrod_flags = ramrod_flags;
3914 ramrod_param.rx_mode_flags = rx_mode_flags;
3916 ramrod_param.rx_accept_flags = rx_accept_flags;
3917 ramrod_param.tx_accept_flags = tx_accept_flags;
3919 rc = ecore_config_rx_mode(sc, &ramrod_param);
3921 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode);
3929 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3931 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3932 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3935 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3941 bxe_set_bit(RAMROD_RX, &ramrod_flags);
3942 bxe_set_bit(RAMROD_TX, &ramrod_flags);
3944 /* XXX ensure all fastpaths have the same cl_id and/or move it to bxe_softc */
3945 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3946 rx_accept_flags, tx_accept_flags,
3950 /* returns the "mcp load_code" according to global load_count array */
3952 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3954 int path = SC_PATH(sc);
3955 int port = SC_PORT(sc);
3957 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3958 path, load_count[path][0], load_count[path][1],
3959 load_count[path][2]);
3960 load_count[path][0]++;
3961 load_count[path][1 + port]++;
3962 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3963 path, load_count[path][0], load_count[path][1],
3964 load_count[path][2]);
3965 if (load_count[path][0] == 1) {
3966 return (FW_MSG_CODE_DRV_LOAD_COMMON);
3967 } else if (load_count[path][1 + port] == 1) {
3968 return (FW_MSG_CODE_DRV_LOAD_PORT);
3970 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
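/*
 * Worked example (editor's note): on an idle path the first function to
 * load drives load_count[path] to {1,1,0} and receives DRV_LOAD_COMMON; a
 * second function on the other port gives {2,1,1} and DRV_LOAD_PORT; any
 * later function on an already-loaded port gets DRV_LOAD_FUNCTION.
 */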
3974 /* returns the "mcp load_code" according to global load_count array */
3976 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3978 int port = SC_PORT(sc);
3979 int path = SC_PATH(sc);
3981 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3982 path, load_count[path][0], load_count[path][1],
3983 load_count[path][2]);
3984 load_count[path][0]--;
3985 load_count[path][1 + port]--;
3986 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3987 path, load_count[path][0], load_count[path][1],
3988 load_count[path][2]);
3989 if (load_count[path][0] == 0) {
3990 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3991 } else if (load_count[path][1 + port] == 0) {
3992 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3994 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3998 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
4000 bxe_send_unload_req(struct bxe_softc *sc,
4003 uint32_t reset_code = 0;
4005 int port = SC_PORT(sc);
4006 int path = SC_PATH(sc);
4009 /* Select the UNLOAD request mode */
4010 if (unload_mode == UNLOAD_NORMAL) {
4011 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
4014 else if (sc->flags & BXE_NO_WOL_FLAG) {
4015 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
4016 } else if (sc->wol) {
4017 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
4018 uint8_t *mac_addr = sc->dev->dev_addr;
4023 * The mac address is written to entries 1-4 to
4024 * preserve entry 0 which is used by the PMF
4026 uint8_t entry = (SC_VN(sc) + 1)*8;
4028 val = (mac_addr[0] << 8) | mac_addr[1];
4029 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val);
4031 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4032 (mac_addr[4] << 8) | mac_addr[5];
4033 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
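/*
 * Worked example (editor's note): for MAC 00:11:22:33:44:55 on VN 0
 * (entry offset 8), the two writes above land 0x00000011 at
 * EMAC_REG_EMAC_MAC_MATCH + 8 and 0x22334455 at offset + 12.
 */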
4035 /* Enable the PME and clear the status */
4036 pmc = pci_read_config(sc->dev,
4037 (sc->devinfo.pcie_pm_cap_reg +
4040 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME;
4041 pci_write_config(sc->dev,
4042 (sc->devinfo.pcie_pm_cap_reg +
4046 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
4050 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
4053 /* Send the request to the MCP */
4054 if (!BXE_NOMCP(sc)) {
4055 reset_code = bxe_fw_command(sc, reset_code, 0);
4057 reset_code = bxe_nic_unload_no_mcp(sc);
4060 return (reset_code);
4063 /* send UNLOAD_DONE command to the MCP */
4065 bxe_send_unload_done(struct bxe_softc *sc,
4068 uint32_t reset_param =
4069 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
4071 /* Report UNLOAD_DONE to MCP */
4072 if (!BXE_NOMCP(sc)) {
4073 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
4078 bxe_func_wait_started(struct bxe_softc *sc)
4082 if (!sc->port.pmf) {
4087 * (assumption: No Attention from MCP at this stage)
4088 * PMF probably in the middle of TX disable/enable transaction
4089 * 1. Sync ISR for default SB
4090 * 2. Sync SP queue - this guarantees us that attention handling started
4091 * 3. Wait until the TX disable/enable transaction completes
4093 * 1+2 guarantee that if a DCBX attention was scheduled it already changed the
4094 * pending bit of the transaction from STARTED-->TX_STOPPED; if we already
4095 * received the completion for the transaction, the state is TX_STOPPED.
4096 * The state will return to STARTED after the TX_STOPPED-->STARTED transition completes.
4100 /* XXX make sure default SB ISR is done */
4101 /* need a way to synchronize an irq (intr_mtx?) */
4103 /* XXX flush any work queues */
4105 while (ecore_func_get_state(sc, &sc->func_obj) !=
4106 ECORE_F_STATE_STARTED && tout--) {
4110 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
4112 * Failed to complete the transaction in a "good way"
4113 * Force both transactions with CLR bit.
4115 struct ecore_func_state_params func_params = { NULL };
4117 BLOGE(sc, "Unexpected function state! "
4118 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
4120 func_params.f_obj = &sc->func_obj;
4121 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4123 /* STARTED-->TX_STOPPED */
4124 func_params.cmd = ECORE_F_CMD_TX_STOP;
4125 ecore_func_state_change(sc, &func_params);
4127 /* TX_STOPPED-->STARTED */
4128 func_params.cmd = ECORE_F_CMD_TX_START;
4129 return (ecore_func_state_change(sc, &func_params));
4136 bxe_stop_queue(struct bxe_softc *sc,
4139 struct bxe_fastpath *fp = &sc->fp[index];
4140 struct ecore_queue_state_params q_params = { NULL };
4143 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
4145 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
4146 /* We want to wait for completion in this context */
4147 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
4149 /* Stop the primary connection: */
4151 /* ...halt the connection */
4152 q_params.cmd = ECORE_Q_CMD_HALT;
4153 rc = ecore_queue_state_change(sc, &q_params);
4158 /* ...terminate the connection */
4159 q_params.cmd = ECORE_Q_CMD_TERMINATE;
4160 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
4161 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
4162 rc = ecore_queue_state_change(sc, &q_params);
4167 /* ...delete cfc entry */
4168 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
4169 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
4170 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
4171 return (ecore_queue_state_change(sc, &q_params));
4174 /* wait for the outstanding SP commands */
4175 static inline uint8_t
4176 bxe_wait_sp_comp(struct bxe_softc *sc,
4180 int tout = 5000; /* wait for 5 secs tops */
4184 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
4193 tmp = atomic_load_acq_long(&sc->sp_state);
4195 BLOGE(sc, "Filtering completion timed out: "
4196 "sp_state 0x%lx, mask 0x%lx\n",
4205 bxe_func_stop(struct bxe_softc *sc)
4207 struct ecore_func_state_params func_params = { NULL };
4210 /* prepare parameters for function state transitions */
4211 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4212 func_params.f_obj = &sc->func_obj;
4213 func_params.cmd = ECORE_F_CMD_STOP;
4216 * Try to stop the function the 'good way'. If it fails (in case
4217 * of a parity error during bxe_chip_cleanup()) and we are
4218 * not in a debug mode, perform a state transaction in order to
4219 * enable a further HW_RESET transaction.
4221 rc = ecore_func_state_change(sc, &func_params);
4223 BLOGE(sc, "FUNC_STOP ramrod failed. "
4224 "Running a dry transaction\n");
4225 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4226 return (ecore_func_state_change(sc, &func_params));
4233 bxe_reset_hw(struct bxe_softc *sc,
4236 struct ecore_func_state_params func_params = { NULL };
4238 /* Prepare parameters for function state transitions */
4239 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4241 func_params.f_obj = &sc->func_obj;
4242 func_params.cmd = ECORE_F_CMD_HW_RESET;
4244 func_params.params.hw_init.load_phase = load_code;
4246 return (ecore_func_state_change(sc, &func_params));
4250 bxe_int_disable_sync(struct bxe_softc *sc,
4254 /* prevent the HW from sending interrupts */
4255 bxe_int_disable(sc);
4258 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4259 /* make sure all ISRs are done */
4261 /* XXX make sure sp_task is not running */
4262 /* cancel and flush work queues */
4266 bxe_chip_cleanup(struct bxe_softc *sc,
4267 uint32_t unload_mode,
4270 int port = SC_PORT(sc);
4271 struct ecore_mcast_ramrod_params rparam = { NULL };
4272 uint32_t reset_code;
4275 bxe_drain_tx_queues(sc);
4277 /* give HW time to discard old tx messages */
4280 /* Clean all ETH MACs */
4281 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4283 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4286 /* Clean up UC list */
4287 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4289 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4293 if (!CHIP_IS_E1(sc)) {
4294 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4297 /* Set "drop all" to stop Rx */
4300 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4301 * a race between the completion code and this code.
4305 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4306 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4308 bxe_set_storm_rx_mode(sc);
4311 /* Clean up multicast configuration */
4312 rparam.mcast_obj = &sc->mcast_obj;
4313 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4315 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4318 BXE_MCAST_UNLOCK(sc);
4320 // XXX bxe_iov_chip_cleanup(sc);
4323 * Send the UNLOAD_REQUEST to the MCP. This will return whether
4324 * this function should perform a FUNCTION, PORT, or COMMON HW reset.
4327 reset_code = bxe_send_unload_req(sc, unload_mode);
4330 * (assumption: No Attention from MCP at this stage)
4331 * PMF probably in the middle of TX disable/enable transaction
4333 rc = bxe_func_wait_started(sc);
4335 BLOGE(sc, "bxe_func_wait_started failed\n");
4339 * Close multi and leading connections
4340 * Completions for ramrods are collected in a synchronous way
4342 for (i = 0; i < sc->num_queues; i++) {
4343 if (bxe_stop_queue(sc, i)) {
4349 * If SP settings didn't get completed so far - something
4350 * very wrong has happened.
4352 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4353 BLOGE(sc, "Common slow path ramrods got stuck!\n");
4358 rc = bxe_func_stop(sc);
4360 BLOGE(sc, "Function stop failed!\n");
4363 /* disable HW interrupts */
4364 bxe_int_disable_sync(sc, TRUE);
4366 /* detach interrupts */
4367 bxe_interrupt_detach(sc);
4369 /* Reset the chip */
4370 rc = bxe_reset_hw(sc, reset_code);
4372 BLOGE(sc, "Hardware reset failed\n");
4375 /* Report UNLOAD_DONE to MCP */
4376 bxe_send_unload_done(sc, keep_link);
4380 bxe_disable_close_the_gate(struct bxe_softc *sc)
4383 int port = SC_PORT(sc);
4386 "Disabling 'close the gates'\n");
4388 if (CHIP_IS_E1(sc)) {
4389 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4390 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4391 val = REG_RD(sc, addr);
4393 REG_WR(sc, addr, val);
4395 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4396 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4397 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4398 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4403 * Cleans the objects that have internal lists without sending
4404 * ramrods. Should be run when interrupts are disabled.
4407 bxe_squeeze_objects(struct bxe_softc *sc)
4409 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4410 struct ecore_mcast_ramrod_params rparam = { NULL };
4411 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4414 /* Cleanup MACs' object first... */
4416 /* Wait for completion of requested */
4417 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4418 /* Perform a dry cleanup */
4419 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4421 /* Clean ETH primary MAC */
4422 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4423 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4426 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4429 /* Cleanup UC list */
4431 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4432 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4435 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4438 /* Now clean mcast object... */
4440 rparam.mcast_obj = &sc->mcast_obj;
4441 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4443 /* Add a DEL command... */
4444 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4446 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4449 /* now wait until all pending commands are cleared */
4451 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4454 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4458 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4462 /* stop the controller */
4463 static __noinline int
4464 bxe_nic_unload(struct bxe_softc *sc,
4465 uint32_t unload_mode,
4468 uint8_t global = FALSE;
4471 BXE_CORE_LOCK_ASSERT(sc);
4473 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4475 /* mark driver as unloaded in shmem2 */
4476 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4477 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4478 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4479 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4482 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4483 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4485 * We can get here if the driver has been unloaded
4486 * during parity error recovery and is either waiting for a
4487 * leader to complete or for other functions to unload and
4488 * then ifconfig down has been issued. In this case we want to
4489 * unload and let other functions complete a recovery
4492 sc->recovery_state = BXE_RECOVERY_DONE;
4494 bxe_release_leader_lock(sc);
4497 BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4498 BLOGE(sc, "Can't unload in closed or error state\n");
4503 * Nothing to do during unload if previous bxe_nic_load()
4504 * did not complete successfully - all resources are released.
4506 if ((sc->state == BXE_STATE_CLOSED) ||
4507 (sc->state == BXE_STATE_ERROR)) {
4511 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4517 sc->rx_mode = BXE_RX_MODE_NONE;
4518 /* XXX set rx mode ??? */
4520 if (IS_PF(sc) && !sc->grcdump_done) {
4521 /* set ALWAYS_ALIVE bit in shmem */
4522 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4526 bxe_stats_handle(sc, STATS_EVENT_STOP);
4527 bxe_save_statistics(sc);
4530 /* wait till consumers catch up with producers in all queues */
4531 bxe_drain_tx_queues(sc);
4533 /* if VF, indicate to the PF that this function is going down (the PF will
4534 * delete sp elements and clear initializations
4537 ; /* bxe_vfpf_close_vf(sc); */
4538 } else if (unload_mode != UNLOAD_RECOVERY) {
4539 /* if this is a normal/close unload need to clean up chip */
4540 if (!sc->grcdump_done)
4541 bxe_chip_cleanup(sc, unload_mode, keep_link);
4543 /* Send the UNLOAD_REQUEST to the MCP */
4544 bxe_send_unload_req(sc, unload_mode);
4547 * Prevent transactions to the host from the functions on the
4548 * engine that doesn't reset global blocks in case of a global
4549 * attention once global blocks are reset and gates are opened
4550 * (the engine whose leader will perform the recovery
4553 if (!CHIP_IS_E1x(sc)) {
4557 /* disable HW interrupts */
4558 bxe_int_disable_sync(sc, TRUE);
4560 /* detach interrupts */
4561 bxe_interrupt_detach(sc);
4563 /* Report UNLOAD_DONE to MCP */
4564 bxe_send_unload_done(sc, FALSE);
4568 * At this stage no more interrupts will arrive so we may safely clean
4569 * the queueable objects here in case they failed to get cleaned so far.
4572 bxe_squeeze_objects(sc);
4575 /* There should be no more pending SP commands at this stage */
4580 bxe_free_fp_buffers(sc);
4586 bxe_free_fw_stats_mem(sc);
4588 sc->state = BXE_STATE_CLOSED;
4591 * Check if there are pending parity attentions. If there are - set
4592 * RECOVERY_IN_PROGRESS.
4594 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4595 bxe_set_reset_in_progress(sc);
4597 /* Set RESET_IS_GLOBAL if needed */
4599 bxe_set_reset_global(sc);
4604 * The last driver must disable a "close the gate" if there is no
4605 * parity attention or "process kill" pending.
4607 if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4608 bxe_reset_is_done(sc, SC_PATH(sc))) {
4609 bxe_disable_close_the_gate(sc);
4612 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4618 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4619 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4622 bxe_ifmedia_update(struct ifnet *ifp)
4624 struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc;
4625 struct ifmedia *ifm;
4629 /* We only support Ethernet media type. */
4630 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4634 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4640 case IFM_10G_TWINAX:
4642 /* We don't support changing the media type. */
4643 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4644 IFM_SUBTYPE(ifm->ifm_media));
4652 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4655 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4657 struct bxe_softc *sc = ifp->if_softc;
4659 /* Report link down if the driver isn't running. */
4660 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4661 ifmr->ifm_active |= IFM_NONE;
4665 /* Setup the default interface info. */
4666 ifmr->ifm_status = IFM_AVALID;
4667 ifmr->ifm_active = IFM_ETHER;
4669 if (sc->link_vars.link_up) {
4670 ifmr->ifm_status |= IFM_ACTIVE;
4672 ifmr->ifm_active |= IFM_NONE;
4676 ifmr->ifm_active |= sc->media;
4678 if (sc->link_vars.duplex == DUPLEX_FULL) {
4679 ifmr->ifm_active |= IFM_FDX;
4681 ifmr->ifm_active |= IFM_HDX;
4686 bxe_ioctl_nvram(struct bxe_softc *sc,
4690 struct bxe_nvram_data nvdata_base;
4691 struct bxe_nvram_data *nvdata;
4695 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base));
4697 len = (sizeof(struct bxe_nvram_data) +
4701 if (len > sizeof(struct bxe_nvram_data)) {
4702 if ((nvdata = (struct bxe_nvram_data *)
4703 malloc(len, M_DEVBUF,
4704 (M_NOWAIT | M_ZERO))) == NULL) {
4705 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n");
4708 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data));
4710 nvdata = &nvdata_base;
4713 if (priv_op == BXE_IOC_RD_NVRAM) {
4714 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n",
4715 nvdata->offset, nvdata->len);
4716 error = bxe_nvram_read(sc,
4718 (uint8_t *)nvdata->value,
4720 copyout(nvdata, ifr->ifr_data, len);
4721 } else { /* BXE_IOC_WR_NVRAM */
4722 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n",
4723 nvdata->offset, nvdata->len);
4724 copyin(ifr->ifr_data, nvdata, len);
4725 error = bxe_nvram_write(sc,
4727 (uint8_t *)nvdata->value,
4731 if (len > sizeof(struct bxe_nvram_data)) {
4732 free(nvdata, M_DEVBUF);
4739 bxe_ioctl_stats_show(struct bxe_softc *sc,
4743 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN);
4744 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t));
4751 case BXE_IOC_STATS_SHOW_NUM:
4752 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data));
4753 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num =
4755 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len =
4759 case BXE_IOC_STATS_SHOW_STR:
4760 memset(ifr->ifr_data, 0, str_size);
4761 p_tmp = ifr->ifr_data;
4762 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4763 strcpy(p_tmp, bxe_eth_stats_arr[i].string);
4764 p_tmp += STAT_NAME_LEN;
4768 case BXE_IOC_STATS_SHOW_CNT:
4769 memset(ifr->ifr_data, 0, stats_size);
4770 p_tmp = ifr->ifr_data;
4771 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4772 offset = ((uint32_t *)&sc->eth_stats +
4773 bxe_eth_stats_arr[i].offset);
4774 switch (bxe_eth_stats_arr[i].size) {
4776 *((uint64_t *)p_tmp) = (uint64_t)*offset;
4779 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1));
4782 *((uint64_t *)p_tmp) = 0;
4784 p_tmp += sizeof(uint64_t);
4794 bxe_handle_chip_tq(void *context,
4797 struct bxe_softc *sc = (struct bxe_softc *)context;
4798 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4802 case CHIP_TQ_REINIT:
4803 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
4804 /* restart the interface */
4805 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4806 bxe_periodic_stop(sc);
4808 bxe_stop_locked(sc);
4809 bxe_init_locked(sc);
4810 BXE_CORE_UNLOCK(sc);
4820 * Handles any IOCTL calls from the operating system.
4823 * 0 = Success, >0 Failure
4826 bxe_ioctl(struct ifnet *ifp,
4830 struct bxe_softc *sc = ifp->if_softc;
4831 struct ifreq *ifr = (struct ifreq *)data;
4832 struct bxe_nvram_data *nvdata;
4838 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4839 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4844 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4847 if (sc->mtu == ifr->ifr_mtu) {
4848 /* nothing to change */
4852 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4853 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4854 ifr->ifr_mtu, mtu_min, mtu_max);
4859 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4860 (unsigned long)ifr->ifr_mtu);
4861 atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu,
4862 (unsigned long)ifr->ifr_mtu);
4868 /* toggle the interface state up or down */
4869 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4872 /* check if the interface is up */
4873 if (ifp->if_flags & IFF_UP) {
4874 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4875 /* set the receive mode flags */
4876 bxe_set_rx_mode(sc);
4878 bxe_init_locked(sc);
4881 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4882 bxe_periodic_stop(sc);
4883 bxe_stop_locked(sc);
4886 BXE_CORE_UNLOCK(sc);
4892 /* add/delete multicast addresses */
4893 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4895 /* check if the interface is up */
4896 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4897 /* set the receive mode flags */
4899 bxe_set_rx_mode(sc);
4900 BXE_CORE_UNLOCK(sc);
4906 /* find out which capabilities have changed */
4907 mask = (ifr->ifr_reqcap ^ ifp->if_capenable);
4909 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4912 /* toggle the LRO capabilities enable flag */
4913 if (mask & IFCAP_LRO) {
4914 ifp->if_capenable ^= IFCAP_LRO;
4915 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4916 (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF");
4920 /* toggle the TXCSUM checksum capabilities enable flag */
4921 if (mask & IFCAP_TXCSUM) {
4922 ifp->if_capenable ^= IFCAP_TXCSUM;
4923 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4924 (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF");
4925 if (ifp->if_capenable & IFCAP_TXCSUM) {
4926 ifp->if_hwassist = (CSUM_IP |
4933 ifp->if_hwassist = 0;
4937 /* toggle the RXCSUM checksum capabilities enable flag */
4938 if (mask & IFCAP_RXCSUM) {
4939 ifp->if_capenable ^= IFCAP_RXCSUM;
4940 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4941 (ifp->if_capenable & IFCAP_RXCSUM) ? "ON" : "OFF");
4942 if (ifp->if_capenable & IFCAP_RXCSUM) {
4943 ifp->if_hwassist = (CSUM_IP |
4950 ifp->if_hwassist = 0;
4954 /* toggle TSO4 capabilities enabled flag */
4955 if (mask & IFCAP_TSO4) {
4956 ifp->if_capenable ^= IFCAP_TSO4;
4957 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4958 (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF");
4961 /* toggle TSO6 capabilities enabled flag */
4962 if (mask & IFCAP_TSO6) {
4963 ifp->if_capenable ^= IFCAP_TSO6;
4964 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4965 (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF");
4968 /* toggle VLAN_HWTSO capabilities enabled flag */
4969 if (mask & IFCAP_VLAN_HWTSO) {
4970 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4971 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4972 (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4975 /* toggle VLAN_HWCSUM capabilities enabled flag */
4976 if (mask & IFCAP_VLAN_HWCSUM) {
4977 /* XXX investigate this... */
4978 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4982 /* toggle VLAN_MTU capabilities enable flag */
4983 if (mask & IFCAP_VLAN_MTU) {
4984 /* XXX investigate this... */
4985 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4989 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4990 if (mask & IFCAP_VLAN_HWTAGGING) {
4991 /* XXX investigate this... */
4992 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4996 /* toggle VLAN_HWFILTER capabilities enabled flag */
4997 if (mask & IFCAP_VLAN_HWFILTER) {
4998 /* XXX investigate this... */
4999 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
5011 /* set/get interface media */
5012 BLOGD(sc, DBG_IOCTL,
5013 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
5015 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
5018 case SIOCGPRIVATE_0:
5019 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));
5023 case BXE_IOC_RD_NVRAM:
5024 case BXE_IOC_WR_NVRAM:
5025 nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
5026 BLOGD(sc, DBG_IOCTL,
5027 "Received Private NVRAM ioctl addr=0x%x size=%u\n",
5028 nvdata->offset, nvdata->len);
5029 error = bxe_ioctl_nvram(sc, priv_op, ifr);
5032 case BXE_IOC_STATS_SHOW_NUM:
5033 case BXE_IOC_STATS_SHOW_STR:
5034 case BXE_IOC_STATS_SHOW_CNT:
5035 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
5037 error = bxe_ioctl_stats_show(sc, priv_op, ifr);
5041 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
5049 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
5051 error = ether_ioctl(ifp, command, data);
5055 if (reinit && (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
5056 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
5057 "Re-initializing hardware from IOCTL change\n");
5058 bxe_periodic_stop(sc);
5060 bxe_stop_locked(sc);
5061 bxe_init_locked(sc);
5062 BXE_CORE_UNLOCK(sc);
5068 static __noinline void
5069 bxe_dump_mbuf(struct bxe_softc *sc,
5076 if (!(sc->debug & DBG_MBUF)) {
5081 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
5087 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
5088 i, m, m->m_len, m->m_flags,
5089 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
5091 if (m->m_flags & M_PKTHDR) {
5093 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
5094 i, m->m_pkthdr.len, m->m_flags,
5095 "\20\12M_BCAST\13M_MCAST\14M_FRAG"
5096 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
5097 "\22M_PROMISC\23M_NOFREE",
5098 (int)m->m_pkthdr.csum_flags,
5099 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
5100 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
5101 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
5102 "\14CSUM_PSEUDO_HDR");
5105 if (m->m_flags & M_EXT) {
5106 switch (m->m_ext.ext_type) {
5107 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
5108 case EXT_SFBUF: type = "EXT_SFBUF"; break;
5109 case EXT_JUMBOP: type = "EXT_JUMBOP"; break;
5110 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
5111 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
5112 case EXT_PACKET: type = "EXT_PACKET"; break;
5113 case EXT_MBUF: type = "EXT_MBUF"; break;
5114 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
5115 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
5116 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
5117 case EXT_EXTREF: type = "EXT_EXTREF"; break;
5118 default: type = "UNKNOWN"; break;
5122 "%02d: - m_ext: %p ext_size=%d type=%s\n",
5123 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
5127 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
5136 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
5137 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
5138 * The window: 3 bds are reserved = 1 for the headers BD + 2 for the parse BD and last BD.
5139 * The headers come in a separate bd in FreeBSD so 13-3=10.
5140 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
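/*
 * A minimal standalone sketch of the sliding-window test implemented below
 * (illustrative only: "tso_window_ok" is a made-up helper, and the lengths
 * here are plain host-order byte counts, ignoring the htole16() calls the
 * driver applies):
 */
#if 0
#include <stdint.h>

static int
tso_window_ok(const uint32_t *seg_len, int nsegs, uint32_t mss)
{
    const int wnd_size = 10;   /* 13 max bds - 3 (headers + parse + last) */
    uint32_t wnd_sum = 0;
    int i;

    if (nsegs <= wnd_size)
        return (1);            /* short chains always fit */

    /* sum the first window, skipping segment 0 (the headers) */
    for (i = 1; i <= wnd_size; i++)
        wnd_sum += seg_len[i];
    if (wnd_sum < mss)
        return (0);

    /* slide: drop the oldest data segment, add the next one */
    for (i = wnd_size + 1; i < nsegs; i++) {
        wnd_sum -= seg_len[i - wnd_size];
        wnd_sum += seg_len[i];
        if (wnd_sum < mss)
            return (0);
    }
    return (1);
}
#endif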
5143 bxe_chktso_window(struct bxe_softc *sc,
5145 bus_dma_segment_t *segs,
5148 uint32_t num_wnds, wnd_size, wnd_sum;
5149 int32_t frag_idx, wnd_idx;
5150 unsigned short lso_mss;
5156 num_wnds = nsegs - wnd_size;
5157 lso_mss = htole16(m->m_pkthdr.tso_segsz);
5160 * The total Eth+IP+TCP header length lives in the first FreeBSD mbuf, so
5161 * calculate the first window's sum of data while skipping the first
5162 * segment, assuming it holds the headers.
5164 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
5165 wnd_sum += htole16(segs[frag_idx].ds_len);
5168 /* check the first 10 bd window size */
5169 if (wnd_sum < lso_mss) {
5173 /* run through the windows */
5174 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
5175 /* drop the first data segment of the previous window (header excluded) */
5176 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
5177 /* add the next mbuf len to the len of our new window */
5178 wnd_sum += htole16(segs[frag_idx].ds_len);
5179 if (wnd_sum < lso_mss) {
5188 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
5190 uint32_t *parsing_data)
5192 struct ether_vlan_header *eh = NULL;
5193 struct ip *ip4 = NULL;
5194 struct ip6_hdr *ip6 = NULL;
5196 struct tcphdr *th = NULL;
5197 int e_hlen, ip_hlen, l4_off;
5200 if (m->m_pkthdr.csum_flags == CSUM_IP) {
5201 /* no L4 checksum offload needed */
5205 /* get the Ethernet header */
5206 eh = mtod(m, struct ether_vlan_header *);
5208 /* handle VLAN encapsulation if present */
5209 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5210 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5211 proto = ntohs(eh->evl_proto);
5213 e_hlen = ETHER_HDR_LEN;
5214 proto = ntohs(eh->evl_encap_proto);
5219 /* get the IP header, if mbuf len < 20 then header in next mbuf */
5220 ip4 = (m->m_len < sizeof(struct ip)) ?
5221 (struct ip *)m->m_next->m_data :
5222 (struct ip *)(m->m_data + e_hlen);
5223 /* ip_hl is number of 32-bit words */
5224 ip_hlen = (ip4->ip_hl << 2);
5227 case ETHERTYPE_IPV6:
5228 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5229 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5230 (struct ip6_hdr *)m->m_next->m_data :
5231 (struct ip6_hdr *)(m->m_data + e_hlen);
5232 /* XXX cannot support offload with IPv6 extensions */
5233 ip_hlen = sizeof(struct ip6_hdr);
5237 /* We can't offload in this case... */
5238 /* XXX error stat ??? */
5242 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5243 l4_off = (e_hlen + ip_hlen);
5246 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
5247 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
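    /* e.g. untagged IPv4, no options: l4_off = 14 + 20 = 34 bytes = 17 16-bit words */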
5249 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5252 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5253 th = (struct tcphdr *)(ip + ip_hlen);
5254 /* th_off is number of 32-bit words */
5255 *parsing_data |= ((th->th_off <<
5256 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
5257 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
5258 return (l4_off + (th->th_off << 2)); /* entire header length */
5259 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5261 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5262 return (l4_off + sizeof(struct udphdr)); /* entire header length */
5264 /* XXX error stat ??? */
5270 bxe_set_pbd_csum(struct bxe_fastpath *fp,
5272 struct eth_tx_parse_bd_e1x *pbd)
5274 struct ether_vlan_header *eh = NULL;
5275 struct ip *ip4 = NULL;
5276 struct ip6_hdr *ip6 = NULL;
5278 struct tcphdr *th = NULL;
5279 struct udphdr *uh = NULL;
5280 int e_hlen, ip_hlen;
5286 /* get the Ethernet header */
5287 eh = mtod(m, struct ether_vlan_header *);
5289 /* handle VLAN encapsulation if present */
5290 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5291 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5292 proto = ntohs(eh->evl_proto);
5294 e_hlen = ETHER_HDR_LEN;
5295 proto = ntohs(eh->evl_encap_proto);
5300 /* get the IP header, if mbuf len < 20 then header in next mbuf */
5301 ip4 = (m->m_len < sizeof(struct ip)) ?
5302 (struct ip *)m->m_next->m_data :
5303 (struct ip *)(m->m_data + e_hlen);
5304 /* ip_hl is number of 32-bit words */
5305 ip_hlen = (ip4->ip_hl << 1);
5308 case ETHERTYPE_IPV6:
5309 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5310 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5311 (struct ip6_hdr *)m->m_next->m_data :
5312 (struct ip6_hdr *)(m->m_data + e_hlen);
5313 /* XXX cannot support offload with IPv6 extensions */
5314 ip_hlen = (sizeof(struct ip6_hdr) >> 1);
5318 /* We can't offload in this case... */
5319 /* XXX error stat ??? */
5323 hlen = (e_hlen >> 1);
5325 /* note that rest of global_data is indirectly zeroed here */
5326 if (m->m_flags & M_VLANTAG) {
5328 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5330 pbd->global_data = htole16(hlen);
5333 pbd->ip_hlen_w = ip_hlen;
5335 hlen += pbd->ip_hlen_w;
5337 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5339 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5342 th = (struct tcphdr *)(ip + (ip_hlen << 1));
5343 /* th_off is number of 32-bit words */
5344 hlen += (uint16_t)(th->th_off << 1);
5345 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5347 uh = (struct udphdr *)(ip + (ip_hlen << 1));
5348 hlen += (sizeof(struct udphdr) / 2);
5350 /* valid case as only CSUM_IP was set */
5354 pbd->total_hlen_w = htole16(hlen);
5356 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5359 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5360 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5361 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5363 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5366 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5367 * checksums and does not know anything about the UDP header and where
5368 * the checksum field is located. It only knows about TCP. Therefore
5369 * we "lie" to the hardware for outgoing UDP packets w/ checksum
5370 * offload. Since the checksum field offset for TCP is 16 bytes and
5371 * for UDP it is 6 bytes, we pass a pointer to the hardware that is 10
5372 * bytes less than the start of the UDP header. This allows the
5373 * hardware to write the checksum in the correct spot. But the
5374 * hardware will compute a checksum which includes the last 10 bytes
5375 * of the IP header. To correct this we tweak the stack computed
5376 * pseudo checksum by folding in the calculation of the inverse
5377 * checksum for those final 10 bytes of the IP header. This allows
5378 * the correct checksum to be computed by the hardware.
5381 /* set pointer 10 bytes before UDP header */
5382 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5384 /* calculate a pseudo header checksum over the first 10 bytes */
5385 tmp_csum = in_pseudo(*tmp_uh,
5387 *(uint16_t *)(tmp_uh + 2));
5389 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5392 return (hlen * 2); /* entire header length, number of bytes */
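/*
 * The UDP fixup above relies on RFC 1071 one's-complement arithmetic:
 * adding the inverted sum of a region is equivalent to subtracting that
 * region from a checksum, which is what in_addword(uh_sum, ~tmp_csum)
 * exploits. A standalone sketch (helper names are made up):
 */
#if 0
#include <stdint.h>

/* one's-complement add with end-around carry fold */
static uint16_t
csum_add(uint16_t a, uint16_t b)
{
    uint32_t sum = (uint32_t)a + (uint32_t)b;

    return ((uint16_t)((sum & 0xffff) + (sum >> 16)));
}

/* removing a word that was summed in is the same as adding its complement */
static uint16_t
csum_sub(uint16_t sum, uint16_t word)
{
    return (csum_add(sum, (uint16_t)~word));
}
#endif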
5396 bxe_set_pbd_lso_e2(struct mbuf *m,
5397 uint32_t *parsing_data)
5399 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5400 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5401 ETH_TX_PARSE_BD_E2_LSO_MSS);
5403 /* XXX test for IPv6 with extension header... */
#if 0 /* disabled pseudocode in the original */
5405 struct ip6_hdr *ip6;
5406 if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header')
5407 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
#endif
5412 bxe_set_pbd_lso(struct mbuf *m,
5413 struct eth_tx_parse_bd_e1x *pbd)
5415 struct ether_vlan_header *eh = NULL;
5416 struct ip *ip = NULL;
5417 struct tcphdr *th = NULL;
5420 /* get the Ethernet header */
5421 eh = mtod(m, struct ether_vlan_header *);
5423 /* handle VLAN encapsulation if present */
5424 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5425 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5427 /* get the IP and TCP header, with LSO entire header in first mbuf */
5428 /* XXX assuming IPv4 */
5429 ip = (struct ip *)(m->m_data + e_hlen);
5430 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5432 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5433 pbd->tcp_send_seq = ntohl(th->th_seq);
5434 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
#if 1 /* XXX IPv4 */
5438 pbd->ip_id = ntohs(ip->ip_id);
5439 pbd->tcp_pseudo_csum =
5440 ntohs(in_pseudo(ip->ip_src.s_addr,
5442 htons(IPPROTO_TCP)));
#else /* XXX IPv6 variant, disabled: "ip6" is not declared in this function */
5445 pbd->tcp_pseudo_csum =
5446 ntohs(in_pseudo(&ip6->ip6_src,
5448 htons(IPPROTO_TCP)));
#endif
5452 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
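/*
 * Presumably the pseudo checksum is handed to the chip without the TCP
 * length folded in (hence PSEUDO_CS_WITHOUT_LEN): LSO emits segments of
 * varying length, so the hardware must fold each segment's own length
 * into the checksum it finalizes.
 */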
5456 * Encapsulate an mbuf cluster into the tx bd chain and make the memory
5457 * visible to the controller.
5459 * If an mbuf is submitted to this routine and cannot be given to the
5460 * controller (e.g. it has too many fragments) then the function may free
5461 * the mbuf and return to the caller.
5464 * 0 = Success, !0 = Failure
5465 * Note the side effect that an mbuf may be freed if it causes a problem.
5468 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5470 bus_dma_segment_t segs[32];
5472 struct bxe_sw_tx_bd *tx_buf;
5473 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5474 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5475 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5476 struct eth_tx_bd *tx_data_bd;
5477 struct eth_tx_bd *tx_total_pkt_size_bd;
5478 struct eth_tx_start_bd *tx_start_bd;
5479 uint16_t bd_prod, pkt_prod, total_pkt_size;
5481 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5482 struct bxe_softc *sc;
5483 uint16_t tx_bd_avail;
5484 struct ether_vlan_header *eh;
5485 uint32_t pbd_e2_parsing_data = 0;
5492 M_ASSERTPKTHDR(*m_head);
5495 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5498 tx_total_pkt_size_bd = NULL;
5500 /* get the H/W pointer for packets and BDs */
5501 pkt_prod = fp->tx_pkt_prod;
5502 bd_prod = fp->tx_bd_prod;
5504 mac_type = UNICAST_ADDRESS;
5506 /* map the mbuf into the next open DMAable memory */
5507 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5508 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5510 segs, &nsegs, BUS_DMA_NOWAIT);
5512 /* mapping errors */
5513 if (__predict_false(error != 0)) {
5514 fp->eth_q_stats.tx_dma_mapping_failure++;
5515 if (error == ENOMEM) {
5516 /* resource issue, try again later */
5518 } else if (error == EFBIG) {
5519 /* possibly recoverable with defragmentation */
5520 fp->eth_q_stats.mbuf_defrag_attempts++;
5521 m0 = m_defrag(*m_head, M_DONTWAIT);
5523 fp->eth_q_stats.mbuf_defrag_failures++;
5526 /* defrag successful, try mapping again */
5528 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5530 segs, &nsegs, BUS_DMA_NOWAIT);
5532 fp->eth_q_stats.tx_dma_mapping_failure++;
5537 /* unknown, unrecoverable mapping error */
5538 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5539 bxe_dump_mbuf(sc, m0, FALSE);
5543 goto bxe_tx_encap_continue;
5546 tx_bd_avail = bxe_tx_avail(sc, fp);
5548 /* make sure there is enough room in the send queue */
5549 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5550 /* Recoverable, try again later. */
5551 fp->eth_q_stats.tx_hw_queue_full++;
5552 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5554 goto bxe_tx_encap_continue;
5557 /* capture the current H/W TX chain high watermark */
5558 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5559 (TX_BD_USABLE - tx_bd_avail))) {
5560 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5563 /* make sure it fits in the packet window */
5564 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5566 * The mbuf may be too big for the controller to handle. If the frame
5567 * is a TSO frame we'll need to do an additional check.
5569 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5570 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5571 goto bxe_tx_encap_continue; /* OK to send */
5573 fp->eth_q_stats.tx_window_violation_tso++;
5576 fp->eth_q_stats.tx_window_violation_std++;
5579 /* let's try to defragment this mbuf and remap it */
5580 fp->eth_q_stats.mbuf_defrag_attempts++;
5581 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5583 m0 = m_defrag(*m_head, M_DONTWAIT);
5585 fp->eth_q_stats.mbuf_defrag_failures++;
5586 /* Ugh, just drop the frame... :( */
5589 /* defrag successful, try mapping again */
5591 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5593 segs, &nsegs, BUS_DMA_NOWAIT);
5595 fp->eth_q_stats.tx_dma_mapping_failure++;
5596 /* No sense in trying to defrag/copy chain, drop it. :( */
5600 /* if the chain is still too long then drop it */
5601 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5602 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5609 bxe_tx_encap_continue:
5611 /* Check for errors */
5614 /* recoverable try again later */
5616 fp->eth_q_stats.tx_soft_errors++;
5617 fp->eth_q_stats.mbuf_alloc_tx--;
5625 /* set flag according to packet type (UNICAST_ADDRESS is default) */
5626 if (m0->m_flags & M_BCAST) {
5627 mac_type = BROADCAST_ADDRESS;
5628 } else if (m0->m_flags & M_MCAST) {
5629 mac_type = MULTICAST_ADDRESS;
5632 /* store the mbuf into the mbuf ring */
5634 tx_buf->first_bd = fp->tx_bd_prod;
5637 /* prepare the first transmit (start) BD for the mbuf */
5638 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5641 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5642 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5644 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5645 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5646 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5647 total_pkt_size += tx_start_bd->nbytes;
5648 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5650 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5652 /* all frames have at least Start BD + Parsing BD */
5654 tx_start_bd->nbd = htole16(nbds);
5656 if (m0->m_flags & M_VLANTAG) {
5657 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5658 tx_start_bd->bd_flags.as_bitfield |=
5659 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5661 /* vf tx, start bd must hold the ethertype for fw to enforce it */
5663 /* map ethernet header to find type and header length */
5664 eh = mtod(m0, struct ether_vlan_header *);
5665 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5667 /* used by FW for packet accounting */
5668 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5671 * If NPAR-SD is active then FW should do the tagging regardless
5672 * of value of priority. Otherwise, if priority indicates this is
5673 * a control packet we need to indicate to FW to avoid tagging.
#if 0 /* disabled in the original: "mbuf priority" is pseudocode, not a real mbuf field */
5675 if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) {
5676 SET_FLAG(tx_start_bd->general_data,
5677 ETH_TX_START_BD_FORCE_VLAN_MODE, 1);
5678 }
#endif
5684 * take the next BD from the chain for the parsing BD. A parsing BD is
5685 * always added even though it is only used for TSO and checksum offload
5687 bd_prod = TX_BD_NEXT(bd_prod);
5689 if (m0->m_pkthdr.csum_flags) {
5690 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5691 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5692 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5695 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5696 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5697 ETH_TX_BD_FLAGS_L4_CSUM);
5698 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5699 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5700 ETH_TX_BD_FLAGS_IS_UDP |
5701 ETH_TX_BD_FLAGS_L4_CSUM);
5702 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5703 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5704 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5705 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5706 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5707 ETH_TX_BD_FLAGS_IS_UDP);
5711 if (!CHIP_IS_E1x(sc)) {
5712 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5713 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5715 if (m0->m_pkthdr.csum_flags) {
5716 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5721 * Add the MACs to the parsing BD if the module param was
5722 * explicitly set, if this is a vf, or in switch independent
5725 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) {
5726 eh = mtod(m0, struct ether_vlan_header *);
5727 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
5728 &pbd_e2->data.mac_addr.src_mid,
5729 &pbd_e2->data.mac_addr.src_lo,
5731 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
5732 &pbd_e2->data.mac_addr.dst_mid,
5733 &pbd_e2->data.mac_addr.dst_lo,
5738 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5741 uint16_t global_data = 0;
5743 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5744 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5746 if (m0->m_pkthdr.csum_flags) {
5747 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5750 SET_FLAG(global_data,
5751 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5752 pbd_e1x->global_data |= htole16(global_data);
5755 /* setup the parsing BD with TSO specific info */
5756 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5757 fp->eth_q_stats.tx_ofld_frames_lso++;
5758 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5760 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5761 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5763 /* split the first BD into header/data making the fw job easy */
5765 tx_start_bd->nbd = htole16(nbds);
5766 tx_start_bd->nbytes = htole16(hlen);
5768 bd_prod = TX_BD_NEXT(bd_prod);
5770 /* new transmit BD after the tx_parse_bd */
5771 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5772 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5773 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5774 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5775 if (tx_total_pkt_size_bd == NULL) {
5776 tx_total_pkt_size_bd = tx_data_bd;
5780 "TSO split header size is %d (%x:%x) nbds %d\n",
5781 le16toh(tx_start_bd->nbytes),
5782 le32toh(tx_start_bd->addr_hi),
5783 le32toh(tx_start_bd->addr_lo),
5787 if (!CHIP_IS_E1x(sc)) {
5788 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5790 bxe_set_pbd_lso(m0, pbd_e1x);
5794 if (pbd_e2_parsing_data) {
5795 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5798 /* prepare remaining BDs, start tx bd contains first seg/frag */
5799 for (i = 1; i < nsegs ; i++) {
5800 bd_prod = TX_BD_NEXT(bd_prod);
5801 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5802 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5803 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5804 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5805 if (tx_total_pkt_size_bd == NULL) {
5806 tx_total_pkt_size_bd = tx_data_bd;
5808 total_pkt_size += tx_data_bd->nbytes;
5811 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5813 if (tx_total_pkt_size_bd != NULL) {
5814 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5817 if (__predict_false(sc->debug & DBG_TX)) {
5818 tmp_bd = tx_buf->first_bd;
5819 for (i = 0; i < nbds; i++)
5823 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5824 "bd_flags=0x%x hdr_nbds=%d\n",
5827 le16toh(tx_start_bd->nbd),
5828 le16toh(tx_start_bd->vlan_or_ethertype),
5829 tx_start_bd->bd_flags.as_bitfield,
5830 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5831 } else if (i == 1) {
5834 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5835 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5836 "tcp_seq=%u total_hlen_w=%u\n",
5839 pbd_e1x->global_data,
5844 pbd_e1x->tcp_pseudo_csum,
5845 pbd_e1x->tcp_send_seq,
5846 le16toh(pbd_e1x->total_hlen_w));
5847 } else { /* if (pbd_e2) */
5849 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5850 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5853 pbd_e2->data.mac_addr.dst_hi,
5854 pbd_e2->data.mac_addr.dst_mid,
5855 pbd_e2->data.mac_addr.dst_lo,
5856 pbd_e2->data.mac_addr.src_hi,
5857 pbd_e2->data.mac_addr.src_mid,
5858 pbd_e2->data.mac_addr.src_lo,
5859 pbd_e2->parsing_data);
5863 if (i != 1) { /* skip parse bd as it doesn't hold data */
5864 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5866 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5869 le16toh(tx_data_bd->nbytes),
5870 le32toh(tx_data_bd->addr_hi),
5871 le32toh(tx_data_bd->addr_lo));
5874 tmp_bd = TX_BD_NEXT(tmp_bd);
5878 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5880 /* update TX BD producer index value for next TX */
5881 bd_prod = TX_BD_NEXT(bd_prod);
5884 * If the chain of tx_bd's describing this frame is adjacent to or spans
5885 * an eth_tx_next_bd element then we need to increment the nbds value.
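/*
 * e.g. with nbds == 4, a producer index of 0..3 within the new page means
 * the frame's bds wrapped across a page boundary and therefore spanned
 * that page's eth_tx_next_bd element.
 */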
5887 if (TX_BD_IDX(bd_prod) < nbds) {
5891 /* don't allow reordering of writes for nbd and packets */
5894 fp->tx_db.data.prod += nbds;
5896 /* producer points to the next free tx_bd at this point */
5898 fp->tx_bd_prod = bd_prod;
5900 DOORBELL(sc, fp->index, fp->tx_db.raw);
5902 fp->eth_q_stats.tx_pkts++;
5904 /* Prevent speculative reads from getting ahead of the status block. */
5905 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5906 0, 0, BUS_SPACE_BARRIER_READ);
5908 /* Prevent speculative reads from getting ahead of the doorbell. */
5909 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5910 0, 0, BUS_SPACE_BARRIER_READ);
5916 bxe_tx_start_locked(struct bxe_softc *sc,
5918 struct bxe_fastpath *fp)
5920 struct mbuf *m = NULL;
5922 uint16_t tx_bd_avail;
5924 BXE_FP_TX_LOCK_ASSERT(fp);
5926 /* keep adding entries while there are frames to send */
5927 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5930 * check for any frames to send
5931 * dequeue can still be NULL even if queue is not empty
5933 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
5934 if (__predict_false(m == NULL)) {
5938 /* the mbuf now belongs to us */
5939 fp->eth_q_stats.mbuf_alloc_tx++;
5942 * Put the frame into the transmit ring. If we don't have room,
5943 * place the mbuf back at the head of the TX queue, set the
5944 * OACTIVE flag, and wait for the NIC to drain the chain.
5946 if (__predict_false(bxe_tx_encap(fp, &m))) {
5947 fp->eth_q_stats.tx_encap_failures++;
5949 /* mark the TX queue as full and return the frame */
5950 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5951 IFQ_DRV_PREPEND(&ifp->if_snd, m);
5952 fp->eth_q_stats.mbuf_alloc_tx--;
5953 fp->eth_q_stats.tx_queue_xoff++;
5956 /* stop looking for more work */
5960 /* the frame was enqueued successfully */
5963 /* send a copy of the frame to any BPF listeners. */
5966 tx_bd_avail = bxe_tx_avail(sc, fp);
5968 /* handle any completions if we're running low */
5969 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5970 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5972 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5978 /* all TX packets were dequeued and/or the tx ring is full */
5980 /* reset the TX watchdog timeout timer */
5981 fp->watchdog_timer = BXE_TX_TIMEOUT;
5985 /* Legacy (non-RSS) dispatch routine */
5987 bxe_tx_start(struct ifnet *ifp)
5989 struct bxe_softc *sc;
5990 struct bxe_fastpath *fp;
5994 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5995 BLOGW(sc, "Interface not running, ignoring transmit request\n");
5999 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
6000 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
6004 if (!sc->link_vars.link_up) {
6005 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
6012 bxe_tx_start_locked(sc, ifp, fp);
6013 BXE_FP_TX_UNLOCK(fp);
6016 #if __FreeBSD_version >= 800000
6019 bxe_tx_mq_start_locked(struct bxe_softc *sc,
6021 struct bxe_fastpath *fp,
6024 struct buf_ring *tx_br = fp->tx_br;
6026 int depth, rc, tx_count;
6027 uint16_t tx_bd_avail;
6031 BXE_FP_TX_LOCK_ASSERT(fp);
6034 BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
6038 if (!sc->link_vars.link_up ||
6039 (ifp->if_drv_flags &
6040 (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
6041 rc = drbr_enqueue(ifp, tx_br, m);
6042 goto bxe_tx_mq_start_locked_exit;
6045 /* fetch the depth of the driver queue */
6046 depth = drbr_inuse(ifp, tx_br);
6047 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
6048 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
6052 /* no new work, check for pending frames */
6053 next = drbr_dequeue(ifp, tx_br);
6054 } else if (drbr_needs_enqueue(ifp, tx_br)) {
6055 /* have both new and pending work, maintain packet order */
6056 rc = drbr_enqueue(ifp, tx_br, m);
6058 fp->eth_q_stats.tx_soft_errors++;
6059 goto bxe_tx_mq_start_locked_exit;
6061 next = drbr_dequeue(ifp, tx_br);
6063 /* new work only and nothing pending */
6067 /* keep adding entries while there are frames to send */
6068 while (next != NULL) {
6070 /* the mbuf now belongs to us */
6071 fp->eth_q_stats.mbuf_alloc_tx++;
6074 * Put the frame into the transmit ring. If we don't have room,
6075 * place the mbuf back at the head of the TX queue, set the
6076 * OACTIVE flag, and wait for the NIC to drain the chain.
6078 rc = bxe_tx_encap(fp, &next);
6079 if (__predict_false(rc != 0)) {
6080 fp->eth_q_stats.tx_encap_failures++;
6082 /* mark the TX queue as full and save the frame */
6083 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
6084 /* XXX this may reorder the frame */
6085 rc = drbr_enqueue(ifp, tx_br, next);
6086 fp->eth_q_stats.mbuf_alloc_tx--;
6087 fp->eth_q_stats.tx_frames_deferred++;
6090 /* stop looking for more work */
6094 /* the transmit frame was enqueued successfully */
6097 /* send a copy of the frame to any BPF listeners */
6098 BPF_MTAP(ifp, next);
6100 tx_bd_avail = bxe_tx_avail(sc, fp);
6102 /* handle any completions if we're running low */
6103 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
6104 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
6106 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
6111 next = drbr_dequeue(ifp, tx_br);
6114 /* all TX packets were dequeued and/or the tx ring is full */
6116 /* reset the TX watchdog timeout timer */
6117 fp->watchdog_timer = BXE_TX_TIMEOUT;
6120 bxe_tx_mq_start_locked_exit:
6125 /* Multiqueue (TSS) dispatch routine. */
6127 bxe_tx_mq_start(struct ifnet *ifp,
6130 struct bxe_softc *sc = ifp->if_softc;
6131 struct bxe_fastpath *fp;
6134 fp_index = 0; /* default is the first queue */
6136 /* change the queue if using flow ID */
6137 if ((m->m_flags & M_FLOWID) != 0) {
6138 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
6141 fp = &sc->fp[fp_index];
6143 if (BXE_FP_TX_TRYLOCK(fp)) {
6144 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
6145 BXE_FP_TX_UNLOCK(fp);
6147 rc = drbr_enqueue(ifp, fp->tx_br, m);
6153 bxe_mq_flush(struct ifnet *ifp)
6155 struct bxe_softc *sc = ifp->if_softc;
6156 struct bxe_fastpath *fp;
6160 for (i = 0; i < sc->num_queues; i++) {
6163 if (fp->state != BXE_FP_STATE_OPEN) {
6164 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
6165 fp->index, fp->state);
6169 if (fp->tx_br != NULL) {
6170 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
6172 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
6175 BXE_FP_TX_UNLOCK(fp);
6182 #endif /* __FreeBSD_version >= 800000 */
6185 bxe_cid_ilt_lines(struct bxe_softc *sc)
6188 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
6190 return (L2_ILT_LINES(sc));
6194 bxe_ilt_set_info(struct bxe_softc *sc)
6196 struct ilt_client_info *ilt_client;
6197 struct ecore_ilt *ilt = sc->ilt;
6200 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
6201 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
6204 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6205 ilt_client->client_num = ILT_CLIENT_CDU;
6206 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6207 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6208 ilt_client->start = line;
6209 line += bxe_cid_ilt_lines(sc);
6211 if (CNIC_SUPPORT(sc)) {
6212 line += CNIC_ILT_LINES;
6215 ilt_client->end = (line - 1);
6218 "ilt client[CDU]: start %d, end %d, "
6219 "psz 0x%x, flags 0x%x, hw psz %d\n",
6220 ilt_client->start, ilt_client->end,
6221 ilt_client->page_size,
6223 ilog2(ilt_client->page_size >> 12));
6226 if (QM_INIT(sc->qm_cid_count)) {
6227 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6228 ilt_client->client_num = ILT_CLIENT_QM;
6229 ilt_client->page_size = QM_ILT_PAGE_SZ;
6230 ilt_client->flags = 0;
6231 ilt_client->start = line;
6233 /* 4 bytes for each cid */
6234 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6237 ilt_client->end = (line - 1);
6240 "ilt client[QM]: start %d, end %d, "
6241 "psz 0x%x, flags 0x%x, hw psz %d\n",
6242 ilt_client->start, ilt_client->end,
6243 ilt_client->page_size, ilt_client->flags,
6244 ilog2(ilt_client->page_size >> 12));
6247 if (CNIC_SUPPORT(sc)) {
6249 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6250 ilt_client->client_num = ILT_CLIENT_SRC;
6251 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6252 ilt_client->flags = 0;
6253 ilt_client->start = line;
6254 line += SRC_ILT_LINES;
6255 ilt_client->end = (line - 1);
6258 "ilt client[SRC]: start %d, end %d, "
6259 "psz 0x%x, flags 0x%x, hw psz %d\n",
6260 ilt_client->start, ilt_client->end,
6261 ilt_client->page_size, ilt_client->flags,
6262 ilog2(ilt_client->page_size >> 12));
6265 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6266 ilt_client->client_num = ILT_CLIENT_TM;
6267 ilt_client->page_size = TM_ILT_PAGE_SZ;
6268 ilt_client->flags = 0;
6269 ilt_client->start = line;
6270 line += TM_ILT_LINES;
6271 ilt_client->end = (line - 1);
6274 "ilt client[TM]: start %d, end %d, "
6275 "psz 0x%x, flags 0x%x, hw psz %d\n",
6276 ilt_client->start, ilt_client->end,
6277 ilt_client->page_size, ilt_client->flags,
6278 ilog2(ilt_client->page_size >> 12));
6281 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
6285 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
6288 uint32_t rx_buf_size;
6290 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
6292 for (i = 0; i < sc->num_queues; i++) {
6293 if (rx_buf_size <= MCLBYTES) {
6294 sc->fp[i].rx_buf_size = rx_buf_size;
6295 sc->fp[i].mbuf_alloc_size = MCLBYTES;
6296 } else if (rx_buf_size <= MJUMPAGESIZE) {
6297 sc->fp[i].rx_buf_size = rx_buf_size;
6298 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
6299 } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
6300 sc->fp[i].rx_buf_size = MCLBYTES;
6301 sc->fp[i].mbuf_alloc_size = MCLBYTES;
6302 } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
6303 sc->fp[i].rx_buf_size = MJUMPAGESIZE;
6304 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
6306 sc->fp[i].rx_buf_size = MCLBYTES;
6307 sc->fp[i].mbuf_alloc_size = MCLBYTES;
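/*
 * Worked example (illustrative; MCLBYTES is 2KB and MJUMPAGESIZE is
 * typically PAGE_SIZE): a 1500-byte MTU plus overhead fits a standard
 * 2KB cluster, while a 4000-byte MTU overflows it and selects page-sized
 * jumbo clusters. Still larger MTUs cap the per-buffer size instead, so
 * a single frame then spans multiple RX buffers.
 */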
6313 bxe_alloc_ilt_mem(struct bxe_softc *sc)
6318 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
6320 (M_NOWAIT | M_ZERO))) == NULL) {
6328 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6332 if ((sc->ilt->lines =
6333 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6335 (M_NOWAIT | M_ZERO))) == NULL) {
6343 bxe_free_ilt_mem(struct bxe_softc *sc)
6345 if (sc->ilt != NULL) {
6346 free(sc->ilt, M_BXE_ILT);
6352 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6354 if (sc->ilt->lines != NULL) {
6355 free(sc->ilt->lines, M_BXE_ILT);
6356 sc->ilt->lines = NULL;
6361 bxe_free_mem(struct bxe_softc *sc)
6366 if (!CONFIGURE_NIC_MODE(sc)) {
6367 /* free searcher T2 table */
6368 bxe_dma_free(sc, &sc->t2);
6372 for (i = 0; i < L2_ILT_LINES(sc); i++) {
6373 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6374 sc->context[i].vcxt = NULL;
6375 sc->context[i].size = 0;
6378 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6380 bxe_free_ilt_lines_mem(sc);
6383 bxe_iov_free_mem(sc);
6388 bxe_alloc_mem(struct bxe_softc *sc)
6395 if (!CONFIGURE_NIC_MODE(sc)) {
6396 /* allocate searcher T2 table */
6397 if (bxe_dma_alloc(sc, SRC_T2_SZ,
6398 &sc->t2, "searcher t2 table") != 0) {
6405 * Allocate memory for CDU context:
6406 * This memory is allocated separately and not in the generic ILT
6407 * functions because CDU differs in a few aspects:
6408 * 1. There can be multiple entities allocating memory for context -
6409 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6410 * its own ILT lines.
6411 * 2. Since CDU page-size is not a single 4KB page (which is the case
6412 * for the other ILT clients), to be efficient we want to support
6413 * allocation of sub-page-size in the last entry.
6414 * 3. Context pointers are used by the driver to pass to FW / update
6415 * the context (for the other ILT clients the pointers are used just to
6416 * free the memory during unload).
6418 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6419 for (i = 0, allocated = 0; allocated < context_size; i++) {
6420 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6421 (context_size - allocated));
6423 if (bxe_dma_alloc(sc, sc->context[i].size,
6424 &sc->context[i].vcxt_dma,
6425 "cdu context") != 0) {
6430 sc->context[i].vcxt =
6431 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6433 allocated += sc->context[i].size;
6436 bxe_alloc_ilt_lines_mem(sc);
6438 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6439 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6441 for (i = 0; i < 4; i++) {
6443 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6445 sc->ilt->clients[i].page_size,
6446 sc->ilt->clients[i].start,
6447 sc->ilt->clients[i].end,
6448 sc->ilt->clients[i].client_num,
6449 sc->ilt->clients[i].flags);
6452 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6453 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6459 if (bxe_iov_alloc_mem(sc)) {
6460 BLOGE(sc, "Failed to allocate memory for SRIOV\n");
6470 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6472 struct bxe_softc *sc;
6477 if (fp->rx_mbuf_tag == NULL) {
6481 /* free all mbufs and unload all maps */
6482 for (i = 0; i < RX_BD_TOTAL; i++) {
6483 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6484 bus_dmamap_sync(fp->rx_mbuf_tag,
6485 fp->rx_mbuf_chain[i].m_map,
6486 BUS_DMASYNC_POSTREAD);
6487 bus_dmamap_unload(fp->rx_mbuf_tag,
6488 fp->rx_mbuf_chain[i].m_map);
6491 if (fp->rx_mbuf_chain[i].m != NULL) {
6492 m_freem(fp->rx_mbuf_chain[i].m);
6493 fp->rx_mbuf_chain[i].m = NULL;
6494 fp->eth_q_stats.mbuf_alloc_rx--;
6500 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6502 struct bxe_softc *sc;
6503 int i, max_agg_queues;
6507 if (fp->rx_mbuf_tag == NULL) {
6511 max_agg_queues = MAX_AGG_QS(sc);
6513 /* release all mbufs and unload all DMA maps in the TPA pool */
6514 for (i = 0; i < max_agg_queues; i++) {
6515 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6516 bus_dmamap_sync(fp->rx_mbuf_tag,
6517 fp->rx_tpa_info[i].bd.m_map,
6518 BUS_DMASYNC_POSTREAD);
6519 bus_dmamap_unload(fp->rx_mbuf_tag,
6520 fp->rx_tpa_info[i].bd.m_map);
6523 if (fp->rx_tpa_info[i].bd.m != NULL) {
6524 m_freem(fp->rx_tpa_info[i].bd.m);
6525 fp->rx_tpa_info[i].bd.m = NULL;
6526 fp->eth_q_stats.mbuf_alloc_tpa--;
6532 bxe_free_sge_chain(struct bxe_fastpath *fp)
6534 struct bxe_softc *sc;
6539 if (fp->rx_sge_mbuf_tag == NULL) {
6543 /* free all mbufs and unload all maps */
6544 for (i = 0; i < RX_SGE_TOTAL; i++) {
6545 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6546 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6547 fp->rx_sge_mbuf_chain[i].m_map,
6548 BUS_DMASYNC_POSTREAD);
6549 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6550 fp->rx_sge_mbuf_chain[i].m_map);
6553 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6554 m_freem(fp->rx_sge_mbuf_chain[i].m);
6555 fp->rx_sge_mbuf_chain[i].m = NULL;
6556 fp->eth_q_stats.mbuf_alloc_sge--;
6562 bxe_free_fp_buffers(struct bxe_softc *sc)
6564 struct bxe_fastpath *fp;
6567 for (i = 0; i < sc->num_queues; i++) {
6570 #if __FreeBSD_version >= 800000
6571 if (fp->tx_br != NULL) {
6572 /* just in case bxe_mq_flush() wasn't called */
6573 if (mtx_initialized(&fp->tx_mtx)) {
6577 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6579 BXE_FP_TX_UNLOCK(fp);
6581 buf_ring_free(fp->tx_br, M_DEVBUF);
6586 /* free all RX buffers */
6587 bxe_free_rx_bd_chain(fp);
6588 bxe_free_tpa_pool(fp);
6589 bxe_free_sge_chain(fp);
6591 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6592 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6593 fp->eth_q_stats.mbuf_alloc_rx);
6596 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6597 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6598 fp->eth_q_stats.mbuf_alloc_sge);
6601 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6602 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6603 fp->eth_q_stats.mbuf_alloc_tpa);
6606 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6607 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6608 fp->eth_q_stats.mbuf_alloc_tx);
6611 /* XXX verify all mbufs were reclaimed */
6613 if (mtx_initialized(&fp->tx_mtx)) {
6614 mtx_destroy(&fp->tx_mtx);
6617 if (mtx_initialized(&fp->rx_mtx)) {
6618 mtx_destroy(&fp->rx_mtx);
6624 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6625 uint16_t prev_index,
6628 struct bxe_sw_rx_bd *rx_buf;
6629 struct eth_rx_bd *rx_bd;
6630 bus_dma_segment_t segs[1];
6637 /* allocate the new RX BD mbuf */
6638 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6639 if (__predict_false(m == NULL)) {
6640 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6644 fp->eth_q_stats.mbuf_alloc_rx++;
6646 /* initialize the mbuf buffer length */
6647 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6649 /* map the mbuf into non-paged pool */
6650 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6651 fp->rx_mbuf_spare_map,
6652 m, segs, &nsegs, BUS_DMA_NOWAIT);
6653 if (__predict_false(rc != 0)) {
6654 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6656 fp->eth_q_stats.mbuf_alloc_rx--;
6660 /* all mbufs must map to a single segment */
6661 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6663 /* release any existing RX BD mbuf mappings */
6665 if (prev_index != index) {
6666 rx_buf = &fp->rx_mbuf_chain[prev_index];
6668 if (rx_buf->m_map != NULL) {
6669 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6670 BUS_DMASYNC_POSTREAD);
6671 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6675 * We only get here from bxe_rxeof() when the maximum number
6676 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6677 * holds the mbuf in the prev_index so it's OK to NULL it out
6678 * here without concern of a memory leak.
6680 fp->rx_mbuf_chain[prev_index].m = NULL;
6683 rx_buf = &fp->rx_mbuf_chain[index];
6685 if (rx_buf->m_map != NULL) {
6686 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6687 BUS_DMASYNC_POSTREAD);
6688 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6691 /* save the mbuf and mapping info for a future packet */
6692 map = (prev_index != index) ?
6693 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6694 rx_buf->m_map = fp->rx_mbuf_spare_map;
6695 fp->rx_mbuf_spare_map = map;
6696 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6697 BUS_DMASYNC_PREREAD);
6700 rx_bd = &fp->rx_chain[index];
6701 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6702 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
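/*
 * Design note: the new mbuf is DMA-loaded into the spare map first, so a
 * mapping failure above leaves the ring entry and its existing mbuf
 * intact; only after a successful load are the ring map and the spare map
 * swapped. The TPA and SGE allocators below use the same pattern.
 */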
6708 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6711 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6712 bus_dma_segment_t segs[1];
6718 /* allocate the new TPA mbuf */
6719 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6720 if (__predict_false(m == NULL)) {
6721 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6725 fp->eth_q_stats.mbuf_alloc_tpa++;
6727 /* initialize the mbuf buffer length */
6728 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6730 /* map the mbuf into non-paged pool */
6731 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6732 fp->rx_tpa_info_mbuf_spare_map,
6733 m, segs, &nsegs, BUS_DMA_NOWAIT);
6734 if (__predict_false(rc != 0)) {
6735 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6737 fp->eth_q_stats.mbuf_alloc_tpa--;
6741 /* all mbufs must map to a single segment */
6742 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6744 /* release any existing TPA mbuf mapping */
6745 if (tpa_info->bd.m_map != NULL) {
6746 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6747 BUS_DMASYNC_POSTREAD);
6748 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6751 /* save the mbuf and mapping info for the TPA mbuf */
6752 map = tpa_info->bd.m_map;
6753 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6754 fp->rx_tpa_info_mbuf_spare_map = map;
6755 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6756 BUS_DMASYNC_PREREAD);
6758 tpa_info->seg = segs[0];
6764 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6765 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6769 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6772 struct bxe_sw_rx_bd *sge_buf;
6773 struct eth_rx_sge *sge;
6774 bus_dma_segment_t segs[1];
6780 /* allocate a new SGE mbuf */
6781 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6782 if (__predict_false(m == NULL)) {
6783 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6787 fp->eth_q_stats.mbuf_alloc_sge++;
6789 /* initialize the mbuf buffer length */
6790 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6792 /* map the SGE mbuf into non-paged pool */
6793 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6794 fp->rx_sge_mbuf_spare_map,
6795 m, segs, &nsegs, BUS_DMA_NOWAIT);
6796 if (__predict_false(rc != 0)) {
6797 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6799 fp->eth_q_stats.mbuf_alloc_sge--;
6803 /* all mbufs must map to a single segment */
6804 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6806 sge_buf = &fp->rx_sge_mbuf_chain[index];
6808 /* release any existing SGE mbuf mapping */
6809 if (sge_buf->m_map != NULL) {
6810 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6811 BUS_DMASYNC_POSTREAD);
6812 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6815 /* save the mbuf and mapping info for a future packet */
6816 map = sge_buf->m_map;
6817 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6818 fp->rx_sge_mbuf_spare_map = map;
6819 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6820 BUS_DMASYNC_PREREAD);
6823 sge = &fp->rx_sge_chain[index];
6824 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6825 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6830 static __noinline int
6831 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6833 struct bxe_fastpath *fp;
6835 int ring_prod, cqe_ring_prod;
6838 for (i = 0; i < sc->num_queues; i++) {
6841 #if __FreeBSD_version >= 800000
6842 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
6843 M_DONTWAIT, &fp->tx_mtx);
6844 if (fp->tx_br == NULL) {
6845 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
6846 goto bxe_alloc_fp_buffers_error;
6850 ring_prod = cqe_ring_prod = 0;
6854 /* allocate buffers for the RX BDs in RX BD chain */
6855 for (j = 0; j < sc->max_rx_bufs; j++) {
6856 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6858 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6860 goto bxe_alloc_fp_buffers_error;
6863 ring_prod = RX_BD_NEXT(ring_prod);
6864 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6867 fp->rx_bd_prod = ring_prod;
6868 fp->rx_cq_prod = cqe_ring_prod;
6869 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6871 max_agg_queues = MAX_AGG_QS(sc);
6873 fp->tpa_enable = TRUE;
6875 /* fill the TPA pool */
6876 for (j = 0; j < max_agg_queues; j++) {
6877 rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6879 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6881 fp->tpa_enable = FALSE;
6882 goto bxe_alloc_fp_buffers_error;
6885 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6888 if (fp->tpa_enable) {
6889 /* fill the RX SGE chain */
6891 for (j = 0; j < RX_SGE_USABLE; j++) {
6892 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6894 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6896 fp->tpa_enable = FALSE;
6898 goto bxe_alloc_fp_buffers_error;
6901 ring_prod = RX_SGE_NEXT(ring_prod);
6904 fp->rx_sge_prod = ring_prod;
6910 bxe_alloc_fp_buffers_error:
6912 /* unwind what was already allocated */
6913 bxe_free_rx_bd_chain(fp);
6914 bxe_free_tpa_pool(fp);
6915 bxe_free_sge_chain(fp);
6921 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6923 bxe_dma_free(sc, &sc->fw_stats_dma);
6925 sc->fw_stats_num = 0;
6927 sc->fw_stats_req_size = 0;
6928 sc->fw_stats_req = NULL;
6929 sc->fw_stats_req_mapping = 0;
6931 sc->fw_stats_data_size = 0;
6932 sc->fw_stats_data = NULL;
6933 sc->fw_stats_data_mapping = 0;
6937 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6939 uint8_t num_queue_stats;
6942 /* number of queues for statistics is number of eth queues */
6943 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6946 * Total number of FW statistics requests =
6947 * 1 for port stats + 1 for PF stats + num of queues
6949 sc->fw_stats_num = (2 + num_queue_stats);
6952 * Request is built from stats_query_header and an array of
6953 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6954 * rules. The real number of requests is configured in the
6955 * stats_query_header.
6958 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6959 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
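    /*
     * Worked example (illustrative): with 4 eth queues, fw_stats_num is
     * 2 + 4 = 6. Assuming STATS_QUERY_CMD_COUNT is 16 (per the firmware
     * HSI headers), 6 / 16 == 0 with a nonzero remainder, so num_groups
     * is 1 and a single stats_query_cmd_group carries all the requests.
     */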
6961 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6962 sc->fw_stats_num, num_groups);
6964 sc->fw_stats_req_size =
6965 (sizeof(struct stats_query_header) +
6966 (num_groups * sizeof(struct stats_query_cmd_group)));
6969 * Data for statistics requests + stats_counter.
6970 * stats_counter holds per-STORM counters that are incremented when
6971 * STORM has finished with the current request. Memory for FCoE
6972 * offloaded statistics is counted anyway, even if it will not be sent.
6973 * VF stats are not accounted for here as the data of VF stats is stored
6974 * in memory allocated by the VF, not here.
6976 sc->fw_stats_data_size =
6977 (sizeof(struct stats_counter) +
6978 sizeof(struct per_port_stats) +
6979 sizeof(struct per_pf_stats) +
6980 /* sizeof(struct fcoe_statistics_params) + */
6981 (sizeof(struct per_queue_stats) * num_queue_stats));
6983 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6984 &sc->fw_stats_dma, "fw stats") != 0) {
6985 bxe_free_fw_stats_mem(sc);
6989 /* set up the shortcuts */
6992 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6993 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6996 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6997 sc->fw_stats_req_size);
6998 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6999 sc->fw_stats_req_size);
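    /*
     * Resulting layout of the single DMA allocation:
     *
     *   fw_stats_dma.vaddr -> +---------------------------+
     *                         | struct bxe_fw_stats_req   | fw_stats_req_size
     *   fw_stats_data ------> +---------------------------+
     *                         | struct bxe_fw_stats_data  | fw_stats_data_size
     *                         +---------------------------+
     */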
7001 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
7002 (uintmax_t)sc->fw_stats_req_mapping);
7004 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
7005 (uintmax_t)sc->fw_stats_data_mapping);
7012 * 0-7 - Engine0 load counter.
7013 * 8-15 - Engine1 load counter.
7014 * 16 - Engine0 RESET_IN_PROGRESS bit.
7015 * 17 - Engine1 RESET_IN_PROGRESS bit.
7016 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
7017 * function on the engine
7018 * 19 - Engine1 ONE_IS_LOADED.
7019 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines'
7020 * leaders to complete (check both RESET_IN_PROGRESS bits and not
7021 * for just the one belonging to its engine).
7023 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
7024 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
7025 #define BXE_PATH0_LOAD_CNT_SHIFT 0
7026 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
7027 #define BXE_PATH1_LOAD_CNT_SHIFT 8
7028 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
7029 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
7030 #define BXE_GLOBAL_RESET_BIT 0x00040000
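/*
 * Editor's sketch (standalone, illustrative): decoding the recovery register
 * layout documented above, using the same masks as the BXE_* defines. The
 * demo_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static uint8_t
demo_engine_load_count(uint32_t glob_reg, int engine)
{
    return (engine ? ((glob_reg & 0x0000ff00) >> 8)   /* engine1, bits 8-15 */
                   : ((glob_reg & 0x000000ff) >> 0)); /* engine0, bits 0-7  */
}

static bool
demo_reset_in_progress(uint32_t glob_reg, int engine)
{
    return ((glob_reg & (engine ? 0x00020000 : 0x00010000)) != 0);
}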
7032 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
7034 bxe_set_reset_global(struct bxe_softc *sc)
7037 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7038 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7039 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
7040 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7043 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
7045 bxe_clear_reset_global(struct bxe_softc *sc)
7048 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7049 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7050 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
7051 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7054 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
7056 bxe_reset_is_global(struct bxe_softc *sc)
7058 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7059 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
7060 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
7063 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
7065 bxe_set_reset_done(struct bxe_softc *sc)
7068 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
7069 BXE_PATH0_RST_IN_PROG_BIT;
7071 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7073 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7076 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7078 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7081 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
7083 bxe_set_reset_in_progress(struct bxe_softc *sc)
7086 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
7087 BXE_PATH0_RST_IN_PROG_BIT;
7089 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7091 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7094 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7096 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7099 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
7101 bxe_reset_is_done(struct bxe_softc *sc,
7104 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7105 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
7106 BXE_PATH0_RST_IN_PROG_BIT;
7108 /* return false if bit is set */
7109 return (val & bit) ? FALSE : TRUE;
7112 /* get the load status for an engine, should be run under rtnl lock */
7114 bxe_get_load_status(struct bxe_softc *sc,
7117 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
7118 BXE_PATH0_LOAD_CNT_MASK;
7119 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
7120 BXE_PATH0_LOAD_CNT_SHIFT;
7121 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7123 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
7125 val = ((val & mask) >> shift);
7127 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
7132 /* set pf load mark */
7133 /* XXX needs to be under rtnl lock */
7135 bxe_set_pf_load(struct bxe_softc *sc)
7139 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
7140 BXE_PATH0_LOAD_CNT_MASK;
7141 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
7142 BXE_PATH0_LOAD_CNT_SHIFT;
7144 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7146 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7147 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
7149 /* get the current counter value */
7150 val1 = ((val & mask) >> shift);
7152 /* set bit of this PF */
7153 val1 |= (1 << SC_ABS_FUNC(sc));
7155 /* clear the old value */
7158 /* set the new one */
7159 val |= ((val1 << shift) & mask);
7161 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7163 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7166 /* clear pf load mark */
7167 /* XXX needs to be under rtnl lock */
7169 bxe_clear_pf_load(struct bxe_softc *sc)
7172 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
7173 BXE_PATH0_LOAD_CNT_MASK;
7174 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
7175 BXE_PATH0_LOAD_CNT_SHIFT;
7177 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7178 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7179 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
7181 /* get the current counter value */
7182 val1 = (val & mask) >> shift;
7184 /* clear bit of that PF */
7185 val1 &= ~(1 << SC_ABS_FUNC(sc));
7187 /* clear the old value */
7190 /* set the new one */
7191 val |= ((val1 << shift) & mask);
7193 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7194 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
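/*
 * Editor's sketch (standalone, illustrative): bxe_set_pf_load() and
 * bxe_clear_pf_load() above both perform the same read-modify-write on the
 * per-engine load counter field; the pure bit math is shown below.
 * demo_update_pf_load() is a hypothetical helper and 'func' stands in for
 * SC_ABS_FUNC(sc).
 */
#include <stdint.h>

static uint32_t
demo_update_pf_load(uint32_t reg, uint32_t mask, uint32_t shift,
                    int func, int set)
{
    uint32_t cnt = ((reg & mask) >> shift); /* current counter value */

    if (set)
        cnt |= (1U << func);  /* mark this PF as loaded */
    else
        cnt &= ~(1U << func); /* clear this PF's mark */

    reg &= ~mask;                   /* clear the old value */
    reg |= ((cnt << shift) & mask); /* set the new one */

    return (reg);
}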
7198 /* send load request to the MCP and analyze the response */
7200 bxe_nic_load_request(struct bxe_softc *sc,
7201 uint32_t *load_code)
7205 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
7206 DRV_MSG_SEQ_NUMBER_MASK);
7208 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
7210 /* get the current FW pulse sequence */
7211 sc->fw_drv_pulse_wr_seq =
7212 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
7213 DRV_PULSE_SEQ_MASK);
7215 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
7216 sc->fw_drv_pulse_wr_seq);
7219 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
7220 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
7222 /* if the MCP fails to respond we must abort */
7223 if (!(*load_code)) {
7224 BLOGE(sc, "MCP response failure!\n");
7228 /* if MCP refused then must abort */
7229 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7230 BLOGE(sc, "MCP refused load request\n");
7238 * Check whether another PF has already loaded FW to chip. In virtualized
7239 * environments, a PF from another VM may have already initialized the
7240 * device, including loading the FW.
7243 bxe_nic_load_analyze_req(struct bxe_softc *sc,
7246 uint32_t my_fw, loaded_fw;
7248 /* is another pf loaded on this engine? */
7249 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
7250 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
7251 /* build my FW version dword */
7252 my_fw = (BCM_5710_FW_MAJOR_VERSION +
7253 (BCM_5710_FW_MINOR_VERSION << 8 ) +
7254 (BCM_5710_FW_REVISION_VERSION << 16) +
7255 (BCM_5710_FW_ENGINEERING_VERSION << 24));
7257 /* read loaded FW from chip */
7258 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
7259 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
7262 /* abort nic load if version mismatch */
7263 if (my_fw != loaded_fw) {
7264 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
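/*
 * Editor's sketch (standalone, illustrative): the version dword compared
 * above packs the four version bytes lowest-byte-first, e.g. FW 7.13.1.0
 * packs as 0x00010d07. demo_fw_version_dword() is a hypothetical name.
 */
#include <stdint.h>

static uint32_t
demo_fw_version_dword(uint8_t major, uint8_t minor, uint8_t rev, uint8_t eng)
{
    return ((uint32_t)major |
            ((uint32_t)minor << 8) |
            ((uint32_t)rev << 16) |
            ((uint32_t)eng << 24));
}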
7273 /* mark PMF if applicable */
7275 bxe_nic_load_pmf(struct bxe_softc *sc,
7278 uint32_t ncsi_oem_data_addr;
7280 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7281 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
7282 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
7284 * Barrier here for ordering between the writing to sc->port.pmf here
7285 * and reading it from the periodic task.
7293 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
7296 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
7297 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
7298 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
7299 if (ncsi_oem_data_addr) {
7301 (ncsi_oem_data_addr +
7302 offsetof(struct glob_ncsi_oem_data, driver_version)),
7310 bxe_read_mf_cfg(struct bxe_softc *sc)
7312 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
7316 if (BXE_NOMCP(sc)) {
7317 return; /* what should the default value be in this case? */
7321 * The formula for computing the absolute function number is...
7322 * For 2 port configuration (4 functions per port):
7323 * abs_func = 2 * vn + SC_PORT + SC_PATH
7324 * For 4 port configuration (2 functions per port):
7325 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
7327 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
7328 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
7329 if (abs_func >= E1H_FUNC_MAX) {
7332 sc->devinfo.mf_info.mf_config[vn] =
7333 MFCFG_RD(sc, func_mf_config[abs_func].config);
7336 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
7337 FUNC_MF_CFG_FUNC_DISABLED) {
7338 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
7339 sc->flags |= BXE_MF_FUNC_DIS;
7341 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
7342 sc->flags &= ~BXE_MF_FUNC_DIS;
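/*
 * Editor's sketch (standalone, illustrative): the absolute-function formula
 * from the comment above. With n == 2 (4-port mode), vn = 1, port = 1,
 * path = 0: abs_func = 2 * (2*1 + 1) + 0 = 6. demo_abs_func() is a
 * hypothetical name.
 */
static int
demo_abs_func(int n, int vn, int port, int path)
{
    /* n == 1 for 2-port (4 funcs/port) mode, n == 2 for 4-port mode */
    return ((n * (2 * vn + port)) + path);
}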
7346 /* acquire split MCP access lock register */
7347 static int bxe_acquire_alr(struct bxe_softc *sc)
7351 for (j = 0; j < 1000; j++) {
7353 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
7354 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
7355 if (val & (1L << 31))
7361 if (!(val & (1L << 31))) {
7362 BLOGE(sc, "Cannot acquire MCP access lock register\n");
7369 /* release split MCP access lock register */
7370 static void bxe_release_alr(struct bxe_softc *sc)
7372 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
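/*
 * Editor's sketch (standalone, illustrative): the acquire loop above writes
 * the lock request and polls for bit 31 to latch, giving up after 1000
 * tries. The demo_try_lock callback stands in for the REG_WR/REG_RD pair on
 * GRCBASE_MCP + 0x9c; the names are hypothetical.
 */
#include <stdint.h>

static int
demo_acquire_alr(uint32_t (*demo_try_lock)(void))
{
    int j;

    for (j = 0; j < 1000; j++) {
        if (demo_try_lock() & (1U << 31)) {
            return (0); /* lock latched */
        }
        /* the real code sleeps briefly here before retrying */
    }

    return (-1); /* could not acquire the lock */
}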
7376 bxe_fan_failure(struct bxe_softc *sc)
7378 int port = SC_PORT(sc);
7379 uint32_t ext_phy_config;
7381 /* mark the failure */
7383 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7385 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7386 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7387 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7390 /* log the failure */
7391 BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7392 "the card to prevent permanent damage. "
7393 "Please contact OEM Support for assistance\n");
7397 bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7400 * Schedule device reset (unload)
7401 * Some boards consume enough power when the driver is up that they can
7402 * overheat if the fan fails.
7404 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7405 schedule_delayed_work(&sc->sp_rtnl_task, 0);
7409 /* this function is called upon a link interrupt */
7411 bxe_link_attn(struct bxe_softc *sc)
7413 uint32_t pause_enabled = 0;
7414 struct host_port_stats *pstats;
7417 /* Make sure that we are synced with the current statistics */
7418 bxe_stats_handle(sc, STATS_EVENT_STOP);
7420 elink_link_update(&sc->link_params, &sc->link_vars);
7422 if (sc->link_vars.link_up) {
7424 /* dropless flow control */
7425 if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7428 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7433 (BAR_USTRORM_INTMEM +
7434 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7438 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7439 pstats = BXE_SP(sc, port_stats);
7440 /* reset old mac stats */
7441 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7444 if (sc->state == BXE_STATE_OPEN) {
7445 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7449 if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7450 cmng_fns = bxe_get_cmng_fns_mode(sc);
7452 if (cmng_fns != CMNG_FNS_NONE) {
7453 bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7454 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7456 /* rate shaping and fairness are disabled */
7457 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7461 bxe_link_report_locked(sc);
7464 ; // XXX bxe_link_sync_notify(sc);
7469 bxe_attn_int_asserted(struct bxe_softc *sc,
7472 int port = SC_PORT(sc);
7473 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7474 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7475 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7476 NIG_REG_MASK_INTERRUPT_PORT0;
7478 uint32_t nig_mask = 0;
7483 if (sc->attn_state & asserted) {
7484 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7487 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7489 aeu_mask = REG_RD(sc, aeu_addr);
7491 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7492 aeu_mask, asserted);
7494 aeu_mask &= ~(asserted & 0x3ff);
7496 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7498 REG_WR(sc, aeu_addr, aeu_mask);
7500 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7502 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7503 sc->attn_state |= asserted;
7504 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7506 if (asserted & ATTN_HARD_WIRED_MASK) {
7507 if (asserted & ATTN_NIG_FOR_FUNC) {
7509 bxe_acquire_phy_lock(sc);
7510 /* save nig interrupt mask */
7511 nig_mask = REG_RD(sc, nig_int_mask_addr);
7513 /* If nig_mask is not set, no need to call the update function */
7515 REG_WR(sc, nig_int_mask_addr, 0);
7520 /* handle unicore attn? */
7523 if (asserted & ATTN_SW_TIMER_4_FUNC) {
7524 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7527 if (asserted & GPIO_2_FUNC) {
7528 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7531 if (asserted & GPIO_3_FUNC) {
7532 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7535 if (asserted & GPIO_4_FUNC) {
7536 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7540 if (asserted & ATTN_GENERAL_ATTN_1) {
7541 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7542 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7544 if (asserted & ATTN_GENERAL_ATTN_2) {
7545 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7546 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7548 if (asserted & ATTN_GENERAL_ATTN_3) {
7549 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7550 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7553 if (asserted & ATTN_GENERAL_ATTN_4) {
7554 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7555 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7557 if (asserted & ATTN_GENERAL_ATTN_5) {
7558 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7559 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7561 if (asserted & ATTN_GENERAL_ATTN_6) {
7562 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7563 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7568 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7569 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7571 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7574 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7576 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7577 REG_WR(sc, reg_addr, asserted);
7579 /* now set back the mask */
7580 if (asserted & ATTN_NIG_FOR_FUNC) {
7582 * Verify that IGU ack through BAR was written before restoring
7583 * NIG mask. This loop should exit after 2-3 iterations max.
7585 if (sc->devinfo.int_block != INT_BLOCK_HC) {
7589 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7590 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7591 (++cnt < MAX_IGU_ATTN_ACK_TO));
7594 BLOGE(sc, "Failed to verify IGU ack on time\n");
7600 REG_WR(sc, nig_int_mask_addr, nig_mask);
7602 bxe_release_phy_lock(sc);
7607 bxe_print_next_block(struct bxe_softc *sc,
7611 BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7615 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7620 uint32_t cur_bit = 0;
7623 for (i = 0; sig; i++) {
7624 cur_bit = ((uint32_t)0x1 << i);
7625 if (sig & cur_bit) {
7627 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7629 bxe_print_next_block(sc, par_num++, "BRB");
7631 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7633 bxe_print_next_block(sc, par_num++, "PARSER");
7635 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7637 bxe_print_next_block(sc, par_num++, "TSDM");
7639 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7641 bxe_print_next_block(sc, par_num++, "SEARCHER");
7643 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7645 bxe_print_next_block(sc, par_num++, "TCM");
7647 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7649 bxe_print_next_block(sc, par_num++, "TSEMI");
7651 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7653 bxe_print_next_block(sc, par_num++, "XPB");
7666 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7673 uint32_t cur_bit = 0;
7674 for (i = 0; sig; i++) {
7675 cur_bit = ((uint32_t)0x1 << i);
7676 if (sig & cur_bit) {
7678 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7680 bxe_print_next_block(sc, par_num++, "PBF");
7682 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7684 bxe_print_next_block(sc, par_num++, "QM");
7686 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7688 bxe_print_next_block(sc, par_num++, "TM");
7690 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7692 bxe_print_next_block(sc, par_num++, "XSDM");
7694 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7696 bxe_print_next_block(sc, par_num++, "XCM");
7698 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7700 bxe_print_next_block(sc, par_num++, "XSEMI");
7702 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7704 bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7706 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7708 bxe_print_next_block(sc, par_num++, "NIG");
7710 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7712 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7715 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7717 bxe_print_next_block(sc, par_num++, "DEBUG");
7719 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7721 bxe_print_next_block(sc, par_num++, "USDM");
7723 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7725 bxe_print_next_block(sc, par_num++, "UCM");
7727 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7729 bxe_print_next_block(sc, par_num++, "USEMI");
7731 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7733 bxe_print_next_block(sc, par_num++, "UPB");
7735 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7737 bxe_print_next_block(sc, par_num++, "CSDM");
7739 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7741 bxe_print_next_block(sc, par_num++, "CCM");
7754 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7759 uint32_t cur_bit = 0;
7762 for (i = 0; sig; i++) {
7763 cur_bit = ((uint32_t)0x1 << i);
7764 if (sig & cur_bit) {
7766 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7768 bxe_print_next_block(sc, par_num++, "CSEMI");
7770 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7772 bxe_print_next_block(sc, par_num++, "PXP");
7774 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7776 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7778 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7780 bxe_print_next_block(sc, par_num++, "CFC");
7782 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7784 bxe_print_next_block(sc, par_num++, "CDU");
7786 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7788 bxe_print_next_block(sc, par_num++, "DMAE");
7790 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7792 bxe_print_next_block(sc, par_num++, "IGU");
7794 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7796 bxe_print_next_block(sc, par_num++, "MISC");
7809 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7815 uint32_t cur_bit = 0;
7818 for (i = 0; sig; i++) {
7819 cur_bit = ((uint32_t)0x1 << i);
7820 if (sig & cur_bit) {
7822 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7824 bxe_print_next_block(sc, par_num++, "MCP ROM");
7827 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7829 bxe_print_next_block(sc, par_num++,
7833 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7835 bxe_print_next_block(sc, par_num++,
7839 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7841 bxe_print_next_block(sc, par_num++,
7856 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7861 uint32_t cur_bit = 0;
7864 for (i = 0; sig; i++) {
7865 cur_bit = ((uint32_t)0x1 << i);
7866 if (sig & cur_bit) {
7868 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7870 bxe_print_next_block(sc, par_num++, "PGLUE_B");
7872 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7874 bxe_print_next_block(sc, par_num++, "ATC");
7887 bxe_parity_attn(struct bxe_softc *sc,
7894 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7895 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7896 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7897 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7898 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7899 BLOGE(sc, "Parity error: HW block parity attention:\n"
7900 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7901 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7902 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7903 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7904 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7905 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7908 BLOGI(sc, "Parity errors detected in blocks: ");
7911 bxe_check_blocks_with_parity0(sc, sig[0] &
7912 HW_PRTY_ASSERT_SET_0,
7915 bxe_check_blocks_with_parity1(sc, sig[1] &
7916 HW_PRTY_ASSERT_SET_1,
7917 par_num, global, print);
7919 bxe_check_blocks_with_parity2(sc, sig[2] &
7920 HW_PRTY_ASSERT_SET_2,
7923 bxe_check_blocks_with_parity3(sc, sig[3] &
7924 HW_PRTY_ASSERT_SET_3,
7925 par_num, global, print);
7927 bxe_check_blocks_with_parity4(sc, sig[4] &
7928 HW_PRTY_ASSERT_SET_4,
7941 bxe_chk_parity_attn(struct bxe_softc *sc,
7945 struct attn_route attn = { {0} };
7946 int port = SC_PORT(sc);
7948 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7949 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7950 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7951 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7954 * Since MCP attentions can't be disabled inside the block, we need to
7955 * read AEU registers to see whether they're currently disabled
7957 attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7958 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7959 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7960 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7963 if (!CHIP_IS_E1x(sc))
7964 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7966 return (bxe_parity_attn(sc, global, print, attn.sig));
7970 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7975 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7976 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7977 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7978 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7979 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7980 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7981 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7982 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7983 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7984 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7985 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7986 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7987 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7988 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7989 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7990 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7991 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7992 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7993 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7994 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7995 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7998 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7999 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
8000 BLOGE(sc, "ATC hw attention 0x%08x\n", val);
8001 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
8002 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
8003 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
8004 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
8005 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
8006 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
8007 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
8008 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
8009 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
8010 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
8011 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
8012 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
8015 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
8016 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
8017 BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
8018 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
8019 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
8024 bxe_e1h_disable(struct bxe_softc *sc)
8026 int port = SC_PORT(sc);
8030 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8034 bxe_e1h_enable(struct bxe_softc *sc)
8036 int port = SC_PORT(sc);
8038 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
8040 // XXX bxe_tx_enable(sc);
8044 * called due to MCP event (on pmf):
8045 * reread new bandwidth configuration
8047 * notify other functions about the change
8050 bxe_config_mf_bw(struct bxe_softc *sc)
8052 if (sc->link_vars.link_up) {
8053 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
8054 // XXX bxe_link_sync_notify(sc);
8057 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
8061 bxe_set_mf_bw(struct bxe_softc *sc)
8063 bxe_config_mf_bw(sc);
8064 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
8068 bxe_handle_eee_event(struct bxe_softc *sc)
8070 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
8071 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
8074 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
8077 bxe_drv_info_ether_stat(struct bxe_softc *sc)
8079 struct eth_stats_info *ether_stat =
8080 &sc->sp->drv_info_to_mcp.ether_stat;
8082 strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
8083 ETH_STAT_INFO_VERSION_LEN);
8085 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
8086 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
8087 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
8088 ether_stat->mac_local + MAC_PAD,
8091 ether_stat->mtu_size = sc->mtu;
8093 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
8094 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
8095 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
8098 // XXX ether_stat->feature_flags |= ???;
8100 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
8102 ether_stat->txq_size = sc->tx_ring_size;
8103 ether_stat->rxq_size = sc->rx_ring_size;
8107 bxe_handle_drv_info_req(struct bxe_softc *sc)
8109 enum drv_info_opcode op_code;
8110 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
8112 /* if drv_info version supported by MFW doesn't match - send NACK */
8113 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
8114 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
8118 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
8119 DRV_INFO_CONTROL_OP_CODE_SHIFT);
8121 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
8124 case ETH_STATS_OPCODE:
8125 bxe_drv_info_ether_stat(sc);
8127 case FCOE_STATS_OPCODE:
8128 case ISCSI_STATS_OPCODE:
8130 /* if op code isn't supported - send NACK */
8131 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
8136 * If we got drv_info attn from MFW then these fields are defined in shmem2.
8139 SHMEM2_WR(sc, drv_info_host_addr_lo,
8140 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
8141 SHMEM2_WR(sc, drv_info_host_addr_hi,
8142 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
8144 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
8148 bxe_dcc_event(struct bxe_softc *sc,
8151 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
8153 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
8155 * This is the only place besides the function initialization
8156 * where the sc->flags can change, so it is done without any locks.
8159 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
8160 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
8161 sc->flags |= BXE_MF_FUNC_DIS;
8162 bxe_e1h_disable(sc);
8164 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
8165 sc->flags &= ~BXE_MF_FUNC_DIS;
8168 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
8171 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
8172 bxe_config_mf_bw(sc);
8173 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
8176 /* Report results to MCP */
8178 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
8180 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
8184 bxe_pmf_update(struct bxe_softc *sc)
8186 int port = SC_PORT(sc);
8190 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
8193 * We need the mb() to ensure the ordering between the writing to
8194 * sc->port.pmf here and reading it from the bxe_periodic_task().
8198 /* queue a periodic task */
8199 // XXX schedule task...
8201 // XXX bxe_dcbx_pmf_update(sc);
8203 /* enable nig attention */
8204 val = (0xff0f | (1 << (SC_VN(sc) + 4)));
8205 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8206 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
8207 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
8208 } else if (!CHIP_IS_E1x(sc)) {
8209 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
8210 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
8213 bxe_stats_handle(sc, STATS_EVENT_PMF);
8217 bxe_mc_assert(struct bxe_softc *sc)
8221 uint32_t row0, row1, row2, row3;
8224 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
8226 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8228 /* print the asserts */
8229 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8231 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
8232 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
8233 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
8234 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
8236 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8237 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8238 i, row3, row2, row1, row0);
8246 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
8248 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8251 /* print the asserts */
8252 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8254 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
8255 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
8256 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
8257 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
8259 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8260 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8261 i, row3, row2, row1, row0);
8269 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
8271 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8274 /* print the asserts */
8275 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8277 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
8278 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
8279 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
8280 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
8282 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8283 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8284 i, row3, row2, row1, row0);
8292 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
8294 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8297 /* print the asserts */
8298 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8300 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
8301 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
8302 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
8303 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
8305 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8306 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8307 i, row3, row2, row1, row0);
8318 bxe_attn_int_deasserted3(struct bxe_softc *sc,
8321 int func = SC_FUNC(sc);
8324 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
8326 if (attn & BXE_PMF_LINK_ASSERT(sc)) {
8328 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8329 bxe_read_mf_cfg(sc);
8330 sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
8331 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
8332 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
8334 if (val & DRV_STATUS_DCC_EVENT_MASK)
8335 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
8337 if (val & DRV_STATUS_SET_MF_BW)
8340 if (val & DRV_STATUS_DRV_INFO_REQ)
8341 bxe_handle_drv_info_req(sc);
8344 if (val & DRV_STATUS_VF_DISABLED)
8345 bxe_vf_handle_flr_event(sc);
8348 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
8353 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
8354 (sc->dcbx_enabled > 0))
8355 /* start dcbx state machine */
8356 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED);
8360 if (val & DRV_STATUS_AFEX_EVENT_MASK)
8361 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK);
8364 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
8365 bxe_handle_eee_event(sc);
8367 if (sc->link_vars.periodic_flags &
8368 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
8369 /* sync with link */
8370 bxe_acquire_phy_lock(sc);
8371 sc->link_vars.periodic_flags &=
8372 ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
8373 bxe_release_phy_lock(sc);
8375 ; // XXX bxe_link_sync_notify(sc);
8376 bxe_link_report(sc);
8380 * Always call it here: bxe_link_report() will
8381 * prevent duplicate link indications.
8383 bxe_link_status_update(sc);
8385 } else if (attn & BXE_MC_ASSERT_BITS) {
8387 BLOGE(sc, "MC assert!\n");
8389 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8390 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8391 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8392 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8393 bxe_panic(sc, ("MC assert!\n"));
8395 } else if (attn & BXE_MCP_ASSERT) {
8397 BLOGE(sc, "MCP assert!\n");
8398 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8399 // XXX bxe_fw_dump(sc);
8402 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8406 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8407 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8408 if (attn & BXE_GRC_TIMEOUT) {
8409 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8410 BLOGE(sc, "GRC time-out 0x%08x\n", val);
8412 if (attn & BXE_GRC_RSV) {
8413 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8414 BLOGE(sc, "GRC reserved 0x%08x\n", val);
8416 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8421 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8424 int port = SC_PORT(sc);
8426 uint32_t val0, mask0, val1, mask1;
8429 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8430 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8431 BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8432 /* CFC error attention */
8434 BLOGE(sc, "FATAL error from CFC\n");
8438 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8439 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8440 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8441 /* RQ_USDMDP_FIFO_OVERFLOW */
8442 if (val & 0x18000) {
8443 BLOGE(sc, "FATAL error from PXP\n");
8446 if (!CHIP_IS_E1x(sc)) {
8447 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8448 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8452 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8453 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8455 if (attn & AEU_PXP2_HW_INT_BIT) {
8456 /* CQ47854 workaround: do not panic on
8457 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8459 if (!CHIP_IS_E1x(sc)) {
8460 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8461 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8462 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8463 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8465 * If PXP2_EOP_ERROR_BIT is the only bit set in
8466 * STS0 and STS1 - clear it.
8468 * We probably lose additional attentions between
8469 * STS0 and STS_CLR0; in that case the user will not
8470 * be notified about them.
8472 if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8474 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8476 /* print the register, since no one can restore it */
8477 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8480 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8483 if (val0 & PXP2_EOP_ERROR_BIT) {
8484 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8487 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8488 * set then clear attention from PXP2 block without panic
8490 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8491 ((val1 & mask1) == 0))
8492 attn &= ~AEU_PXP2_HW_INT_BIT;
8497 if (attn & HW_INTERRUT_ASSERT_SET_2) {
8498 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8499 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8501 val = REG_RD(sc, reg_offset);
8502 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8503 REG_WR(sc, reg_offset, val);
8505 BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8506 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8507 bxe_panic(sc, ("HW block attention set2\n"));
8512 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8515 int port = SC_PORT(sc);
8519 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8520 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8521 BLOGE(sc, "DB hw attention 0x%08x\n", val);
8522 /* DORQ discard attention */
8524 BLOGE(sc, "FATAL error from DORQ\n");
8528 if (attn & HW_INTERRUT_ASSERT_SET_1) {
8529 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8530 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8532 val = REG_RD(sc, reg_offset);
8533 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8534 REG_WR(sc, reg_offset, val);
8536 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8537 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8538 bxe_panic(sc, ("HW block attention set1\n"));
8543 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8546 int port = SC_PORT(sc);
8550 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8551 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8553 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8554 val = REG_RD(sc, reg_offset);
8555 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8556 REG_WR(sc, reg_offset, val);
8558 BLOGW(sc, "SPIO5 hw attention\n");
8560 /* Fan failure attention */
8561 elink_hw_reset_phy(&sc->link_params);
8562 bxe_fan_failure(sc);
8565 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8566 bxe_acquire_phy_lock(sc);
8567 elink_handle_module_detect_int(&sc->link_params);
8568 bxe_release_phy_lock(sc);
8571 if (attn & HW_INTERRUT_ASSERT_SET_0) {
8572 val = REG_RD(sc, reg_offset);
8573 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8574 REG_WR(sc, reg_offset, val);
8576 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8577 (attn & HW_INTERRUT_ASSERT_SET_0)));
8582 bxe_attn_int_deasserted(struct bxe_softc *sc,
8583 uint32_t deasserted)
8585 struct attn_route attn;
8586 struct attn_route *group_mask;
8587 int port = SC_PORT(sc);
8592 uint8_t global = FALSE;
8595 * Need to take HW lock because MCP or other port might also
8596 * try to handle this event.
8598 bxe_acquire_alr(sc);
8600 if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8602 * In case of parity errors don't handle attentions so that
8603 * other functions can also "see" the parity errors.
8605 sc->recovery_state = BXE_RECOVERY_INIT;
8606 // XXX schedule a recovery task...
8607 /* disable HW interrupts */
8608 bxe_int_disable(sc);
8609 bxe_release_alr(sc);
8613 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8614 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8615 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8616 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8617 if (!CHIP_IS_E1x(sc)) {
8618 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8623 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8624 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8626 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8627 if (deasserted & (1 << index)) {
8628 group_mask = &sc->attn_group[index];
8631 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8632 group_mask->sig[0], group_mask->sig[1],
8633 group_mask->sig[2], group_mask->sig[3],
8634 group_mask->sig[4]);
8636 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8637 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8638 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8639 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8640 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8644 bxe_release_alr(sc);
8646 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8647 reg_addr = (HC_REG_COMMAND_REG + port*32 +
8648 COMMAND_REG_ATTN_BITS_CLR);
8650 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8655 "about to mask 0x%08x at %s addr 0x%08x\n", val,
8656 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8657 REG_WR(sc, reg_addr, val);
8659 if (~sc->attn_state & deasserted) {
8660 BLOGE(sc, "IGU error\n");
8663 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8664 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8666 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8668 aeu_mask = REG_RD(sc, reg_addr);
8670 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8671 aeu_mask, deasserted);
8672 aeu_mask |= (deasserted & 0x3ff);
8673 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8675 REG_WR(sc, reg_addr, aeu_mask);
8676 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8678 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8679 sc->attn_state &= ~deasserted;
8680 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8684 bxe_attn_int(struct bxe_softc *sc)
8686 /* read local copy of bits */
8687 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8688 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8689 uint32_t attn_state = sc->attn_state;
8691 /* look for changed bits */
8692 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
8693 uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
8696 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8697 attn_bits, attn_ack, asserted, deasserted);
8699 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8700 BLOGE(sc, "BAD attention state\n");
8703 /* handle bits that were raised */
8705 bxe_attn_int_asserted(sc, asserted);
8709 bxe_attn_int_deasserted(sc, deasserted);
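/*
 * Editor's sketch (standalone, illustrative): the changed-bit derivation
 * above. A bit is newly asserted when set in attn_bits but not yet acked or
 * tracked in attn_state, and newly deasserted in the opposite case. With
 * attn_bits=0b0110, attn_ack=0b0011, attn_state=0b0011: asserted=0b0100,
 * deasserted=0b0001. demo_attn_delta() is a hypothetical name.
 */
#include <stdint.h>

static void
demo_attn_delta(uint32_t attn_bits, uint32_t attn_ack, uint32_t attn_state,
                uint32_t *asserted, uint32_t *deasserted)
{
    *asserted   = (attn_bits & ~attn_ack & ~attn_state);
    *deasserted = (~attn_bits & attn_ack & attn_state);
}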
8714 bxe_update_dsb_idx(struct bxe_softc *sc)
8716 struct host_sp_status_block *def_sb = sc->def_sb;
8719 mb(); /* status block is written to by the chip */
8721 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8722 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8723 rc |= BXE_DEF_SB_ATT_IDX;
8726 if (sc->def_idx != def_sb->sp_sb.running_index) {
8727 sc->def_idx = def_sb->sp_sb.running_index;
8728 rc |= BXE_DEF_SB_IDX;
8736 static inline struct ecore_queue_sp_obj *
8737 bxe_cid_to_q_obj(struct bxe_softc *sc,
8740 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8741 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8745 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8747 struct ecore_mcast_ramrod_params rparam;
8750 memset(&rparam, 0, sizeof(rparam));
8752 rparam.mcast_obj = &sc->mcast_obj;
8756 /* clear pending state for the last command */
8757 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8759 /* if there are pending mcast commands - send them */
8760 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8761 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8764 "ERROR: Failed to send pending mcast commands (%d)\n",
8769 BXE_MCAST_UNLOCK(sc);
8773 bxe_handle_classification_eqe(struct bxe_softc *sc,
8774 union event_ring_elem *elem)
8776 unsigned long ramrod_flags = 0;
8778 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8779 struct ecore_vlan_mac_obj *vlan_mac_obj;
8781 /* always push next commands out, don't wait here */
8782 bit_set(&ramrod_flags, RAMROD_CONT);
8784 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8785 case ECORE_FILTER_MAC_PENDING:
8786 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8787 vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8790 case ECORE_FILTER_MCAST_PENDING:
8791 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8793 * This is only relevant for 57710 where multicast MACs are
8794 * configured as unicast MACs using the same ramrod.
8796 bxe_handle_mcast_eqe(sc);
8800 BLOGE(sc, "Unsupported classification command: %d\n",
8801 elem->message.data.eth_event.echo);
8805 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8808 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8809 } else if (rc > 0) {
8810 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8815 bxe_handle_rx_mode_eqe(struct bxe_softc *sc,
8816 union event_ring_elem *elem)
8818 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8820 /* send rx_mode command again if was requested */
8821 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8823 bxe_set_storm_rx_mode(sc);
8826 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED,
8828 bxe_set_iscsi_eth_rx_mode(sc, TRUE);
8830 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
8832 bxe_set_iscsi_eth_rx_mode(sc, FALSE);
8838 bxe_update_eq_prod(struct bxe_softc *sc,
8841 storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8842 wmb(); /* keep prod updates ordered */
8846 bxe_eq_int(struct bxe_softc *sc)
8848 uint16_t hw_cons, sw_cons, sw_prod;
8849 union event_ring_elem *elem;
8854 struct ecore_queue_sp_obj *q_obj;
8855 struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8856 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8858 hw_cons = le16toh(*sc->eq_cons_sb);
8861 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8862 * When we get to the next page we need to adjust so the loop
8863 * condition below will be met. The next element is the size of a
8864 * regular element, hence we increment by 1.
8866 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8871 * This function may never run in parallel with itself for a
8872 * specific sc, so there is no need for a read memory barrier here.
8874 sw_cons = sc->eq_cons;
8875 sw_prod = sc->eq_prod;
8877 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8878 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8882 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8884 elem = &sc->eq[EQ_DESC(sw_cons)];
8888 rc = bxe_iov_eq_sp_event(sc, elem);
8890 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc);
8895 /* elem CID originates from FW, actually LE */
8896 cid = SW_CID(elem->message.data.cfc_del_event.cid);
8897 opcode = elem->message.opcode;
8899 /* handle eq element */
8902 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
8903 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n");
8904 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event);
8908 case EVENT_RING_OPCODE_STAT_QUERY:
8909 BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8911 /* nothing to do with stats comp */
8914 case EVENT_RING_OPCODE_CFC_DEL:
8915 /* handle according to cid range */
8916 /* we may want to verify here that the sc state is HALTING */
8917 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8918 q_obj = bxe_cid_to_q_obj(sc, cid);
8919 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8924 case EVENT_RING_OPCODE_STOP_TRAFFIC:
8925 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8926 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8929 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8932 case EVENT_RING_OPCODE_START_TRAFFIC:
8933 BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8934 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8937 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8940 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8941 echo = elem->message.data.function_update_event.echo;
8942 if (echo == SWITCH_UPDATE) {
8943 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8944 if (f_obj->complete_cmd(sc, f_obj,
8945 ECORE_F_CMD_SWITCH_UPDATE)) {
8951 "AFEX: ramrod completed FUNCTION_UPDATE\n");
8953 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE);
8955 * We will perform the queues update from the sp_core_task as
8956 * all queue SP operations should run with CORE_LOCK.
8958 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state);
8959 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8965 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
8966 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS);
8967 bxe_after_afex_vif_lists(sc, elem);
8971 case EVENT_RING_OPCODE_FORWARD_SETUP:
8972 q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8973 if (q_obj->complete_cmd(sc, q_obj,
8974 ECORE_Q_CMD_SETUP_TX_ONLY)) {
8979 case EVENT_RING_OPCODE_FUNCTION_START:
8980 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8981 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8986 case EVENT_RING_OPCODE_FUNCTION_STOP:
8987 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8988 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8994 switch (opcode | sc->state) {
8995 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8996 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8997 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8998 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8999 rss_raw->clear_pending(rss_raw);
9002 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
9003 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
9004 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
9005 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
9006 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
9007 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
9008 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
9009 bxe_handle_classification_eqe(sc, elem);
9012 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
9013 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
9014 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
9015 BLOGD(sc, DBG_SP, "got mcast ramrod\n");
9016 bxe_handle_mcast_eqe(sc);
9019 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
9020 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
9021 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
9022 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
9023 bxe_handle_rx_mode_eqe(sc, elem);
9027 /* unknown event log error and continue */
9028 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
9029 elem->message.opcode, sc->state);
9037 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
9039 sc->eq_cons = sw_cons;
9040 sc->eq_prod = sw_prod;
9042 /* make sure that above mem writes were issued towards the memory */
9045 /* update producer */
9046 bxe_update_eq_prod(sc, sc->eq_prod);
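/*
 * Editor's sketch (standalone, illustrative): the next-page adjustment
 * described at the top of bxe_eq_int(). Assuming 256-entry EQ pages whose
 * last slot holds a next-page pointer (which is what the hw_cons/sw_cons
 * ranges in the comment imply), the consumer index skips that slot. The
 * DEMO_* constants and the helper name are assumptions for illustration.
 */
#include <stdint.h>

#define DEMO_EQ_DESC_CNT      256
#define DEMO_EQ_DESC_MAX_PAGE (DEMO_EQ_DESC_CNT - 1)

static uint16_t
demo_next_eq_idx(uint16_t idx)
{
    /* skip the last slot of each page; it is not a regular element */
    return ((uint16_t)((((idx + 1) & DEMO_EQ_DESC_MAX_PAGE) ==
                        DEMO_EQ_DESC_MAX_PAGE) ? (idx + 2) : (idx + 1)));
}
/* yields 0, 1, ..., 254, 256, ... matching the sw_cons range noted above */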
9050 bxe_handle_sp_tq(void *context,
9053 struct bxe_softc *sc = (struct bxe_softc *)context;
9056 BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
9058 /* what work needs to be performed? */
9059 status = bxe_update_dsb_idx(sc);
9061 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
9064 if (status & BXE_DEF_SB_ATT_IDX) {
9065 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
9067 status &= ~BXE_DEF_SB_ATT_IDX;
9070 /* SP events: STAT_QUERY and others */
9071 if (status & BXE_DEF_SB_IDX) {
9072 /* handle EQ completions */
9073 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
9075 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
9076 le16toh(sc->def_idx), IGU_INT_NOP, 1);
9077 status &= ~BXE_DEF_SB_IDX;
9080 /* if status is non zero then something went wrong */
9081 if (__predict_false(status)) {
9082 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
9085 /* ack status block only if something was actually handled */
9086 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
9087 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
9090 * Must be called after the EQ processing (since eq leads to sriov
9091 * ramrod completion flows).
9092 * This flow may have been scheduled by the arrival of a ramrod
9093 * completion, or by the sriov code rescheduling itself.
9095 // XXX bxe_iov_sp_task(sc);
9098 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */
9099 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
9101 bxe_link_report(sc);
9102 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
9108 bxe_handle_fp_tq(void *context,
9111 struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
9112 struct bxe_softc *sc = fp->sc;
9113 uint8_t more_tx = FALSE;
9114 uint8_t more_rx = FALSE;
9116 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
9119 * IFF_DRV_RUNNING state can't be checked here since we process
9120 * slowpath events on a client queue during setup. Instead
9121 * we need to add a "process/continue" flag here that the driver
9122 * can use to tell the task not to do anything.
9125 if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
9130 /* update the fastpath index */
9131 bxe_update_fp_sb_idx(fp);
9133 /* XXX add loop here if ever support multiple tx CoS */
9134 /* fp->txdata[cos] */
9135 if (bxe_has_tx_work(fp)) {
9137 more_tx = bxe_txeof(sc, fp);
9138 BXE_FP_TX_UNLOCK(fp);
9141 if (bxe_has_rx_work(fp)) {
9142 more_rx = bxe_rxeof(sc, fp);
9145 if (more_rx /*|| more_tx*/) {
9146 /* still more work to do */
9147 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9151 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9152 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9156 bxe_task_fp(struct bxe_fastpath *fp)
9158 struct bxe_softc *sc = fp->sc;
9159 uint8_t more_tx = FALSE;
9160 uint8_t more_rx = FALSE;
9162 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
9164 /* update the fastpath index */
9165 bxe_update_fp_sb_idx(fp);
9167 /* XXX add loop here if ever support multiple tx CoS */
9168 /* fp->txdata[cos] */
9169 if (bxe_has_tx_work(fp)) {
9171 more_tx = bxe_txeof(sc, fp);
9172 BXE_FP_TX_UNLOCK(fp);
9175 if (bxe_has_rx_work(fp)) {
9176 more_rx = bxe_rxeof(sc, fp);
9179 if (more_rx /*|| more_tx*/) {
9180 /* still more work to do, bail out of this ISR and process later */
9181 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9186 * Here we write the fastpath index taken before doing any tx or rx work.
9187 * It is quite possible that other hw events occurred up to this point and
9188 * were already processed accordingly above. Since we are about to
9189 * write an older fastpath index, another interrupt may arrive in which
9190 * we do no work.
9192 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9193 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9197 * Legacy interrupt entry point.
9199 * Verifies that the controller generated the interrupt and
9200 * then calls a separate routine to handle the various
9201 * interrupt causes: link, RX, and TX.
9204 bxe_intr_legacy(void *xsc)
9206 struct bxe_softc *sc = (struct bxe_softc *)xsc;
9207 struct bxe_fastpath *fp;
9208 uint16_t status, mask;
9211 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
9214 /* Don't handle any interrupts if we're not ready. */
9215 if (__predict_false(sc->intr_sem != 0)) {
9221 * 0 for ustorm, 1 for cstorm
9222 * the bits returned from ack_int() are 0-15
9223 * bit 0 = attention status block
9224 * bit 1 = fast path status block
9225 * a mask of 0x2 or more = tx/rx event
9226 * a mask of 1 = slow path event
9229 status = bxe_ack_int(sc);
9231 /* the interrupt is not for us */
9232 if (__predict_false(status == 0)) {
9233 BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
9237 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
9239 FOR_EACH_ETH_QUEUE(sc, i) {
9241 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
9242 if (status & mask) {
9243 /* acknowledge and disable further fastpath interrupts */
9244 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9251 if (CNIC_SUPPORT(sc)) {
9253 if (status & (mask | 0x1)) {
9260 if (__predict_false(status & 0x1)) {
9261 /* acknowledge and disable further slowpath interrupts */
9262 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9264 /* schedule slowpath handler */
9265 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9270 if (__predict_false(status)) {
9271 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
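/*
 * Editor's sketch (standalone, illustrative): the INTx status decoding
 * above. Bit 0 of the status word is the slowpath event; fastpath queue i
 * owns bit (1 + i), shifted one further when a CNIC queue sits in front of
 * the eth queues, hence the (0x2 << index) mask. demo_fp_intr_mask() is a
 * hypothetical name.
 */
#include <stdint.h>

static uint16_t
demo_fp_intr_mask(int fp_index, int cnic_present)
{
    /* mirrors: mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))) */
    return ((uint16_t)(0x2 << (fp_index + (cnic_present ? 1 : 0))));
}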
9275 /* slowpath interrupt entry point */
9277 bxe_intr_sp(void *xsc)
9279 struct bxe_softc *sc = (struct bxe_softc *)xsc;
9281 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
9283 /* acknowledge and disable further slowpath interrupts */
9284 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9286 /* schedule slowpath handler */
9287 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9290 /* fastpath interrupt entry point */
9292 bxe_intr_fp(void *xfp)
9294 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
9295 struct bxe_softc *sc = fp->sc;
9297 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
9300 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
9301 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
9304 /* Don't handle any interrupts if we're not ready. */
9305 if (__predict_false(sc->intr_sem != 0)) {
9310 /* acknowledge and disable further fastpath interrupts */
9311 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9316 /* Release all interrupts allocated by the driver. */
9318 bxe_interrupt_free(struct bxe_softc *sc)
9322 switch (sc->interrupt_mode) {
9323 case INTR_MODE_INTX:
9324 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
9325 if (sc->intr[0].resource != NULL) {
9326 bus_release_resource(sc->dev,
9329 sc->intr[0].resource);
9333 for (i = 0; i < sc->intr_count; i++) {
9334 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
9335 if (sc->intr[i].resource && sc->intr[i].rid) {
9336 bus_release_resource(sc->dev,
9339 sc->intr[i].resource);
9342 pci_release_msi(sc->dev);
9344 case INTR_MODE_MSIX:
9345 for (i = 0; i < sc->intr_count; i++) {
9346 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
9347 if (sc->intr[i].resource && sc->intr[i].rid) {
9348 bus_release_resource(sc->dev,
9351 sc->intr[i].resource);
9354 pci_release_msi(sc->dev);
9357 /* nothing to do as initial allocation failed */
9363 * This function determines and allocates the appropriate
9364 * interrupt based on system capabilities and user request.
9366 * The user may force a particular interrupt mode, specify
9367 * the number of receive queues, specify the method for
9368 * distributing received frames to receive queues, or use
9369 * the default settings which will automatically select the
9370 * best supported combination. In addition, the OS may or
9371 * may not support certain combinations of these settings.
9372 * This routine attempts to reconcile the settings requested
9373 * by the user with the capabilities available from the system
9374 * to select the optimal combination of features.
9377 * 0 = Success, !0 = Failure.
9380 bxe_interrupt_alloc(struct bxe_softc *sc)
9384 int num_requested = 0;
9385 int num_allocated = 0;
9389 /* get the number of available MSI/MSI-X interrupts from the OS */
9390 if (sc->interrupt_mode > 0) {
9391 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
9392 msix_count = pci_msix_count(sc->dev);
9395 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
9396 msi_count = pci_msi_count(sc->dev);
9399 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
9400 msi_count, msix_count);
9403 do { /* try allocating MSI-X interrupt resources (at least 2) */
9404 if (sc->interrupt_mode != INTR_MODE_MSIX) {
9408 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
9410 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9414 /* ask for the necessary number of MSI-X vectors */
9415 num_requested = min((sc->num_queues + 1), msix_count);
9417 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
9419 num_allocated = num_requested;
9420 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
9421 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
9422 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9426 if (num_allocated < 2) { /* possible? */
9427 BLOGE(sc, "MSI-X allocation less than 2!\n");
9428 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9429 pci_release_msi(sc->dev);
9433 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9434 num_requested, num_allocated);
9436 /* best effort so use the number of vectors allocated to us */
9437 sc->intr_count = num_allocated;
9438 sc->num_queues = num_allocated - 1;
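
/*
 * Example of the vector budget above (illustrative): with 4 RX/TX queues
 * the driver asks for min(4 + 1, msix_count) = 5 vectors; vector 0 is
 * reserved for the slowpath and vectors 1..4 serve the fastpath queues.
 * If the OS grants fewer vectors, num_queues shrinks to match.
 */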
9440 rid = 1; /* initial resource identifier */
9442 /* allocate the MSI-X vectors */
9443 for (i = 0; i < num_allocated; i++) {
9444 sc->intr[i].rid = (rid + i);
9446 if ((sc->intr[i].resource =
9447 bus_alloc_resource_any(sc->dev,
9450 RF_ACTIVE)) == NULL) {
9451 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9454 for (j = (i - 1); j >= 0; j--) {
9455 bus_release_resource(sc->dev,
9458 sc->intr[j].resource);
9463 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9464 pci_release_msi(sc->dev);
9468 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9472 do { /* try allocating MSI vector resources (at least 2) */
9473 if (sc->interrupt_mode != INTR_MODE_MSI) {
9477 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9479 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9483 /* ask for a single MSI vector */
9486 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9488 num_allocated = num_requested;
9489 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9490 BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9491 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9495 if (num_allocated != 1) { /* possible? */
9496 BLOGE(sc, "MSI allocation is not 1!\n");
9497 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9498 pci_release_msi(sc->dev);
9502 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9503 num_requested, num_allocated);
9505 /* best effort so use the number of vectors allocated to us */
9506 sc->intr_count = num_allocated;
9507 sc->num_queues = num_allocated;
9509 rid = 1; /* initial resource identifier */
9511 sc->intr[0].rid = rid;
9513 if ((sc->intr[0].resource =
9514 bus_alloc_resource_any(sc->dev,
9517 RF_ACTIVE)) == NULL) {
9518 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9521 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9522 pci_release_msi(sc->dev);
9526 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9529 do { /* try allocating INTx vector resources */
9530 if (sc->interrupt_mode != INTR_MODE_INTX) {
9534 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9536 /* only one vector for INTx */
9540 rid = 0; /* initial resource identifier */
9542 sc->intr[0].rid = rid;
9544 if ((sc->intr[0].resource =
9545 bus_alloc_resource_any(sc->dev,
9548 (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9549 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9552 sc->interrupt_mode = -1; /* Failed! */
9556 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9559 if (sc->interrupt_mode == -1) {
9560 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9564 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9565 sc->interrupt_mode, sc->num_queues);
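
/*
 * The three do/while blocks above form a fallback ladder. A minimal
 * sketch of the same policy is shown below; the bxe_try_*() helpers are
 * hypothetical and exist only for illustration.
 */
#if 0
static int
bxe_interrupt_alloc_sketch(struct bxe_softc *sc)
{
    if (bxe_try_msix(sc) == 0)  /* multiple vectors: sp + one per queue */
        return (0);
    if (bxe_try_msi(sc) == 0)   /* single vector, shared sp/fp handling */
        return (0);
    if (bxe_try_intx(sc) == 0)  /* legacy shared interrupt line */
        return (0);
    return (ENXIO);             /* corresponds to interrupt_mode == -1 */
}
#endif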
9573 bxe_interrupt_detach(struct bxe_softc *sc)
9575 struct bxe_fastpath *fp;
9578 /* release interrupt resources */
9579 for (i = 0; i < sc->intr_count; i++) {
9580 if (sc->intr[i].resource && sc->intr[i].tag) {
9581 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9582 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9586 for (i = 0; i < sc->num_queues; i++) {
9589 taskqueue_drain(fp->tq, &fp->tq_task);
9590 taskqueue_free(fp->tq);
9597 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9598 taskqueue_free(sc->sp_tq);
9604 * Enables interrupts and attach to the ISR.
9606 * When using multiple MSI/MSI-X vectors the first vector
9607 * is used for slowpath operations while all remaining
9608 * vectors are used for fastpath operations. If only a
9609 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9610 * ISR must look for both slowpath and fastpath completions.
9613 bxe_interrupt_attach(struct bxe_softc *sc)
9615 struct bxe_fastpath *fp;
9619 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9620 "bxe%d_sp_tq", sc->unit);
9621 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9622 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
9623 taskqueue_thread_enqueue,
9625 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9626 "%s", sc->sp_tq_name);
9629 for (i = 0; i < sc->num_queues; i++) {
9631 snprintf(fp->tq_name, sizeof(fp->tq_name),
9632 "bxe%d_fp%d_tq", sc->unit, i);
9633 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9634 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
9635 taskqueue_thread_enqueue,
9637 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9641 /* setup interrupt handlers */
9642 if (sc->interrupt_mode == INTR_MODE_MSIX) {
9643 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9646 * Setup the interrupt handler. Note that we pass the driver instance
9647 * to the interrupt handler for the slowpath.
9649 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9650 (INTR_TYPE_NET | INTR_MPSAFE),
9651 NULL, bxe_intr_sp, sc,
9652 &sc->intr[0].tag)) != 0) {
9653 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9654 goto bxe_interrupt_attach_exit;
9657 bus_describe_intr(sc->dev, sc->intr[0].resource,
9658 sc->intr[0].tag, "sp");
9660 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9662 /* initialize the fastpath vectors (note the first was used for sp) */
9663 for (i = 0; i < sc->num_queues; i++) {
9665 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9668 * Setup the interrupt handler. Note that we pass the
9669 * fastpath context to the interrupt handler in this
9672 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9673 (INTR_TYPE_NET | INTR_MPSAFE),
9674 NULL, bxe_intr_fp, fp,
9675 &sc->intr[i + 1].tag)) != 0) {
9676 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9678 goto bxe_interrupt_attach_exit;
9681 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9682 sc->intr[i + 1].tag, "fp%02d", i);
9684 /* bind the fastpath instance to a cpu */
9685 if (sc->num_queues > 1) {
9686 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9689 fp->state = BXE_FP_STATE_IRQ;
9691 } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9692 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9695 * Setup the interrupt handler. Note that we pass the
9696 * driver instance to the interrupt handler which
9697 * will handle both the slowpath and fastpath.
9699 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9700 (INTR_TYPE_NET | INTR_MPSAFE),
9701 NULL, bxe_intr_legacy, sc,
9702 &sc->intr[0].tag)) != 0) {
9703 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9704 goto bxe_interrupt_attach_exit;
9707 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9708 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9711 * Setup the interrupt handler. Note that we pass the
9712 * driver instance to the interrupt handler which
9713 * will handle both the slowpath and fastpath.
9715 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9716 (INTR_TYPE_NET | INTR_MPSAFE),
9717 NULL, bxe_intr_legacy, sc,
9718 &sc->intr[0].tag)) != 0) {
9719 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9720 goto bxe_interrupt_attach_exit;
9724 bxe_interrupt_attach_exit:
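
    /*
     * Summary of the wiring above (illustrative): in MSI-X mode vector 0
     * is attached to bxe_intr_sp(sc) and vectors 1..num_queues to
     * bxe_intr_fp(fp); in MSI and INTx modes the single vector runs
     * bxe_intr_legacy(sc), which demultiplexes slowpath and fastpath
     * events itself.
     */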
9729 static int bxe_init_hw_common_chip(struct bxe_softc *sc);
9730 static int bxe_init_hw_common(struct bxe_softc *sc);
9731 static int bxe_init_hw_port(struct bxe_softc *sc);
9732 static int bxe_init_hw_func(struct bxe_softc *sc);
9733 static void bxe_reset_common(struct bxe_softc *sc);
9734 static void bxe_reset_port(struct bxe_softc *sc);
9735 static void bxe_reset_func(struct bxe_softc *sc);
9736 static int bxe_gunzip_init(struct bxe_softc *sc);
9737 static void bxe_gunzip_end(struct bxe_softc *sc);
9738 static int bxe_init_firmware(struct bxe_softc *sc);
9739 static void bxe_release_firmware(struct bxe_softc *sc);
9742 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9743 .init_hw_cmn_chip = bxe_init_hw_common_chip,
9744 .init_hw_cmn = bxe_init_hw_common,
9745 .init_hw_port = bxe_init_hw_port,
9746 .init_hw_func = bxe_init_hw_func,
9748 .reset_hw_cmn = bxe_reset_common,
9749 .reset_hw_port = bxe_reset_port,
9750 .reset_hw_func = bxe_reset_func,
9752 .gunzip_init = bxe_gunzip_init,
9753 .gunzip_end = bxe_gunzip_end,
9755 .init_fw = bxe_init_firmware,
9756 .release_fw = bxe_release_firmware,
9760 bxe_init_func_obj(struct bxe_softc *sc)
9764 ecore_init_func_obj(sc,
9766 BXE_SP(sc, func_rdata),
9767 BXE_SP_MAPPING(sc, func_rdata),
9768 BXE_SP(sc, func_afex_rdata),
9769 BXE_SP_MAPPING(sc, func_afex_rdata),
9774 bxe_init_hw(struct bxe_softc *sc,
9777 struct ecore_func_state_params func_params = { NULL };
9780 /* prepare the parameters for function state transitions */
9781 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9783 func_params.f_obj = &sc->func_obj;
9784 func_params.cmd = ECORE_F_CMD_HW_INIT;
9786 func_params.params.hw_init.load_phase = load_code;
9789 * Via a plethora of function pointers, we will eventually reach
9790 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9792 rc = ecore_func_state_change(sc, &func_params);
9798 bxe_fill(struct bxe_softc *sc,
9805 if (!(len % 4) && !(addr % 4)) {
9806 for (i = 0; i < len; i += 4) {
9807 REG_WR(sc, (addr + i), fill);
9810 for (i = 0; i < len; i++) {
9811 REG_WR8(sc, (addr + i), fill);
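
/*
 * Illustration: bxe_fill() uses 32-bit writes only when both the length
 * and the address are 4-byte aligned. E.g. addr=0x100, len=16 issues four
 * REG_WR() dword writes, while addr=0x102, len=16 falls back to sixteen
 * REG_WR8() byte writes.
 */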
9816 /* writes FP SP data to FW - data_size in dwords */
9818 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9820 uint32_t *sb_data_p,
9825 for (index = 0; index < data_size; index++) {
9827 (BAR_CSTRORM_INTMEM +
9828 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9829 (sizeof(uint32_t) * index)),
9830 *(sb_data_p + index));
9835 bxe_zero_fp_sb(struct bxe_softc *sc,
9838 struct hc_status_block_data_e2 sb_data_e2;
9839 struct hc_status_block_data_e1x sb_data_e1x;
9840 uint32_t *sb_data_p;
9841 uint32_t data_size = 0;
9843 if (!CHIP_IS_E1x(sc)) {
9844 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9845 sb_data_e2.common.state = SB_DISABLED;
9846 sb_data_e2.common.p_func.vf_valid = FALSE;
9847 sb_data_p = (uint32_t *)&sb_data_e2;
9848 data_size = (sizeof(struct hc_status_block_data_e2) /
9851 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9852 sb_data_e1x.common.state = SB_DISABLED;
9853 sb_data_e1x.common.p_func.vf_valid = FALSE;
9854 sb_data_p = (uint32_t *)&sb_data_e1x;
9855 data_size = (sizeof(struct hc_status_block_data_e1x) /
9859 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9861 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9862 0, CSTORM_STATUS_BLOCK_SIZE);
9863 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9864 0, CSTORM_SYNC_BLOCK_SIZE);
9868 bxe_wr_sp_sb_data(struct bxe_softc *sc,
9869 struct hc_sp_status_block_data *sp_sb_data)
9874 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9877 (BAR_CSTRORM_INTMEM +
9878 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9879 (i * sizeof(uint32_t))),
9880 *((uint32_t *)sp_sb_data + i));
9885 bxe_zero_sp_sb(struct bxe_softc *sc)
9887 struct hc_sp_status_block_data sp_sb_data;
9889 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9891 sp_sb_data.state = SB_DISABLED;
9892 sp_sb_data.p_func.vf_valid = FALSE;
9894 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9897 (BAR_CSTRORM_INTMEM +
9898 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9899 0, CSTORM_SP_STATUS_BLOCK_SIZE);
9901 (BAR_CSTRORM_INTMEM +
9902 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9903 0, CSTORM_SP_SYNC_BLOCK_SIZE);
9907 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9911 hc_sm->igu_sb_id = igu_sb_id;
9912 hc_sm->igu_seg_id = igu_seg_id;
9913 hc_sm->timer_value = 0xFF;
9914 hc_sm->time_to_expire = 0xFFFFFFFF;
9918 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9920 /* zero out state machine indices */
9923 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9926 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9927 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9928 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9929 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9934 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9935 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9938 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9939 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9940 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9941 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9942 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9943 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9944 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9945 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9949 bxe_init_sb(struct bxe_softc *sc,
9956 struct hc_status_block_data_e2 sb_data_e2;
9957 struct hc_status_block_data_e1x sb_data_e1x;
9958 struct hc_status_block_sm *hc_sm_p;
9959 uint32_t *sb_data_p;
9963 if (CHIP_INT_MODE_IS_BC(sc)) {
9964 igu_seg_id = HC_SEG_ACCESS_NORM;
9966 igu_seg_id = IGU_SEG_ACCESS_NORM;
9969 bxe_zero_fp_sb(sc, fw_sb_id);
9971 if (!CHIP_IS_E1x(sc)) {
9972 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9973 sb_data_e2.common.state = SB_ENABLED;
9974 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9975 sb_data_e2.common.p_func.vf_id = vfid;
9976 sb_data_e2.common.p_func.vf_valid = vf_valid;
9977 sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9978 sb_data_e2.common.same_igu_sb_1b = TRUE;
9979 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9980 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9981 hc_sm_p = sb_data_e2.common.state_machine;
9982 sb_data_p = (uint32_t *)&sb_data_e2;
9983 data_size = (sizeof(struct hc_status_block_data_e2) /
9985 bxe_map_sb_state_machines(sb_data_e2.index_data);
9987 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9988 sb_data_e1x.common.state = SB_ENABLED;
9989 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9990 sb_data_e1x.common.p_func.vf_id = 0xff;
9991 sb_data_e1x.common.p_func.vf_valid = FALSE;
9992 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9993 sb_data_e1x.common.same_igu_sb_1b = TRUE;
9994 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9995 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9996 hc_sm_p = sb_data_e1x.common.state_machine;
9997 sb_data_p = (uint32_t *)&sb_data_e1x;
9998 data_size = (sizeof(struct hc_status_block_data_e1x) /
10000 bxe_map_sb_state_machines(sb_data_e1x.index_data);
10003 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
10004 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
10006 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
10008 /* write indices to HW - PCI guarantees endianness of regpairs */
10009 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
10012 static inline uint8_t
10013 bxe_fp_qzone_id(struct bxe_fastpath *fp)
10015 if (CHIP_IS_E1x(fp->sc)) {
10016 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
10018 return (fp->cl_id);
10022 static inline uint32_t
10023 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
10024 struct bxe_fastpath *fp)
10026 uint32_t offset = BAR_USTRORM_INTMEM;
10030 return (PXP_VF_ADDR_USDM_QUEUES_START +
10031 (sc->acquire_resp.resc.hw_qid[fp->index] *
10032 sizeof(struct ustorm_queue_zone_data)));
10035 if (!CHIP_IS_E1x(sc)) {
10036 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
10038 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
10045 bxe_init_eth_fp(struct bxe_softc *sc,
10048 struct bxe_fastpath *fp = &sc->fp[idx];
10049 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
10050 unsigned long q_type = 0;
10056 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
10057 "bxe%d_fp%d_tx_lock", sc->unit, idx);
10058 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
10060 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
10061 "bxe%d_fp%d_rx_lock", sc->unit, idx);
10062 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
10064 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
10065 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
10067 fp->cl_id = (CHIP_IS_E1x(sc)) ?
10068 (SC_L_ID(sc) + idx) :
10069 /* want client ID same as IGU SB ID for non-E1 */
10071 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
10073 /* setup sb indices */
10074 if (!CHIP_IS_E1x(sc)) {
10075 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
10076 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
10078 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
10079 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
10082 /* init shortcut */
10083 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
10085 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
10088 * XXX If multiple CoS are ever supported then each fastpath structure
10089 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
10091 for (cos = 0; cos < sc->max_cos; cos++) {
10094 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
10096 /* nothing more for a VF to do */
10101 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
10102 fp->fw_sb_id, fp->igu_sb_id);
10104 bxe_update_fp_sb_idx(fp);
10106 /* Configure Queue State object */
10107 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
10108 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
10110 ecore_init_queue_obj(sc,
10111 &sc->sp_objs[idx].q_obj,
10116 BXE_SP(sc, q_rdata),
10117 BXE_SP_MAPPING(sc, q_rdata),
10120 /* configure classification DBs */
10121 ecore_init_mac_obj(sc,
10122 &sc->sp_objs[idx].mac_obj,
10126 BXE_SP(sc, mac_rdata),
10127 BXE_SP_MAPPING(sc, mac_rdata),
10128 ECORE_FILTER_MAC_PENDING,
10130 ECORE_OBJ_TYPE_RX_TX,
10133 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
10134 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
10138 bxe_update_rx_prod(struct bxe_softc *sc,
10139 struct bxe_fastpath *fp,
10140 uint16_t rx_bd_prod,
10141 uint16_t rx_cq_prod,
10142 uint16_t rx_sge_prod)
10144 struct ustorm_eth_rx_producers rx_prods = { 0 };
10147 /* update producers */
10148 rx_prods.bd_prod = rx_bd_prod;
10149 rx_prods.cqe_prod = rx_cq_prod;
10150 rx_prods.sge_prod = rx_sge_prod;
10153 * Make sure that the BD and SGE data is updated before updating the
10154 * producers since FW might read the BD/SGE right after the producer
10156 * This is only applicable for weak-ordered memory model archs such
10157 * as IA-64. The following barrier is also mandatory since the FW
10158 * assumes BDs must have buffers.
10162 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
10164 (fp->ustorm_rx_prods_offset + (i * 4)),
10165 ((uint32_t *)&rx_prods)[i]);
10168 wmb(); /* keep prod updates ordered */
10171 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
10172 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
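
/*
 * Schematic of the publish sequence above (illustration only; 'ring',
 * 'buf' and 'prod_reg' are hypothetical names, not driver symbols):
 */
#if 0
    ring[prod] = buf;     /* 1. fill the descriptor                     */
    wmb();                /* 2. make it visible before the index        */
    *prod_reg = prod;     /* 3. publish the new producer to the FW      */
    wmb();                /* 4. keep successive producer updates ordered */
#endif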
10176 bxe_init_rx_rings(struct bxe_softc *sc)
10178 struct bxe_fastpath *fp;
10181 for (i = 0; i < sc->num_queues; i++) {
10184 fp->rx_bd_cons = 0;
10187 * Activate the BD ring...
10188 * Warning, this will generate an interrupt (to the TSTORM)
10189 * so this can only be done after the chip is initialized
10191 bxe_update_rx_prod(sc, fp,
10200 if (CHIP_IS_E1(sc)) {
10202 (BAR_USTRORM_INTMEM +
10203 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
10204 U64_LO(fp->rcq_dma.paddr));
10206 (BAR_USTRORM_INTMEM +
10207 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
10208 U64_HI(fp->rcq_dma.paddr));
10214 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
10216 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
10217 fp->tx_db.data.zero_fill1 = 0;
10218 fp->tx_db.data.prod = 0;
10220 fp->tx_pkt_prod = 0;
10221 fp->tx_pkt_cons = 0;
10222 fp->tx_bd_prod = 0;
10223 fp->tx_bd_cons = 0;
10224 fp->eth_q_stats.tx_pkts = 0;
10228 bxe_init_tx_rings(struct bxe_softc *sc)
10232 for (i = 0; i < sc->num_queues; i++) {
10235 for (cos = 0; cos < sc->max_cos; cos++) {
10236 bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]);
10239 bxe_init_tx_ring_one(&sc->fp[i]);
10245 bxe_init_def_sb(struct bxe_softc *sc)
10247 struct host_sp_status_block *def_sb = sc->def_sb;
10248 bus_addr_t mapping = sc->def_sb_dma.paddr;
10249 int igu_sp_sb_index;
10251 int port = SC_PORT(sc);
10252 int func = SC_FUNC(sc);
10253 int reg_offset, reg_offset_en5;
10256 struct hc_sp_status_block_data sp_sb_data;
10258 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
10260 if (CHIP_INT_MODE_IS_BC(sc)) {
10261 igu_sp_sb_index = DEF_SB_IGU_ID;
10262 igu_seg_id = HC_SEG_ACCESS_DEF;
10264 igu_sp_sb_index = sc->igu_dsb_id;
10265 igu_seg_id = IGU_SEG_ACCESS_DEF;
10269 section = ((uint64_t)mapping +
10270 offsetof(struct host_sp_status_block, atten_status_block));
10271 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
10272 sc->attn_state = 0;
10274 reg_offset = (port) ?
10275 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
10276 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
10277 reg_offset_en5 = (port) ?
10278 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
10279 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
10281 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
10282 /* take care of sig[0]..sig[4] */
10283 for (sindex = 0; sindex < 4; sindex++) {
10284 sc->attn_group[index].sig[sindex] =
10285 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
10288 if (!CHIP_IS_E1x(sc)) {
10290 * enable5 is separate from the rest of the registers,
10291 * and the address skip is 4 and not 16 between the different groups.
10294 sc->attn_group[index].sig[4] =
10295 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
10297 sc->attn_group[index].sig[4] = 0;
10301 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10302 reg_offset = (port) ?
10303 HC_REG_ATTN_MSG1_ADDR_L :
10304 HC_REG_ATTN_MSG0_ADDR_L;
10305 REG_WR(sc, reg_offset, U64_LO(section));
10306 REG_WR(sc, (reg_offset + 4), U64_HI(section));
10307 } else if (!CHIP_IS_E1x(sc)) {
10308 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
10309 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
10312 section = ((uint64_t)mapping +
10313 offsetof(struct host_sp_status_block, sp_sb));
10315 bxe_zero_sp_sb(sc);
10317 /* PCI guarantees endianness of the regpair */
10318 sp_sb_data.state = SB_ENABLED;
10319 sp_sb_data.host_sb_addr.lo = U64_LO(section);
10320 sp_sb_data.host_sb_addr.hi = U64_HI(section);
10321 sp_sb_data.igu_sb_id = igu_sp_sb_index;
10322 sp_sb_data.igu_seg_id = igu_seg_id;
10323 sp_sb_data.p_func.pf_id = func;
10324 sp_sb_data.p_func.vnic_id = SC_VN(sc);
10325 sp_sb_data.p_func.vf_id = 0xff;
10327 bxe_wr_sp_sb_data(sc, &sp_sb_data);
10329 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
10333 bxe_init_sp_ring(struct bxe_softc *sc)
10335 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
10336 sc->spq_prod_idx = 0;
10337 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
10338 sc->spq_prod_bd = sc->spq;
10339 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
10343 bxe_init_eq_ring(struct bxe_softc *sc)
10345 union event_ring_elem *elem;
10348 for (i = 1; i <= NUM_EQ_PAGES; i++) {
10349 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
10351 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
10353 (i % NUM_EQ_PAGES)));
10354 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
10356 (i % NUM_EQ_PAGES)));
10360 sc->eq_prod = NUM_EQ_DESC;
10361 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
10363 atomic_store_rel_long(&sc->eq_spq_left,
10364 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
10365 NUM_EQ_DESC) - 1));
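
/*
 * Worked example of the next-page chaining above (illustrative): for each
 * i the last descriptor of page (i - 1) is turned into a link pointing at
 * page (i % NUM_EQ_PAGES), so page 0 links to page 1, page 1 to page 2,
 * and the last page wraps back to page 0, forming a ring of EQ pages.
 */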
10369 bxe_init_internal_common(struct bxe_softc *sc)
10373 if (IS_MF_SI(sc)) {
10375 * In switch independent mode, the TSTORM needs to accept
10376 * packets that failed classification, since approximate match
10377 * mac addresses aren't written to NIG LLH.
10380 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
10382 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
10384 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
10389 * Zero this manually as its initialization is currently missing in the init tool.
10392 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
10394 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
10398 if (!CHIP_IS_E1x(sc)) {
10399 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
10400 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
10405 bxe_init_internal(struct bxe_softc *sc,
10406 uint32_t load_code)
10408 switch (load_code) {
10409 case FW_MSG_CODE_DRV_LOAD_COMMON:
10410 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
10411 bxe_init_internal_common(sc);
10414 case FW_MSG_CODE_DRV_LOAD_PORT:
10415 /* nothing to do */
10418 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
10419 /* internal memory per function is initialized inside bxe_pf_init */
10423 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
10429 storm_memset_func_cfg(struct bxe_softc *sc,
10430 struct tstorm_eth_function_common_config *tcfg,
10436 addr = (BAR_TSTRORM_INTMEM +
10437 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
10438 size = sizeof(struct tstorm_eth_function_common_config);
10439 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
10443 bxe_func_init(struct bxe_softc *sc,
10444 struct bxe_func_init_params *p)
10446 struct tstorm_eth_function_common_config tcfg = { 0 };
10448 if (CHIP_IS_E1x(sc)) {
10449 storm_memset_func_cfg(sc, &tcfg, p->func_id);
10452 /* Enable the function in the FW */
10453 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10454 storm_memset_func_en(sc, p->func_id, 1);
10457 if (p->func_flgs & FUNC_FLG_SPQ) {
10458 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10460 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10466 * Calculates the sum of vn_min_rates.
10467 * It's needed for further normalizing of the min_rates.
10469 * sum of vn_min_rates.
10471 * 0 - if all the min_rates are 0.
10472 * In the latter case the fairness algorithm should be deactivated.
10473 * If not all min rates are zero, then those that are zero will be set to 1.
10476 bxe_calc_vn_min(struct bxe_softc *sc,
10477 struct cmng_init_input *input)
10480 uint32_t vn_min_rate;
10484 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10485 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10486 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10487 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10489 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10490 /* skip hidden VNs */
10492 } else if (!vn_min_rate) {
10493 /* If min rate is zero - set it to 100 */
10494 vn_min_rate = DEF_MIN_RATE;
10499 input->vnic_min_rate[vn] = vn_min_rate;
10502 /* if ETS or all min rates are zeros - disable fairness */
10503 if (BXE_IS_ETS_ENABLED(sc)) {
10504 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10505 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10506 } else if (all_zero) {
10507 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10508 BLOGD(sc, DBG_LOAD,
10509 "Fariness disabled (all MIN values are zeroes)\n");
10511 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
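
/*
 * Worked example (illustrative): a MIN_BW field of 25 yields vn_min_rate
 * = 25 * 100 = 2500, on the same Mbps-like scale used for vn_max_rate
 * below; a field of 0 is bumped to DEF_MIN_RATE (100) so the VN still
 * gets a nonzero share while fairness is active.
 */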
10515 static inline uint16_t
10516 bxe_extract_max_cfg(struct bxe_softc *sc,
10519 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10520 FUNC_MF_CFG_MAX_BW_SHIFT);
10523 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10531 bxe_calc_vn_max(struct bxe_softc *sc,
10533 struct cmng_init_input *input)
10535 uint16_t vn_max_rate;
10536 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10539 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10542 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10544 if (IS_MF_SI(sc)) {
10545 /* max_cfg is in percent of link speed */
10546 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10547 } else { /* SD modes */
10548 /* max_cfg is absolute in 100Mb units */
10549 vn_max_rate = (max_cfg * 100);
10553 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10555 input->vnic_max_rate[vn] = vn_max_rate;
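
/*
 * Worked example (illustrative): with max_cfg = 30, switch-independent
 * mode on a 10G link gives vn_max_rate = (10000 * 30) / 100 = 3000 Mbps,
 * while the SD modes treat max_cfg as absolute 100 Mb units and give
 * vn_max_rate = 30 * 100 = 3000 as well.
 */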
10559 bxe_cmng_fns_init(struct bxe_softc *sc,
10563 struct cmng_init_input input;
10566 memset(&input, 0, sizeof(struct cmng_init_input));
10568 input.port_rate = sc->link_vars.line_speed;
10570 if (cmng_type == CMNG_FNS_MINMAX) {
10571 /* read mf conf from shmem */
10573 bxe_read_mf_cfg(sc);
10576 /* get VN min rate and enable fairness if not 0 */
10577 bxe_calc_vn_min(sc, &input);
10579 /* get VN max rate */
10580 if (sc->port.pmf) {
10581 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10582 bxe_calc_vn_max(sc, vn, &input);
10586 /* always enable rate shaping and fairness */
10587 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10589 ecore_init_cmng(&input, &sc->cmng);
10593 /* rate shaping and fairness are disabled */
10594 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10598 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10600 if (CHIP_REV_IS_SLOW(sc)) {
10601 return (CMNG_FNS_NONE);
10605 return (CMNG_FNS_MINMAX);
10608 return (CMNG_FNS_NONE);
10612 storm_memset_cmng(struct bxe_softc *sc,
10613 struct cmng_init *cmng,
10621 addr = (BAR_XSTRORM_INTMEM +
10622 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10623 size = sizeof(struct cmng_struct_per_port);
10624 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10626 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10627 func = func_by_vn(sc, vn);
10629 addr = (BAR_XSTRORM_INTMEM +
10630 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10631 size = sizeof(struct rate_shaping_vars_per_vn);
10632 ecore_storm_memset_struct(sc, addr, size,
10633 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10635 addr = (BAR_XSTRORM_INTMEM +
10636 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10637 size = sizeof(struct fairness_vars_per_vn);
10638 ecore_storm_memset_struct(sc, addr, size,
10639 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10644 bxe_pf_init(struct bxe_softc *sc)
10646 struct bxe_func_init_params func_init = { 0 };
10647 struct event_ring_data eq_data = { { 0 } };
10650 if (!CHIP_IS_E1x(sc)) {
10651 /* reset IGU PF statistics: MSIX + ATTN */
10654 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10655 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10656 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10660 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10661 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10662 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10663 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10667 /* function setup flags */
10668 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10671 * This flag is relevant for E1x only.
10672 * E2 doesn't have a TPA configuration at the function level.
10674 flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10676 func_init.func_flgs = flags;
10677 func_init.pf_id = SC_FUNC(sc);
10678 func_init.func_id = SC_FUNC(sc);
10679 func_init.spq_map = sc->spq_dma.paddr;
10680 func_init.spq_prod = sc->spq_prod_idx;
10682 bxe_func_init(sc, &func_init);
10684 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10687 * Congestion management values depend on the link rate.
10688 * There is no active link so initial link rate is set to 10Gbps.
10689 * When the link comes up the congestion management values are
10690 * re-calculated according to the actual link rate.
10692 sc->link_vars.line_speed = SPEED_10000;
10693 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10695 /* Only the PMF sets the HW */
10696 if (sc->port.pmf) {
10697 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10700 /* init Event Queue - PCI bus guarantees correct endianness */
10701 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10702 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10703 eq_data.producer = sc->eq_prod;
10704 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10705 eq_data.sb_id = DEF_SB_ID;
10706 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10710 bxe_hc_int_enable(struct bxe_softc *sc)
10712 int port = SC_PORT(sc);
10713 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10714 uint32_t val = REG_RD(sc, addr);
10715 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10716 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10717 (sc->intr_count == 1)) ? TRUE : FALSE;
10718 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10721 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10722 HC_CONFIG_0_REG_INT_LINE_EN_0);
10723 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10724 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10726 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10729 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10730 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10731 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10732 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10734 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10735 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10736 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10737 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10739 if (!CHIP_IS_E1(sc)) {
10740 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10743 REG_WR(sc, addr, val);
10745 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10749 if (CHIP_IS_E1(sc)) {
10750 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10753 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10754 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10756 REG_WR(sc, addr, val);
10758 /* ensure that HC_CONFIG is written before leading/trailing edge config */
10761 if (!CHIP_IS_E1(sc)) {
10762 /* init leading/trailing edge */
10764 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10765 if (sc->port.pmf) {
10766 /* enable nig and gpio3 attention */
10773 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10774 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10777 /* make sure that interrupts are indeed enabled from here on */
10782 bxe_igu_int_enable(struct bxe_softc *sc)
10785 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10786 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10787 (sc->intr_count == 1)) ? TRUE : FALSE;
10788 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10790 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10793 val &= ~(IGU_PF_CONF_INT_LINE_EN |
10794 IGU_PF_CONF_SINGLE_ISR_EN);
10795 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10796 IGU_PF_CONF_ATTN_BIT_EN);
10798 val |= IGU_PF_CONF_SINGLE_ISR_EN;
10801 val &= ~IGU_PF_CONF_INT_LINE_EN;
10802 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10803 IGU_PF_CONF_ATTN_BIT_EN |
10804 IGU_PF_CONF_SINGLE_ISR_EN);
10806 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10807 val |= (IGU_PF_CONF_INT_LINE_EN |
10808 IGU_PF_CONF_ATTN_BIT_EN |
10809 IGU_PF_CONF_SINGLE_ISR_EN);
10812 /* clean previous status - need to configure igu prior to ack */
10813 if ((!msix) || single_msix) {
10814 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10818 val |= IGU_PF_CONF_FUNC_EN;
10820 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10821 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10823 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10827 /* init leading/trailing edge */
10829 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10830 if (sc->port.pmf) {
10831 /* enable nig and gpio3 attention */
10838 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10839 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10841 /* make sure that interrupts are indeed enabled from here on */
10846 bxe_int_enable(struct bxe_softc *sc)
10848 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10849 bxe_hc_int_enable(sc);
10851 bxe_igu_int_enable(sc);
10856 bxe_hc_int_disable(struct bxe_softc *sc)
10858 int port = SC_PORT(sc);
10859 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10860 uint32_t val = REG_RD(sc, addr);
10863 * In E1 we must use only PCI configuration space to disable MSI/MSIX
10864 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10867 if (CHIP_IS_E1(sc)) {
10869 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10870 * to prevent the HC from sending interrupts after we exit the function.
10872 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10874 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10875 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10876 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10878 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10879 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10880 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10881 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10884 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10886 /* flush all outstanding writes */
10889 REG_WR(sc, addr, val);
10890 if (REG_RD(sc, addr) != val) {
10891 BLOGE(sc, "proper val not read from HC IGU!\n");
10896 bxe_igu_int_disable(struct bxe_softc *sc)
10898 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10900 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10901 IGU_PF_CONF_INT_LINE_EN |
10902 IGU_PF_CONF_ATTN_BIT_EN);
10904 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10906 /* flush all outstanding writes */
10909 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10910 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10911 BLOGE(sc, "proper val not read from IGU!\n");
10916 bxe_int_disable(struct bxe_softc *sc)
10918 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10919 bxe_hc_int_disable(sc);
10921 bxe_igu_int_disable(sc);
10926 bxe_nic_init(struct bxe_softc *sc,
10931 for (i = 0; i < sc->num_queues; i++) {
10932 bxe_init_eth_fp(sc, i);
10935 rmb(); /* ensure status block indices were read */
10937 bxe_init_rx_rings(sc);
10938 bxe_init_tx_rings(sc);
10944 /* initialize MOD_ABS interrupts */
10945 elink_init_mod_abs_int(sc, &sc->link_vars,
10946 sc->devinfo.chip_id,
10947 sc->devinfo.shmem_base,
10948 sc->devinfo.shmem2_base,
10951 bxe_init_def_sb(sc);
10952 bxe_update_dsb_idx(sc);
10953 bxe_init_sp_ring(sc);
10954 bxe_init_eq_ring(sc);
10955 bxe_init_internal(sc, load_code);
10957 bxe_stats_init(sc);
10959 /* flush all before enabling interrupts */
10962 bxe_int_enable(sc);
10964 /* check for SPIO5 */
10965 bxe_attn_int_deasserted0(sc,
10967 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10969 AEU_INPUTS_ATTN_BITS_SPIO5);
10973 bxe_init_objs(struct bxe_softc *sc)
10975 /* mcast rules must be added to tx if tx switching is enabled */
10976 ecore_obj_type o_type =
10977 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10980 /* RX_MODE controlling object */
10981 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10983 /* multicast configuration controlling object */
10984 ecore_init_mcast_obj(sc,
10990 BXE_SP(sc, mcast_rdata),
10991 BXE_SP_MAPPING(sc, mcast_rdata),
10992 ECORE_FILTER_MCAST_PENDING,
10996 /* Setup CAM credit pools */
10997 ecore_init_mac_credit_pool(sc,
11000 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
11001 VNICS_PER_PATH(sc));
11003 ecore_init_vlan_credit_pool(sc,
11005 SC_ABS_FUNC(sc) >> 1,
11006 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
11007 VNICS_PER_PATH(sc));
11009 /* RSS configuration object */
11010 ecore_init_rss_config_obj(sc,
11016 BXE_SP(sc, rss_rdata),
11017 BXE_SP_MAPPING(sc, rss_rdata),
11018 ECORE_FILTER_RSS_CONF_PENDING,
11019 &sc->sp_state, ECORE_OBJ_TYPE_RX);
11023 * Initialize the function. This must be called before sending CLIENT_SETUP
11024 * for the first client.
11027 bxe_func_start(struct bxe_softc *sc)
11029 struct ecore_func_state_params func_params = { NULL };
11030 struct ecore_func_start_params *start_params = &func_params.params.start;
11032 /* Prepare parameters for function state transitions */
11033 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
11035 func_params.f_obj = &sc->func_obj;
11036 func_params.cmd = ECORE_F_CMD_START;
11038 /* Function parameters */
11039 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
11040 start_params->sd_vlan_tag = OVLAN(sc);
11042 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
11043 start_params->network_cos_mode = STATIC_COS;
11044 } else { /* CHIP_IS_E1X */
11045 start_params->network_cos_mode = FW_WRR;
11048 start_params->gre_tunnel_mode = 0;
11049 start_params->gre_tunnel_rss = 0;
11051 return (ecore_func_state_change(sc, &func_params));
11055 bxe_set_power_state(struct bxe_softc *sc,
11060 /* If there is no power capability, log a warning and succeed */
11061 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
11062 BLOGW(sc, "No power capability\n");
11066 pmcsr = pci_read_config(sc->dev,
11067 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11072 pci_write_config(sc->dev,
11073 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11074 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
11076 if (pmcsr & PCIM_PSTAT_DMASK) {
11077 /* delay required during transition out of D3hot */
11084 /* XXX if there are other clients above don't shut down the power */
11086 /* don't shut down the power for emulation and FPGA */
11087 if (CHIP_REV_IS_SLOW(sc)) {
11091 pmcsr &= ~PCIM_PSTAT_DMASK;
11092 pmcsr |= PCIM_PSTAT_D3;
11095 pmcsr |= PCIM_PSTAT_PMEENABLE;
11098 pci_write_config(sc->dev,
11099 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11103 * No more memory access after this point until the device is brought back to D0.
11109 BLOGE(sc, "Can't support PCI power state = %d\n", state);
11117 /* return true if we succeeded in acquiring the lock */
11119 bxe_trylock_hw_lock(struct bxe_softc *sc,
11122 uint32_t lock_status;
11123 uint32_t resource_bit = (1 << resource);
11124 int func = SC_FUNC(sc);
11125 uint32_t hw_lock_control_reg;
11127 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
11129 /* Validating that the resource is within range */
11130 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
11131 BLOGD(sc, DBG_LOAD,
11132 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
11133 resource, HW_LOCK_MAX_RESOURCE_VALUE);
11138 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
11140 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
11143 /* try to acquire the lock */
11144 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
11145 lock_status = REG_RD(sc, hw_lock_control_reg);
11146 if (lock_status & resource_bit) {
11150 BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);
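
/*
 * The HW lock protocol above in schematic form (illustration only):
 * writing the resource bit to control_reg + 4 requests the lock, and a
 * follow-up read of control_reg shows the bit set only if this function
 * actually won it; losing simply means another function holds the lock.
 */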
11156 * Get the recovery leader resource id according to the engine this function
11157 * belongs to. Currently only 2 engines are supported.
11160 bxe_get_leader_lock_resource(struct bxe_softc *sc)
11163 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
11165 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
11169 /* try to acquire a leader lock for current engine */
11171 bxe_trylock_leader_lock(struct bxe_softc *sc)
11173 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11177 bxe_release_leader_lock(struct bxe_softc *sc)
11179 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11182 /* close gates #2, #3 and #4 */
11184 bxe_set_234_gates(struct bxe_softc *sc,
11189 /* gates #2 and #4a are closed/opened for "not E1" only */
11190 if (!CHIP_IS_E1(sc)) {
11192 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
11194 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
11198 if (CHIP_IS_E1x(sc)) {
11199 /* prevent interrupts from HC on both ports */
11200 val = REG_RD(sc, HC_REG_CONFIG_1);
11201 REG_WR(sc, HC_REG_CONFIG_1,
11202 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
11203 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
11205 val = REG_RD(sc, HC_REG_CONFIG_0);
11206 REG_WR(sc, HC_REG_CONFIG_0,
11207 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
11208 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
11210 /* Prevent incoming interrupts in IGU */
11211 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
11213 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
11215 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
11216 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
11219 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
11220 close ? "closing" : "opening");
11225 /* poll for the pending writes bit; it should get cleared in no more than 1s */
11227 bxe_er_poll_igu_vq(struct bxe_softc *sc)
11229 uint32_t cnt = 1000;
11230 uint32_t pend_bits = 0;
11233 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
11235 if (pend_bits == 0) {
11240 } while (--cnt > 0);
11243 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
11250 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
11253 bxe_clp_reset_prep(struct bxe_softc *sc,
11254 uint32_t *magic_val)
11256 /* Do some magic... */
11257 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
11258 *magic_val = val & SHARED_MF_CLP_MAGIC;
11259 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
11262 /* restore the value of the 'magic' bit */
11264 bxe_clp_reset_done(struct bxe_softc *sc,
11265 uint32_t magic_val)
11267 /* Restore the 'magic' bit value... */
11268 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
11269 MFCFG_WR(sc, shared_mf_config.clp_mb,
11270 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
11273 /* prepare for MCP reset, takes care of CLP configurations */
11275 bxe_reset_mcp_prep(struct bxe_softc *sc,
11276 uint32_t *magic_val)
11279 uint32_t validity_offset;
11281 /* set `magic' bit in order to save MF config */
11282 if (!CHIP_IS_E1(sc)) {
11283 bxe_clp_reset_prep(sc, magic_val);
11286 /* get shmem offset */
11287 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
11289 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
11291 /* Clear validity map flags */
11293 REG_WR(sc, shmem + validity_offset, 0);
11297 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
11298 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
11301 bxe_mcp_wait_one(struct bxe_softc *sc)
11303 /* special handling for emulation and FPGA (10 times longer) */
11304 if (CHIP_REV_IS_SLOW(sc)) {
11305 DELAY((MCP_ONE_TIMEOUT*10) * 1000);
11307 DELAY((MCP_ONE_TIMEOUT) * 1000);
11311 /* initialize shmem_base and wait for the validity signature to appear */
11313 bxe_init_shmem(struct bxe_softc *sc)
11319 sc->devinfo.shmem_base =
11320 sc->link_params.shmem_base =
11321 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
11323 if (sc->devinfo.shmem_base) {
11324 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
11325 if (val & SHR_MEM_VALIDITY_MB)
11329 bxe_mcp_wait_one(sc);
11331 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
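
    /*
     * Polling budget of the loop above (illustrative arithmetic):
     * MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50 iterations of
     * bxe_mcp_wait_one(), i.e. 100 ms per poll for a 5 second total
     * (10x longer on emulation/FPGA).
     */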
11333 BLOGE(sc, "BAD MCP validity signature\n");
11339 bxe_reset_mcp_comp(struct bxe_softc *sc,
11340 uint32_t magic_val)
11342 int rc = bxe_init_shmem(sc);
11344 /* Restore the `magic' bit value */
11345 if (!CHIP_IS_E1(sc)) {
11346 bxe_clp_reset_done(sc, magic_val);
11353 bxe_pxp_prep(struct bxe_softc *sc)
11355 if (!CHIP_IS_E1(sc)) {
11356 REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
11357 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
11363 * Reset the whole chip except for:
11365 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
11367 * - MISC (including AEU)
11372 bxe_process_kill_chip_reset(struct bxe_softc *sc,
11375 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
11376 uint32_t global_bits2, stay_reset2;
11379 * Bits that have to be set in reset_mask2 if we want to reset 'global'
11380 * (per chip) blocks.
11383 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
11384 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
11387 * Don't reset the following blocks.
11388 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
11389 * reset, as in a 4-port device they might still be owned
11390 * by the MCP (there is only one leader per path).
11393 MISC_REGISTERS_RESET_REG_1_RST_HC |
11394 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
11395 MISC_REGISTERS_RESET_REG_1_RST_PXP;
11398 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
11399 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
11400 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
11401 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
11402 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
11403 MISC_REGISTERS_RESET_REG_2_RST_GRC |
11404 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
11405 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
11406 MISC_REGISTERS_RESET_REG_2_RST_ATC |
11407 MISC_REGISTERS_RESET_REG_2_PGLC |
11408 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
11409 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
11410 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
11411 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
11412 MISC_REGISTERS_RESET_REG_2_UMAC0 |
11413 MISC_REGISTERS_RESET_REG_2_UMAC1;
11416 * Keep the following blocks in reset:
11417 * - all xxMACs are handled by the elink code.
11420 MISC_REGISTERS_RESET_REG_2_XMAC |
11421 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
11423 /* Full reset masks according to the chip */
11424 reset_mask1 = 0xffffffff;
11426 if (CHIP_IS_E1(sc))
11427 reset_mask2 = 0xffff;
11428 else if (CHIP_IS_E1H(sc))
11429 reset_mask2 = 0x1ffff;
11430 else if (CHIP_IS_E2(sc))
11431 reset_mask2 = 0xfffff;
11432 else /* CHIP_IS_E3 */
11433 reset_mask2 = 0x3ffffff;
11435 /* Don't reset global blocks unless we need to */
11437 reset_mask2 &= ~global_bits2;
11440 * In case of attention in the QM, we need to reset PXP
11441 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11442 * because otherwise QM reset would release 'close the gates' shortly
11443 * before resetting the PXP, then the PSWRQ would send a write
11444 * request to PGLUE. Then when PXP is reset, PGLUE would try to
11445 * read the payload data from PSWWR, but PSWWR would not
11446 * respond. The write queue in PGLUE would get stuck, DMAE commands
11447 * would not return. Therefore it's important to reset the second
11448 * reset register (containing the
11449 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11450 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11453 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11454 reset_mask2 & (~not_reset_mask2));
11456 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
11457 reset_mask1 & (~not_reset_mask1));
11462 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11463 reset_mask2 & (~stay_reset2));
11468 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11473 bxe_process_kill(struct bxe_softc *sc,
11478 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11479 uint32_t tags_63_32 = 0;
11481 /* Empty the Tetris buffer, wait for 1s */
11483 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11484 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11485 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11486 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11487 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11488 if (CHIP_IS_E3(sc)) {
11489 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11492 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11493 ((port_is_idle_0 & 0x1) == 0x1) &&
11494 ((port_is_idle_1 & 0x1) == 0x1) &&
11495 (pgl_exp_rom2 == 0xffffffff) &&
11496 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11499 } while (cnt-- > 0);
11502 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11503 "are still outstanding read requests after 1s! "
11504 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11505 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11506 sr_cnt, blk_cnt, port_is_idle_0,
11507 port_is_idle_1, pgl_exp_rom2);
11513 /* Close gates #2, #3 and #4 */
11514 bxe_set_234_gates(sc, TRUE);
11516 /* Poll for IGU VQs for 57712 and newer chips */
11517 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11521 /* XXX indicate that "process kill" is in progress to MCP */
11523 /* clear "unprepared" bit */
11524 REG_WR(sc, MISC_REG_UNPREPARED, 0);
11527 /* Make sure all is written to the chip before the reset */
11531 * Wait for 1ms to empty GLUE and PCI-E core queues,
11532 * PSWHST, GRC and PSWRD Tetris buffer.
11536 /* Prepare for chip reset: */
11539 bxe_reset_mcp_prep(sc, &val);
11546 /* reset the chip */
11547 bxe_process_kill_chip_reset(sc, global);
11550 /* clear errors in PGB */
11551 if (!CHIP_IS_E1(sc))
11552 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11554 /* Recover after reset: */
11556 if (global && bxe_reset_mcp_comp(sc, val)) {
11560 /* XXX add resetting the NO_MCP mode DB here */
11562 /* Open the gates #2, #3 and #4 */
11563 bxe_set_234_gates(sc, FALSE);
11566 * IGU/AEU preparation: bring the AEU/IGU back to a reset state,
11567 * re-enable attentions
11574 bxe_leader_reset(struct bxe_softc *sc)
11577 uint8_t global = bxe_reset_is_global(sc);
11578 uint32_t load_code;
11581 * If we are not going to reset the MCP, load a "fake" driver to reset
11582 * the HW while the driver is the owner of the HW.
11584 if (!global && !BXE_NOMCP(sc)) {
11585 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11586 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11588 BLOGE(sc, "MCP response failure, aborting\n");
11590 goto exit_leader_reset;
11593 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11594 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11595 BLOGE(sc, "MCP unexpected response, aborting\n");
11597 goto exit_leader_reset2;
11600 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11602 BLOGE(sc, "MCP response failure, aborting\n");
11604 goto exit_leader_reset2;
11608 /* try to recover after the failure */
11609 if (bxe_process_kill(sc, global)) {
11610 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11612 goto exit_leader_reset2;
11616 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11619 bxe_set_reset_done(sc);
11621 bxe_clear_reset_global(sc);
11624 exit_leader_reset2:
11626 /* unload "fake driver" if it was loaded */
11627 if (!global && !BXE_NOMCP(sc)) {
11628 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11629 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11635 bxe_release_leader_lock(sc);
11642 * prepare INIT transition, parameters configured:
11643 * - HC configuration
11644 * - Queue's CDU context
11647 bxe_pf_q_prep_init(struct bxe_softc *sc,
11648 struct bxe_fastpath *fp,
11649 struct ecore_queue_init_params *init_params)
11652 int cxt_index, cxt_offset;
11654 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11655 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11657 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11658 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11661 init_params->rx.hc_rate =
11662 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11663 init_params->tx.hc_rate =
11664 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
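
    /*
     * Worked example (illustrative): hc_rx_ticks = 25 usec between
     * status block updates gives rx.hc_rate = 1000000 / 25 = 40000
     * updates/sec; a tick value of 0 yields an hc_rate of 0, leaving
     * timer-driven coalescing off.
     */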
11667 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11669 /* CQ index among the SB indices */
11670 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11671 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11673 /* set maximum number of COSs supported by this queue */
11674 init_params->max_cos = sc->max_cos;
11676 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11677 fp->index, init_params->max_cos);
11679 /* set the context pointers queue object */
11680 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11681 /* XXX change index/cid here if ever support multiple tx CoS */
11682 /* fp->txdata[cos]->cid */
11683 cxt_index = fp->index / ILT_PAGE_CIDS;
11684 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11685 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
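/*
 * Each ILT page holds ILT_PAGE_CIDS contexts, so the divide/modulo above
 * selects the page and the offset within it; e.g., if ILT_PAGE_CIDS were
 * 4, fp->index 5 would land in page 1 at offset 1.
 */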
11689 /* set flags that are common for the Tx-only and not normal connections */
11690 static unsigned long
11691 bxe_get_common_flags(struct bxe_softc *sc,
11692 struct bxe_fastpath *fp,
11693 uint8_t zero_stats)
11695 unsigned long flags = 0;
11697 /* PF driver will always initialize the Queue to an ACTIVE state */
11698 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11701 * tx only connections collect statistics (on the same index as the
11702 * parent connection). The statistics are zeroed when the parent
11703 * connection is initialized.
11706 bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11708 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11712 * tx only connections can support tx-switching, though their
11713 * CoS-ness doesn't survive the loopback
11715 if (sc->flags & BXE_TX_SWITCHING) {
11716 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11719 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11724 static unsigned long
11725 bxe_get_q_flags(struct bxe_softc *sc,
11726 struct bxe_fastpath *fp,
11729 unsigned long flags = 0;
11731 if (IS_MF_SD(sc)) {
11732 bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11735 if (sc->ifnet->if_capenable & IFCAP_LRO) {
11736 bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11737 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11739 if (fp->mode == TPA_MODE_GRO)
11740 __set_bit(ECORE_Q_FLG_TPA_GRO, &flags);
11745 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11746 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11749 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11752 /* configure silent vlan removal */
11753 if (IS_MF_AFEX(sc)) {
11754 bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags);
11758 /* merge with common flags */
11759 return (flags | bxe_get_common_flags(sc, fp, TRUE));
11763 bxe_pf_q_prep_general(struct bxe_softc *sc,
11764 struct bxe_fastpath *fp,
11765 struct ecore_general_setup_params *gen_init,
11768 gen_init->stat_id = bxe_stats_id(fp);
11769 gen_init->spcl_id = fp->cl_id;
11770 gen_init->mtu = sc->mtu;
11771 gen_init->cos = cos;
11775 bxe_pf_rx_q_prep(struct bxe_softc *sc,
11776 struct bxe_fastpath *fp,
11777 struct rxq_pause_params *pause,
11778 struct ecore_rxq_setup_params *rxq_init)
11780 uint8_t max_sge = 0;
11781 uint16_t sge_sz = 0;
11782 uint16_t tpa_agg_size = 0;
11784 pause->sge_th_lo = SGE_TH_LO(sc);
11785 pause->sge_th_hi = SGE_TH_HI(sc);
11787 /* validate the SGE ring has enough entries to cross the high threshold */
11788 if (sc->dropless_fc &&
11789 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11790 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11791 BLOGW(sc, "sge ring threshold limit\n");
11794 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11795 tpa_agg_size = (2 * sc->mtu);
11796 if (tpa_agg_size < sc->max_aggregation_size) {
11797 tpa_agg_size = sc->max_aggregation_size;
11800 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11801 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11802 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
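/*
 * The (x + n - 1) & ~(n - 1) expression above rounds max_sge up to a
 * multiple of PAGES_PER_SGE (a power of two) before converting pages to
 * SGE entries; e.g., if PAGES_PER_SGE were 2, a raw count of 3 pages
 * would round up to 4 and then shift down to 2 SGEs.
 */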
11803 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11805 /* pause - not for e1 */
11806 if (!CHIP_IS_E1(sc)) {
11807 pause->bd_th_lo = BD_TH_LO(sc);
11808 pause->bd_th_hi = BD_TH_HI(sc);
11810 pause->rcq_th_lo = RCQ_TH_LO(sc);
11811 pause->rcq_th_hi = RCQ_TH_HI(sc);
11813 /* validate rings have enough entries to cross high thresholds */
11814 if (sc->dropless_fc &&
11815 pause->bd_th_hi + FW_PREFETCH_CNT >
11816 sc->rx_ring_size) {
11817 BLOGW(sc, "rx bd ring threshold limit\n");
11820 if (sc->dropless_fc &&
11821 pause->rcq_th_hi + FW_PREFETCH_CNT >
11822 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11823 BLOGW(sc, "rcq ring threshold limit\n");
11826 pause->pri_map = 1;
11830 rxq_init->dscr_map = fp->rx_dma.paddr;
11831 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11832 rxq_init->rcq_map = fp->rcq_dma.paddr;
11833 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11836 * This is the maximum number of data bytes that may be
11837 * placed on the BD (not including padding).
11839 rxq_init->buf_sz = (fp->rx_buf_size -
11840 IP_HEADER_ALIGNMENT_PADDING);
11842 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11843 rxq_init->tpa_agg_sz = tpa_agg_size;
11844 rxq_init->sge_buf_sz = sge_sz;
11845 rxq_init->max_sges_pkt = max_sge;
11846 rxq_init->rss_engine_id = SC_FUNC(sc);
11847 rxq_init->mcast_engine_id = SC_FUNC(sc);
11850 * Maximum number of simultaneous TPA aggregations for this Queue.
11851 * For PF Clients it should be the maximum available number.
11852 * VF driver(s) may want to define it to a smaller value.
11854 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11856 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11857 rxq_init->fw_sb_id = fp->fw_sb_id;
11859 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11862 * configure silent vlan removal
11863 * if multi function mode is afex, then mask default vlan
11865 if (IS_MF_AFEX(sc)) {
11866 rxq_init->silent_removal_value =
11867 sc->devinfo.mf_info.afex_def_vlan_tag;
11868 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11873 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11874 struct bxe_fastpath *fp,
11875 struct ecore_txq_setup_params *txq_init,
11879 * XXX If multiple CoS is ever supported then each fastpath structure
11880 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11881 * fp->txdata[cos]->tx_dma.paddr;
11883 txq_init->dscr_map = fp->tx_dma.paddr;
11884 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11885 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11886 txq_init->fw_sb_id = fp->fw_sb_id;
11889 * set the TSS leading client id for TX classification to the
11890 * leading RSS client id
11892 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11896 * This function performs 2 steps in a queue state machine:
11901 bxe_setup_queue(struct bxe_softc *sc,
11902 struct bxe_fastpath *fp,
11905 struct ecore_queue_state_params q_params = { NULL };
11906 struct ecore_queue_setup_params *setup_params =
11907 &q_params.params.setup;
11909 struct ecore_queue_setup_tx_only_params *tx_only_params =
11910 &q_params.params.tx_only;
11915 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11917 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11919 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11921 /* we want to wait for completion in this context */
11922 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11924 /* prepare the INIT parameters */
11925 bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11927 /* Set the command */
11928 q_params.cmd = ECORE_Q_CMD_INIT;
11930 /* Change the state to INIT */
11931 rc = ecore_queue_state_change(sc, &q_params);
11933 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index);
11937 BLOGD(sc, DBG_LOAD, "init complete\n");
11939 /* now move the Queue to the SETUP state */
11940 memset(setup_params, 0, sizeof(*setup_params));
11942 /* set Queue flags */
11943 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11945 /* set general SETUP parameters */
11946 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11947 FIRST_TX_COS_INDEX);
11949 bxe_pf_rx_q_prep(sc, fp,
11950 &setup_params->pause_params,
11951 &setup_params->rxq_params);
11953 bxe_pf_tx_q_prep(sc, fp,
11954 &setup_params->txq_params,
11955 FIRST_TX_COS_INDEX);
11957 /* Set the command */
11958 q_params.cmd = ECORE_Q_CMD_SETUP;
11960 /* change the state to SETUP */
11961 rc = ecore_queue_state_change(sc, &q_params);
11963 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index);
11968 /* loop through the relevant tx-only indices */
11969 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
11970 tx_index < sc->max_cos;
11972 /* prepare and send tx-only ramrod */
11973 rc = bxe_setup_tx_only(sc, fp, &q_params,
11974 tx_only_params, tx_index, leading);
11976 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n",
11977 fp->index, tx_index);
11987 bxe_setup_leading(struct bxe_softc *sc)
11989 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11993 bxe_config_rss_pf(struct bxe_softc *sc,
11994 struct ecore_rss_config_obj *rss_obj,
11995 uint8_t config_hash)
11997 struct ecore_config_rss_params params = { NULL };
12001 * Although RSS is meaningless when there is a single HW queue, we
12002 * still need it enabled in order to have HW Rx hash generated.
12005 params.rss_obj = rss_obj;
12007 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
12009 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
12011 /* RSS configuration */
12012 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
12013 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
12014 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
12015 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
12016 if (rss_obj->udp_rss_v4) {
12017 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
12019 if (rss_obj->udp_rss_v6) {
12020 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
12024 params.rss_result_mask = MULTI_MASK;
12026 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
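/*
 * Each pass of the loop below fills one 32-bit word of the RSS hash key
 * with a fresh arc4random() value, so the entire key is re-randomized on
 * every configuration.
 */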
12030 for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
12031 params.rss_key[i] = arc4random();
12034 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
12037 return (ecore_config_rss(sc, &params));
12041 bxe_config_rss_eth(struct bxe_softc *sc,
12042 uint8_t config_hash)
12044 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
12048 bxe_init_rss_pf(struct bxe_softc *sc)
12050 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
12054 * Prepare the initial contents of the indirection table if
12057 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
12058 sc->rss_conf_obj.ind_table[i] =
12059 (sc->fp->cl_id + (i % num_eth_queues));
12063 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
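/*
 * The table fill above distributes entries round-robin across the ETH
 * queues; e.g., with num_eth_queues of 4 and a base cl_id of 1, the
 * indirection table repeats the pattern 1, 2, 3, 4, 1, 2, ...
 */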
12067 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
12068 * per-port, so if explicit configuration is needed, do it only
12071 * For 57712 and newer it's a per-function configuration.
12073 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
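/*
 * Add or remove a single classification MAC via an ecore ramrod. For
 * example, bxe_set_eth_mac() below calls this with set=TRUE and
 * ECORE_ETH_MAC to install the primary MAC on the leading RSS client,
 * and bxe_set_uc_list() uses it with ECORE_UC_LIST_MAC.
 */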
12077 bxe_set_mac_one(struct bxe_softc *sc,
12079 struct ecore_vlan_mac_obj *obj,
12082 unsigned long *ramrod_flags)
12084 struct ecore_vlan_mac_ramrod_params ramrod_param;
12087 memset(&ramrod_param, 0, sizeof(ramrod_param));
12089 /* fill in general parameters */
12090 ramrod_param.vlan_mac_obj = obj;
12091 ramrod_param.ramrod_flags = *ramrod_flags;
12093 /* fill a user request section if needed */
12094 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
12095 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
12097 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
12099 /* Set the command: ADD or DEL */
12100 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
12101 ECORE_VLAN_MAC_DEL;
12104 rc = ecore_config_vlan_mac(sc, &ramrod_param);
12106 if (rc == ECORE_EXISTS) {
12107 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12108 /* do not treat adding same MAC as error */
12110 } else if (rc < 0) {
12111 BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
12118 bxe_set_eth_mac(struct bxe_softc *sc,
12121 unsigned long ramrod_flags = 0;
12123 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
12125 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12127 /* Eth MAC is set on RSS leading client (fp[0]) */
12128 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
12129 &sc->sp_objs->mac_obj,
12130 set, ECORE_ETH_MAC, &ramrod_flags));
12135 bxe_update_max_mf_config(struct bxe_softc *sc,
12138 /* load old values */
12139 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)];
12141 if (value != bxe_extract_max_cfg(sc, mf_cfg)) {
12142 /* leave all but MAX value */
12143 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
12145 /* set new MAX value */
12146 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) &
12147 FUNC_MF_CFG_MAX_BW_MASK);
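/*
 * This is a read-modify-write: only the MAX_BW field of the function's
 * MF config is replaced, and the new value is handed to the MCP via the
 * SET_MF_BW command below rather than being written to shmem directly.
 */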
12149 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
12155 bxe_get_cur_phy_idx(struct bxe_softc *sc)
12157 uint32_t sel_phy_idx = 0;
12159 if (sc->link_params.num_phys <= 1) {
12160 return (ELINK_INT_PHY);
12163 if (sc->link_vars.link_up) {
12164 sel_phy_idx = ELINK_EXT_PHY1;
12165 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
12166 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
12167 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
12168 ELINK_SUPPORTED_FIBRE))
12169 sel_phy_idx = ELINK_EXT_PHY2;
12171 switch (elink_phy_selection(&sc->link_params)) {
12172 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
12173 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
12174 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
12175 sel_phy_idx = ELINK_EXT_PHY1;
12177 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
12178 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
12179 sel_phy_idx = ELINK_EXT_PHY2;
12184 return (sel_phy_idx);
12188 bxe_get_link_cfg_idx(struct bxe_softc *sc)
12190 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
12193 * The selected/activated PHY is always the one after swapping (in case
12194 * PHY swapping is enabled). So when swapping is enabled, we need to
12195 * reverse the configuration
12198 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
12199 if (sel_phy_idx == ELINK_EXT_PHY1)
12200 sel_phy_idx = ELINK_EXT_PHY2;
12201 else if (sel_phy_idx == ELINK_EXT_PHY2)
12202 sel_phy_idx = ELINK_EXT_PHY1;
12205 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
12209 bxe_set_requested_fc(struct bxe_softc *sc)
12212 * Initialize link parameters structure variables
12213 * It is recommended to turn off RX FC for jumbo frames
12214 * for better performance
12216 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
12217 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
12219 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
12224 bxe_calc_fc_adv(struct bxe_softc *sc)
12226 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
12227 switch (sc->link_vars.ieee_fc &
12228 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
12229 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
12231 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
12235 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
12236 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
12240 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
12241 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
12247 bxe_get_mf_speed(struct bxe_softc *sc)
12249 uint16_t line_speed = sc->link_vars.line_speed;
12252 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
12254 /* calculate the current MAX line speed limit for the MF devices */
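/*
 * Note the units differ per mode: in switch-independent mode maxCfg is a
 * percentage (e.g., 30 caps a 10000 Mbps link at 3000 Mbps), while in
 * switch-dependent mode it is in 100 Mbps units (e.g., 30 means a
 * 3000 Mbps ceiling on the line speed).
 */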
12255 if (IS_MF_SI(sc)) {
12256 line_speed = (line_speed * maxCfg) / 100;
12257 } else { /* SD mode */
12258 uint16_t vn_max_rate = maxCfg * 100;
12260 if (vn_max_rate < line_speed) {
12261 line_speed = vn_max_rate;
12266 return (line_speed);
12270 bxe_fill_report_data(struct bxe_softc *sc,
12271 struct bxe_link_report_data *data)
12273 uint16_t line_speed = bxe_get_mf_speed(sc);
12275 memset(data, 0, sizeof(*data));
12277 /* fill the report data with the effective line speed */
12278 data->line_speed = line_speed;
12281 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
12282 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
12286 if (sc->link_vars.duplex == DUPLEX_FULL) {
12287 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
12290 /* Rx Flow Control is ON */
12291 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
12292 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
12295 /* Tx Flow Control is ON */
12296 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
12297 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
12301 /* report link status to OS, should be called under phy_lock */
12303 bxe_link_report_locked(struct bxe_softc *sc)
12305 struct bxe_link_report_data cur_data;
12307 /* reread mf_cfg */
12308 if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
12309 bxe_read_mf_cfg(sc);
12312 /* Read the current link report info */
12313 bxe_fill_report_data(sc, &cur_data);
12315 /* Don't report link down or exactly the same link status twice */
12316 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
12317 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12318 &sc->last_reported_link.link_report_flags) &&
12319 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12320 &cur_data.link_report_flags))) {
12326 /* report new link params and remember the state for the next time */
12327 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
12329 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12330 &cur_data.link_report_flags)) {
12331 if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
12332 BLOGI(sc, "NIC Link is Down\n");
12334 const char *duplex;
12337 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
12338 &cur_data.link_report_flags)) {
12345 * Handle the FC at the end so that only these flags could possibly be
12346 * set. This way we can easily check whether there is no FC
12349 if (cur_data.link_report_flags) {
12350 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12351 &cur_data.link_report_flags) &&
12352 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12353 &cur_data.link_report_flags)) {
12354 flow = "ON - receive & transmit";
12355 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12356 &cur_data.link_report_flags) &&
12357 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12358 &cur_data.link_report_flags)) {
12359 flow = "ON - receive";
12360 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12361 &cur_data.link_report_flags) &&
12362 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12363 &cur_data.link_report_flags)) {
12364 flow = "ON - transmit";
12366 flow = "none"; /* possible? */
12372 if_link_state_change(sc->ifnet, LINK_STATE_UP);
12373 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
12374 cur_data.line_speed, duplex, flow);
12379 bxe_link_report(struct bxe_softc *sc)
12381 bxe_acquire_phy_lock(sc);
12382 bxe_link_report_locked(sc);
12383 bxe_release_phy_lock(sc);
12387 bxe_link_status_update(struct bxe_softc *sc)
12389 if (sc->state != BXE_STATE_OPEN) {
12394 /* read updated dcb configuration */
12396 bxe_dcbx_pmf_update(sc);
12399 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
12400 elink_link_status_update(&sc->link_params, &sc->link_vars);
12402 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
12403 ELINK_SUPPORTED_10baseT_Full |
12404 ELINK_SUPPORTED_100baseT_Half |
12405 ELINK_SUPPORTED_100baseT_Full |
12406 ELINK_SUPPORTED_1000baseT_Full |
12407 ELINK_SUPPORTED_2500baseX_Full |
12408 ELINK_SUPPORTED_10000baseT_Full |
12409 ELINK_SUPPORTED_TP |
12410 ELINK_SUPPORTED_FIBRE |
12411 ELINK_SUPPORTED_Autoneg |
12412 ELINK_SUPPORTED_Pause |
12413 ELINK_SUPPORTED_Asym_Pause);
12414 sc->port.advertising[0] = sc->port.supported[0];
12416 sc->link_params.sc = sc;
12417 sc->link_params.port = SC_PORT(sc);
12418 sc->link_params.req_duplex[0] = DUPLEX_FULL;
12419 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
12420 sc->link_params.req_line_speed[0] = SPEED_10000;
12421 sc->link_params.speed_cap_mask[0] = 0x7f0000;
12422 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
12424 if (CHIP_REV_IS_FPGA(sc)) {
12425 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
12426 sc->link_vars.line_speed = ELINK_SPEED_1000;
12427 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
12428 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
12430 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
12431 sc->link_vars.line_speed = ELINK_SPEED_10000;
12432 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
12433 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
12436 sc->link_vars.link_up = 1;
12438 sc->link_vars.duplex = DUPLEX_FULL;
12439 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
12442 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
12443 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12444 bxe_link_report(sc);
12449 if (sc->link_vars.link_up) {
12450 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12452 bxe_stats_handle(sc, STATS_EVENT_STOP);
12454 bxe_link_report(sc);
12456 bxe_link_report(sc);
12457 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12462 bxe_initial_phy_init(struct bxe_softc *sc,
12465 int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
12466 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
12467 struct elink_params *lp = &sc->link_params;
12469 bxe_set_requested_fc(sc);
12471 if (CHIP_REV_IS_SLOW(sc)) {
12472 uint32_t bond = CHIP_BOND_ID(sc);
12475 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
12476 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12477 } else if (bond & 0x4) {
12478 if (CHIP_IS_E3(sc)) {
12479 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
12481 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12483 } else if (bond & 0x8) {
12484 if (CHIP_IS_E3(sc)) {
12485 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
12487 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12491 /* disable EMAC for E3 and above */
12493 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12496 sc->link_params.feature_config_flags |= feat;
12499 bxe_acquire_phy_lock(sc);
12501 if (load_mode == LOAD_DIAG) {
12502 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
12503 /* Prefer doing PHY loopback at 10G speed, if possible */
12504 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
12505 if (lp->speed_cap_mask[cfg_idx] &
12506 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
12507 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
12509 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
12514 if (load_mode == LOAD_LOOPBACK_EXT) {
12515 lp->loopback_mode = ELINK_LOOPBACK_EXT;
12518 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12520 bxe_release_phy_lock(sc);
12522 bxe_calc_fc_adv(sc);
12524 if (sc->link_vars.link_up) {
12525 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12526 bxe_link_report(sc);
12529 if (!CHIP_REV_IS_SLOW(sc)) {
12530 bxe_periodic_start(sc);
12533 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12537 /* must be called under IF_ADDR_LOCK */
12539 bxe_init_mcast_macs_list(struct bxe_softc *sc,
12540 struct ecore_mcast_ramrod_params *p)
12542 struct ifnet *ifp = sc->ifnet;
12544 struct ifmultiaddr *ifma;
12545 struct ecore_mcast_list_elem *mc_mac;
12547 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12548 if (ifma->ifma_addr->sa_family != AF_LINK) {
12555 ECORE_LIST_INIT(&p->mcast_list);
12556 p->mcast_list_len = 0;
12562 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12563 (M_NOWAIT | M_ZERO));
12565 BLOGE(sc, "Failed to allocate temp mcast list\n");
12568 bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12570 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12571 if (ifma->ifma_addr->sa_family != AF_LINK) {
12575 mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
12576 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
12578 BLOGD(sc, DBG_LOAD,
12579 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12580 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12581 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12586 p->mcast_list_len = mc_count;
12592 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12594 struct ecore_mcast_list_elem *mc_mac =
12595 ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12596 struct ecore_mcast_list_elem,
12600 /* only a single free as all mc_macs are in the same heap array */
12601 free(mc_mac, M_DEVBUF);
12606 bxe_set_mc_list(struct bxe_softc *sc)
12608 struct ecore_mcast_ramrod_params rparam = { NULL };
12611 rparam.mcast_obj = &sc->mcast_obj;
12613 BXE_MCAST_LOCK(sc);
12615 /* first, clear all configured multicast MACs */
12616 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12618 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12619 BXE_MCAST_UNLOCK(sc);
12623 /* configure the new MAC list */
12624 rc = bxe_init_mcast_macs_list(sc, &rparam);
12626 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12627 BXE_MCAST_UNLOCK(sc);
12631 /* Now add the new MACs */
12632 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12634 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12637 bxe_free_mcast_macs_list(&rparam);
12639 BXE_MCAST_UNLOCK(sc);
12645 bxe_set_uc_list(struct bxe_softc *sc)
12647 struct ifnet *ifp = sc->ifnet;
12648 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12649 struct ifaddr *ifa;
12650 unsigned long ramrod_flags = 0;
12653 #if __FreeBSD_version < 800000
12656 if_addr_rlock(ifp);
12659 /* first, schedule a cleanup of the old configuration */
12660 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12662 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12663 #if __FreeBSD_version < 800000
12664 IF_ADDR_UNLOCK(ifp);
12666 if_addr_runlock(ifp);
12671 ifa = ifp->if_addr;
12673 if (ifa->ifa_addr->sa_family != AF_LINK) {
12674 ifa = TAILQ_NEXT(ifa, ifa_link);
12678 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12679 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12680 if (rc == -EEXIST) {
12681 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12682 /* do not treat adding same MAC as an error */
12684 } else if (rc < 0) {
12685 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12686 #if __FreeBSD_version < 800000
12687 IF_ADDR_UNLOCK(ifp);
12689 if_addr_runlock(ifp);
12694 ifa = TAILQ_NEXT(ifa, ifa_link);
12697 #if __FreeBSD_version < 800000
12698 IF_ADDR_UNLOCK(ifp);
12700 if_addr_runlock(ifp);
12703 /* Execute the pending commands */
12704 bxe_set_bit(RAMROD_CONT, &ramrod_flags);
12705 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12706 ECORE_UC_LIST_MAC, &ramrod_flags));
12710 bxe_set_rx_mode(struct bxe_softc *sc)
12712 struct ifnet *ifp = sc->ifnet;
12713 uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12715 if (sc->state != BXE_STATE_OPEN) {
12716 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12720 BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags);
12722 if (ifp->if_flags & IFF_PROMISC) {
12723 rx_mode = BXE_RX_MODE_PROMISC;
12724 } else if ((ifp->if_flags & IFF_ALLMULTI) ||
12725 ((ifp->if_amcount > BXE_MAX_MULTICAST) &&
12727 rx_mode = BXE_RX_MODE_ALLMULTI;
12730 /* some multicasts */
12731 if (bxe_set_mc_list(sc) < 0) {
12732 rx_mode = BXE_RX_MODE_ALLMULTI;
12734 if (bxe_set_uc_list(sc) < 0) {
12735 rx_mode = BXE_RX_MODE_PROMISC;
12741 * Configuring mcast to a VF involves sleeping (when we
12742 * wait for the PF's response). Since this function is
12743 * called from a non-sleepable context we must schedule
12744 * a work item for this purpose.
12746 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state);
12747 schedule_delayed_work(&sc->sp_rtnl_task, 0);
12752 sc->rx_mode = rx_mode;
12754 /* schedule the rx_mode command */
12755 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12756 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12757 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12762 bxe_set_storm_rx_mode(sc);
12767 * Configuring mcast to a VF involves sleeping (when we
12768 * wait for the PF's response). Since this function is
12769 * called from a non-sleepable context we must schedule
12770 * a work item for this purpose.
12772 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state);
12773 schedule_delayed_work(&sc->sp_rtnl_task, 0);
12780 /* update flags in shmem */
12782 bxe_update_drv_flags(struct bxe_softc *sc,
12786 uint32_t drv_flags;
12788 if (SHMEM2_HAS(sc, drv_flags)) {
12789 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12790 drv_flags = SHMEM2_RD(sc, drv_flags);
12793 SET_FLAGS(drv_flags, flags);
12795 RESET_FLAGS(drv_flags, flags);
12798 SHMEM2_WR(sc, drv_flags, drv_flags);
12799 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12801 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12805 /* periodic timer callout routine, only runs when the interface is up */
12808 bxe_periodic_callout_func(void *xsc)
12810 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12813 if (!BXE_CORE_TRYLOCK(sc)) {
12814 /* just bail and try again next time */
12816 if ((sc->state == BXE_STATE_OPEN) &&
12817 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12818 /* schedule the next periodic callout */
12819 callout_reset(&sc->periodic_callout, hz,
12820 bxe_periodic_callout_func, sc);
12826 if ((sc->state != BXE_STATE_OPEN) ||
12827 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12828 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12829 BXE_CORE_UNLOCK(sc);
12833 /* Check for TX timeouts on any fastpath. */
12834 FOR_EACH_QUEUE(sc, i) {
12835 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12836 /* Ruh-Roh, chip was reset! */
12841 if (!CHIP_REV_IS_SLOW(sc)) {
12843 * This barrier is needed to ensure the ordering between the writing
12844 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12845 * the reading here.
12848 if (sc->port.pmf) {
12849 bxe_acquire_phy_lock(sc);
12850 elink_period_func(&sc->link_params, &sc->link_vars);
12851 bxe_release_phy_lock(sc);
12855 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12856 int mb_idx = SC_FW_MB_IDX(sc);
12857 uint32_t drv_pulse;
12858 uint32_t mcp_pulse;
12860 ++sc->fw_drv_pulse_wr_seq;
12861 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12863 drv_pulse = sc->fw_drv_pulse_wr_seq;
12866 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12867 MCP_PULSE_SEQ_MASK);
12870 * The delta between driver pulse and mcp response should
12871 * be 1 (before mcp response) or 0 (after mcp response).
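* For example, drv_pulse 0x0010 with mcp_pulse 0x0010 or 0x000f is
* healthy; any other combination means a heartbeat was missed and is
* logged below.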
12873 if ((drv_pulse != mcp_pulse) &&
12874 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12875 /* someone lost a heartbeat... */
12876 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12877 drv_pulse, mcp_pulse);
12881 /* state is BXE_STATE_OPEN */
12882 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12885 /* sample VF bulletin board for new posts from PF */
12887 bxe_sample_bulletin(sc);
12891 BXE_CORE_UNLOCK(sc);
12893 if ((sc->state == BXE_STATE_OPEN) &&
12894 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12895 /* schedule the next periodic callout */
12896 callout_reset(&sc->periodic_callout, hz,
12897 bxe_periodic_callout_func, sc);
12902 bxe_periodic_start(struct bxe_softc *sc)
12904 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12905 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
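/*
 * The callout fires roughly once per second (hz ticks) and re-arms
 * itself at the end of bxe_periodic_callout_func() as long as the state
 * remains BXE_STATE_OPEN and periodic_flags is PERIODIC_GO;
 * bxe_periodic_stop() flips the flag and drains any in-flight callout.
 */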
12909 bxe_periodic_stop(struct bxe_softc *sc)
12911 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12912 callout_drain(&sc->periodic_callout);
12915 /* start the controller */
12916 static __noinline int
12917 bxe_nic_load(struct bxe_softc *sc,
12924 BXE_CORE_LOCK_ASSERT(sc);
12926 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12928 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12931 /* must be called before memory allocation and HW init */
12932 bxe_ilt_set_info(sc);
12935 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12937 bxe_set_fp_rx_buf_size(sc);
12939 if (bxe_alloc_fp_buffers(sc) != 0) {
12940 BLOGE(sc, "Failed to allocate fastpath memory\n");
12941 sc->state = BXE_STATE_CLOSED;
12943 goto bxe_nic_load_error0;
12946 if (bxe_alloc_mem(sc) != 0) {
12947 sc->state = BXE_STATE_CLOSED;
12949 goto bxe_nic_load_error0;
12952 if (bxe_alloc_fw_stats_mem(sc) != 0) {
12953 sc->state = BXE_STATE_CLOSED;
12955 goto bxe_nic_load_error0;
12959 /* set pf load just before approaching the MCP */
12960 bxe_set_pf_load(sc);
12962 /* if MCP exists send load request and analyze response */
12963 if (!BXE_NOMCP(sc)) {
12964 /* attempt to load pf */
12965 if (bxe_nic_load_request(sc, &load_code) != 0) {
12966 sc->state = BXE_STATE_CLOSED;
12968 goto bxe_nic_load_error1;
12971 /* what did the MCP say? */
12972 if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12973 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12974 sc->state = BXE_STATE_CLOSED;
12976 goto bxe_nic_load_error2;
12979 BLOGI(sc, "Device has no MCP!\n");
12980 load_code = bxe_nic_load_no_mcp(sc);
12983 /* mark PMF if applicable */
12984 bxe_nic_load_pmf(sc, load_code);
12986 /* Init Function state controlling object */
12987 bxe_init_func_obj(sc);
12989 /* Initialize HW */
12990 if (bxe_init_hw(sc, load_code) != 0) {
12991 BLOGE(sc, "HW init failed\n");
12992 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12993 sc->state = BXE_STATE_CLOSED;
12995 goto bxe_nic_load_error2;
12999 /* set ALWAYS_ALIVE bit in shmem */
13000 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
13002 sc->flags |= BXE_NO_PULSE;
13004 /* attach interrupts */
13005 if (bxe_interrupt_attach(sc) != 0) {
13006 sc->state = BXE_STATE_CLOSED;
13008 goto bxe_nic_load_error2;
13011 bxe_nic_init(sc, load_code);
13013 /* Init per-function objects */
13016 // XXX bxe_iov_nic_init(sc);
13018 /* set AFEX default VLAN tag to an invalid value */
13019 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
13020 // XXX bxe_nic_load_afex_dcc(sc, load_code);
13022 sc->state = BXE_STATE_OPENING_WAITING_PORT;
13023 rc = bxe_func_start(sc);
13025 BLOGE(sc, "Function start failed!\n");
13026 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
13027 sc->state = BXE_STATE_ERROR;
13028 goto bxe_nic_load_error3;
13031 /* send LOAD_DONE command to MCP */
13032 if (!BXE_NOMCP(sc)) {
13033 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
13035 BLOGE(sc, "MCP response failure, aborting\n");
13036 sc->state = BXE_STATE_ERROR;
13038 goto bxe_nic_load_error3;
13042 rc = bxe_setup_leading(sc);
13044 BLOGE(sc, "Setup leading failed!\n");
13045 sc->state = BXE_STATE_ERROR;
13046 goto bxe_nic_load_error3;
13049 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
13050 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
13052 BLOGE(sc, "Queue(%d) setup failed\n", i);
13053 sc->state = BXE_STATE_ERROR;
13054 goto bxe_nic_load_error3;
13058 rc = bxe_init_rss_pf(sc);
13060 BLOGE(sc, "PF RSS init failed\n");
13061 sc->state = BXE_STATE_ERROR;
13062 goto bxe_nic_load_error3;
13068 FOR_EACH_ETH_QUEUE(sc, i) {
13069 rc = bxe_vfpf_setup_q(sc, i);
13071 BLOGE(sc, "Queue(%d) setup failed\n", i);
13072 sc->state = BXE_STATE_ERROR;
13073 goto bxe_nic_load_error3;
13079 /* now that the Clients are configured we are ready to work */
13080 sc->state = BXE_STATE_OPEN;
13082 /* Configure a ucast MAC */
13084 rc = bxe_set_eth_mac(sc, TRUE);
13087 else { /* IS_VF(sc) */
13088 rc = bxe_vfpf_set_mac(sc);
13092 BLOGE(sc, "Setting Ethernet MAC failed\n");
13093 sc->state = BXE_STATE_ERROR;
13094 goto bxe_nic_load_error3;
13098 if (IS_PF(sc) && sc->pending_max) {
13100 bxe_update_max_mf_config(sc, sc->pending_max);
13101 sc->pending_max = 0;
13105 if (sc->port.pmf) {
13106 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
13108 sc->state = BXE_STATE_ERROR;
13109 goto bxe_nic_load_error3;
13113 sc->link_params.feature_config_flags &=
13114 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
13116 /* start fast path */
13118 /* Initialize Rx filter */
13119 bxe_set_rx_mode(sc);
13122 switch (/* XXX load_mode */LOAD_OPEN) {
13128 case LOAD_LOOPBACK_EXT:
13129 sc->state = BXE_STATE_DIAG;
13136 if (sc->port.pmf) {
13137 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
13139 bxe_link_status_update(sc);
13142 /* start the periodic timer callout */
13143 bxe_periodic_start(sc);
13145 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
13146 /* mark driver is loaded in shmem2 */
13147 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
13148 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
13150 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
13151 DRV_FLAGS_CAPABILITIES_LOADED_L2));
13154 /* wait for all pending SP commands to complete */
13155 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
13156 BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
13157 bxe_periodic_stop(sc);
13158 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
13163 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
13164 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) {
13165 bxe_dcbx_init(sc, FALSE);
13169 /* Tell the stack the driver is running! */
13170 sc->ifnet->if_drv_flags = IFF_DRV_RUNNING;
13172 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
13176 bxe_nic_load_error3:
13179 bxe_int_disable_sync(sc, 1);
13181 /* clean out queued objects */
13182 bxe_squeeze_objects(sc);
13185 bxe_interrupt_detach(sc);
13187 bxe_nic_load_error2:
13189 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
13190 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
13191 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
13196 bxe_nic_load_error1:
13198 /* clear pf_load status, as it was already set */
13200 bxe_clear_pf_load(sc);
13203 bxe_nic_load_error0:
13205 bxe_free_fw_stats_mem(sc);
13206 bxe_free_fp_buffers(sc);
13213 bxe_init_locked(struct bxe_softc *sc)
13215 int other_engine = SC_PATH(sc) ? 0 : 1;
13216 uint8_t other_load_status, load_status;
13217 uint8_t global = FALSE;
13220 BXE_CORE_LOCK_ASSERT(sc);
13222 /* check if the driver is already running */
13223 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
13224 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
13228 bxe_set_power_state(sc, PCI_PM_D0);
13231 * If parity occurred during the unload, then attentions and/or
13232 * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function
13233 * loaded on the current engine to complete the recovery. Parity recovery
13234 * is only relevant for PF driver.
13237 other_load_status = bxe_get_load_status(sc, other_engine);
13238 load_status = bxe_get_load_status(sc, SC_PATH(sc));
13240 if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
13241 bxe_chk_parity_attn(sc, &global, TRUE)) {
13244 * If there are attentions and they are in global blocks, set
13245 * the GLOBAL_RESET bit regardless whether it will be this
13246 * function that will complete the recovery or not.
13249 bxe_set_reset_global(sc);
13253 * Only the first function on the current engine should try
13254 * to recover in open. In case of attentions in global blocks
13255 * only the first in the chip should try to recover.
13257 if ((!load_status && (!global || !other_load_status)) &&
13258 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
13259 BLOGI(sc, "Recovered during init\n");
13263 /* recovery has failed... */
13264 bxe_set_power_state(sc, PCI_PM_D3hot);
13265 sc->recovery_state = BXE_RECOVERY_FAILED;
13267 BLOGE(sc, "Recovery flow hasn't properly "
13268 "completed yet, try again later. "
13269 "If you still see this message after a "
13270 "few retries then power cycle is required.\n");
13273 goto bxe_init_locked_done;
13278 sc->recovery_state = BXE_RECOVERY_DONE;
13280 rc = bxe_nic_load(sc, LOAD_OPEN);
13282 bxe_init_locked_done:
13285 /* Tell the stack the driver is NOT running! */
13286 BLOGE(sc, "Initialization failed, "
13287 "stack notified driver is NOT running!\n");
13288 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
13295 bxe_stop_locked(struct bxe_softc *sc)
13297 BXE_CORE_LOCK_ASSERT(sc);
13298 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
13302 * Handles controller initialization when called from an unlocked routine.
13303 * ifconfig calls this function.
13309 bxe_init(void *xsc)
13311 struct bxe_softc *sc = (struct bxe_softc *)xsc;
13314 bxe_init_locked(sc);
13315 BXE_CORE_UNLOCK(sc);
13319 bxe_init_ifnet(struct bxe_softc *sc)
13323 /* ifconfig entrypoint for media type/status reporting */
13324 ifmedia_init(&sc->ifmedia, IFM_IMASK,
13325 bxe_ifmedia_update,
13326 bxe_ifmedia_status);
13328 /* set the default interface values */
13329 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
13330 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
13331 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
13333 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
13335 /* allocate the ifnet structure */
13336 if ((ifp = if_alloc(IFT_ETHER)) == NULL) {
13337 BLOGE(sc, "Interface allocation failed!\n");
13341 ifp->if_softc = sc;
13342 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13343 ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
13344 ifp->if_ioctl = bxe_ioctl;
13345 ifp->if_start = bxe_tx_start;
13346 #if __FreeBSD_version >= 800000
13347 ifp->if_transmit = bxe_tx_mq_start;
13348 ifp->if_qflush = bxe_mq_flush;
13353 ifp->if_init = bxe_init;
13354 ifp->if_mtu = sc->mtu;
13355 ifp->if_hwassist = (CSUM_IP |
13361 ifp->if_capabilities =
13362 #if __FreeBSD_version < 700000
13364 IFCAP_VLAN_HWTAGGING |
13370 IFCAP_VLAN_HWTAGGING |
13372 IFCAP_VLAN_HWFILTER |
13373 IFCAP_VLAN_HWCSUM |
13381 ifp->if_capenable = ifp->if_capabilities;
13382 ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... */
13383 #if __FreeBSD_version < 1000025
13384 ifp->if_baudrate = 1000000000;
13386 if_initbaudrate(ifp, IF_Gbps(10));
13388 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
13390 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
13391 IFQ_SET_READY(&ifp->if_snd);
13395 /* attach to the Ethernet interface list */
13396 ether_ifattach(ifp, sc->link_params.mac_addr);
13402 bxe_deallocate_bars(struct bxe_softc *sc)
13406 for (i = 0; i < MAX_BARS; i++) {
13407 if (sc->bar[i].resource != NULL) {
13408 bus_release_resource(sc->dev,
13411 sc->bar[i].resource);
13412 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13419 bxe_allocate_bars(struct bxe_softc *sc)
13424 memset(sc->bar, 0, sizeof(sc->bar));
13426 for (i = 0; i < MAX_BARS; i++) {
13428 /* memory resources reside at BARs 0, 2, 4 */
13429 /* Run `pciconf -lb` to see mappings */
13430 if ((i != 0) && (i != 2) && (i != 4)) {
13434 sc->bar[i].rid = PCIR_BAR(i);
13438 flags |= RF_SHAREABLE;
13441 if ((sc->bar[i].resource =
13442 bus_alloc_resource_any(sc->dev,
13447 /* BAR4 doesn't exist for E1 */
13448 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n",
13454 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
13455 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13456 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13458 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n",
13460 (void *)rman_get_start(sc->bar[i].resource),
13461 (void *)rman_get_end(sc->bar[i].resource),
13462 rman_get_size(sc->bar[i].resource),
13463 (void *)sc->bar[i].kva);
13470 bxe_get_function_num(struct bxe_softc *sc)
13475 * Read the ME register to get the function number. The ME register
13476 * holds the relative-function number and absolute-function number. The
13477 * absolute-function number appears only in E2 and above. Before that
13478 * these bits always contained zero, therefore we cannot blindly use them.
13481 val = REG_RD(sc, BAR_ME_REGISTER);
13484 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13486 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13488 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13489 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13491 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
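/*
 * In 4-port mode the absolute function number interleaves across paths;
 * e.g., pfunc_rel 1 on path 1 gives pfunc_abs (1 << 1) | 1 = 3. In
 * 2-port mode the path bit is simply OR'd into the relative number.
 */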
13494 BLOGD(sc, DBG_LOAD,
13495 "Relative function %d, Absolute function %d, Path %d\n",
13496 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13500 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13502 uint32_t shmem2_size;
13504 uint32_t mf_cfg_offset_value;
13507 offset = (SHMEM_RD(sc, func_mb) +
13508 (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13511 if (sc->devinfo.shmem2_base != 0) {
13512 shmem2_size = SHMEM2_RD(sc, size);
13513 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13514 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13515 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13516 offset = mf_cfg_offset_value;
13525 bxe_pcie_capability_read(struct bxe_softc *sc,
13531 /* ensure PCIe capability is enabled */
13532 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13533 if (pcie_reg != 0) {
13534 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13535 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13539 BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13545 bxe_is_pcie_pending(struct bxe_softc *sc)
13547 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
13548 PCIM_EXP_STA_TRANSACTION_PND);
13552 * Walk the PCI capabilities list for the device to find what features are
13553 * supported. These capabilities may be enabled/disabled by firmware so it's
13554 * best to walk the list rather than make assumptions.
13557 bxe_probe_pci_caps(struct bxe_softc *sc)
13559 uint16_t link_status;
13562 /* check if PCI Power Management is enabled */
13563 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13565 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13567 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13568 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13572 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
13574 /* handle PCIe 2.0 workarounds for 57710 */
13575 if (CHIP_IS_E1(sc)) {
13576 /* workaround for 57710 errata E4_57710_27462 */
13577 sc->devinfo.pcie_link_speed =
13578 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13580 /* workaround for 57710 errata E4_57710_27488 */
13581 sc->devinfo.pcie_link_width =
13582 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13583 if (sc->devinfo.pcie_link_speed > 1) {
13584 sc->devinfo.pcie_link_width =
13585 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
13588 sc->devinfo.pcie_link_speed =
13589 (link_status & PCIM_LINK_STA_SPEED);
13590 sc->devinfo.pcie_link_width =
13591 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13594 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13595 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13597 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13598 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13600 /* check if MSI capability is enabled */
13601 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13603 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13605 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13606 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13610 /* check if MSI-X capability is enabled */
13611 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13613 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13615 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13616 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13622 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13624 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13627 /* get the outer vlan if we're in switch-dependent mode */
13629 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13630 mf_info->ext_id = (uint16_t)val;
13632 mf_info->multi_vnics_mode = 1;
13634 if (!VALID_OVLAN(mf_info->ext_id)) {
13635 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13639 /* get the capabilities */
13640 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13641 FUNC_MF_CFG_PROTOCOL_ISCSI) {
13642 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13643 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13644 FUNC_MF_CFG_PROTOCOL_FCOE) {
13645 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13647 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13650 mf_info->vnics_per_port =
13651 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13657 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13659 uint32_t retval = 0;
13662 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13664 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13665 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13666 retval |= MF_PROTO_SUPPORT_ETHERNET;
13668 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13669 retval |= MF_PROTO_SUPPORT_ISCSI;
13671 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13672 retval |= MF_PROTO_SUPPORT_FCOE;
13680 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13682 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13686 * There is no outer vlan if we're in switch-independent mode.
13687 * If the MAC is valid, then assume multi-function.
13690 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13692 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13694 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13696 mf_info->vnics_per_port =
13697 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13703 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13705 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13706 uint32_t e1hov_tag;
13707 uint32_t func_config;
13708 uint32_t niv_config;
13710 mf_info->multi_vnics_mode = 1;
13712 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13713 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13714 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13717 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13718 FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13720 mf_info->default_vlan =
13721 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13722 FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13724 mf_info->niv_allowed_priorities =
13725 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13726 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13728 mf_info->niv_default_cos =
13729 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13730 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13732 mf_info->afex_vlan_mode =
13733 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13734 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13736 mf_info->niv_mba_enabled =
13737 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13738 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13740 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13742 mf_info->vnics_per_port =
13743 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13749 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13751 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13758 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13760 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13761 mf_info->mf_config[SC_VN(sc)]);
13762 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13763 mf_info->multi_vnics_mode);
13764 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13765 mf_info->vnics_per_port);
13766 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13768 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13769 mf_info->min_bw[0], mf_info->min_bw[1],
13770 mf_info->min_bw[2], mf_info->min_bw[3]);
13771 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13772 mf_info->max_bw[0], mf_info->max_bw[1],
13773 mf_info->max_bw[2], mf_info->max_bw[3]);
13774 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13777 /* various MF mode sanity checks... */
13779 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13780 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13785 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13786 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13787 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13791 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13792 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13793 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13794 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13795 SC_VN(sc), OVLAN(sc));
13799 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13800 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13801 mf_info->multi_vnics_mode, OVLAN(sc));
13806 * Verify all functions are either in MF or SF mode. If MF, make sure
13807 * that all non-hidden functions have a valid ovlan. If SF,
13808 * make sure that all non-hidden functions have an invalid ovlan.
13810 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13811 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13812 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13813 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13814 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13815 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13816 BLOGE(sc, "mf_mode=SD function %d MF config "
13817 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13818 i, mf_info->multi_vnics_mode, ovlan1);
13823 /* Verify all funcs on the same port each have a different ovlan. */
13824 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13825 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13826 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13827 /* iterate from the next function on the port to the max func */
13828 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13829 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13830 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13831 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13832 VALID_OVLAN(ovlan1) &&
13833 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13834 VALID_OVLAN(ovlan2) &&
13835 (ovlan1 == ovlan2)) {
13836 BLOGE(sc, "mf_mode=SD functions %d and %d "
13837 "have the same ovlan (%d)\n",
13843 } /* MULTI_FUNCTION_SD */
13849 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13851 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13852 uint32_t val, mac_upper;
13855 /* initialize mf_info defaults */
13856 mf_info->vnics_per_port = 1;
13857 mf_info->multi_vnics_mode = FALSE;
13858 mf_info->path_has_ovlan = FALSE;
13859 mf_info->mf_mode = SINGLE_FUNCTION;
13861 if (!CHIP_IS_MF_CAP(sc)) {
13865 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13866 BLOGE(sc, "Invalid mf_cfg_base!\n");
13870 /* get the MF mode (switch dependent / independent / single-function) */
13872 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13874 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13876 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13878 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13880 /* check for legal upper mac bytes */
13881 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13882 mf_info->mf_mode = MULTI_FUNCTION_SI;
13884 BLOGE(sc, "Invalid config for Switch Independent mode\n");
13889 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13890 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13892 /* get outer vlan configuration */
13893 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13895 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13896 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13897 mf_info->mf_mode = MULTI_FUNCTION_SD;
13899 BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13904 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13906 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13909 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13912 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13913 * and the MAC address is valid.
13915 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13917 if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13918 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13919 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13921 BLOGE(sc, "Invalid config for AFEX mode\n");
13928 BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13929 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13934 /* set path mf_mode (which could be different than function mf_mode) */
13935 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13936 mf_info->path_has_ovlan = TRUE;
13937 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13939 * Decide on the path multi vnics mode. If we're not in MF mode and in
13940 * 4-port mode, it is enough to check vnic-0 of the other port
13943 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13944 uint8_t other_port = !(PORT_ID(sc) & 1);
13945 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13947 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13949 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13953 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13954 /* invalid MF config */
13955 if (SC_VN(sc) >= 1) {
13956 BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13963 /* get the MF configuration */
13964 mf_info->mf_config[SC_VN(sc)] =
13965 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13967 switch(mf_info->mf_mode)
13969 case MULTI_FUNCTION_SD:
13971 bxe_get_shmem_mf_cfg_info_sd(sc);
13974 case MULTI_FUNCTION_SI:
13976 bxe_get_shmem_mf_cfg_info_si(sc);
13979 case MULTI_FUNCTION_AFEX:
13981 bxe_get_shmem_mf_cfg_info_niv(sc);
13986 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13991 /* get the congestion management parameters */
13994 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13995 /* get min/max bw */
13996 val = MFCFG_RD(sc, func_mf_config[i].config);
13997 mf_info->min_bw[vnic] =
13998 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13999 mf_info->max_bw[vnic] =
14000 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
14004 return (bxe_check_valid_mf_cfg(sc));
14008 bxe_get_shmem_info(struct bxe_softc *sc)
14011 uint32_t mac_hi, mac_lo, val;
14013 port = SC_PORT(sc);
14014 mac_hi = mac_lo = 0;
14016 sc->link_params.sc = sc;
14017 sc->link_params.port = port;
14019 /* get the hardware config info */
14020 sc->devinfo.hw_config =
14021 SHMEM_RD(sc, dev_info.shared_hw_config.config);
14022 sc->devinfo.hw_config2 =
14023 SHMEM_RD(sc, dev_info.shared_hw_config.config2);
14025 sc->link_params.hw_led_mode =
14026 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
14027 SHARED_HW_CFG_LED_MODE_SHIFT);
14029 /* get the port feature config */
14031 SHMEM_RD(sc, dev_info.port_feature_config[port].config),
14033 /* get the link params */
14034 sc->link_params.speed_cap_mask[0] =
14035 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
14036 sc->link_params.speed_cap_mask[1] =
14037 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
14039 /* get the lane config */
14040 sc->link_params.lane_config =
14041 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
14043 /* get the link config */
14044 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
14045 sc->port.link_config[ELINK_INT_PHY] = val;
14046 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
14047 sc->port.link_config[ELINK_EXT_PHY1] =
14048 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
14050 /* get the override preemphasis flag and enable it or turn it off */
14051 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
14052 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
14053 sc->link_params.feature_config_flags |=
14054 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
14056 sc->link_params.feature_config_flags &=
14057 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
14060 /* get the initial value of the link params */
14061 sc->link_params.multi_phy_config =
14062 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
14064 /* get external phy info */
14065 sc->port.ext_phy_config =
14066 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
14068 /* get the multifunction configuration */
14069 bxe_get_mf_cfg_info(sc);
14071 /* get the mac address */
14073 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
14074 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
14076 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
14077 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
14080 if ((mac_lo == 0) && (mac_hi == 0)) {
14081 *sc->mac_addr_str = 0;
14082 BLOGE(sc, "No Ethernet address programmed!\n");
14084 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
14085 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
14086 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
14087 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
14088 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
14089 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
14090 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
14091 "%02x:%02x:%02x:%02x:%02x:%02x",
14092 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
14093 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
14094 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
14095 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
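/*
 * Worked example (illustrative values, not read from hardware): the
 * 48-bit MAC is packed big-endian across the two 32-bit words above,
 * with the first two bytes in the low half of mac_hi and the remaining
 * four in mac_lo. For 00:10:18:aa:bb:cc:
 *
 *   mac_hi = 0x00000010 -> mac_addr[0] = 0x00, mac_addr[1] = 0x10
 *   mac_lo = 0x18aabbcc -> mac_addr[2] = 0x18, mac_addr[3] = 0xaa,
 *                          mac_addr[4] = 0xbb, mac_addr[5] = 0xcc
 */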
14100 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
14101 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) {
14102 sc->flags |= BXE_NO_ISCSI;
14105 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
14106 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) {
14107 sc->flags |= BXE_NO_FCOE_FLAG;
14115 bxe_get_tunable_params(struct bxe_softc *sc)
14117 /* sanity checks */
14119 if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
14120 (bxe_interrupt_mode != INTR_MODE_MSI) &&
14121 (bxe_interrupt_mode != INTR_MODE_MSIX)) {
14122 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
14123 bxe_interrupt_mode = INTR_MODE_MSIX;
14126 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
14127 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
14128 bxe_queue_count = 0;
14131 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
14132 if (bxe_max_rx_bufs == 0) {
14133 bxe_max_rx_bufs = RX_BD_USABLE;
14135 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
14136 bxe_max_rx_bufs = 2048;
14140 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
14141 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
14142 bxe_hc_rx_ticks = 25;
14145 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
14146 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
14147 bxe_hc_tx_ticks = 50;
14150 if (bxe_max_aggregation_size == 0) {
14151 bxe_max_aggregation_size = TPA_AGG_SIZE;
14154 if (bxe_max_aggregation_size > 0xffff) {
14155 BLOGW(sc, "invalid max_aggregation_size (%d)\n",
14156 bxe_max_aggregation_size);
14157 bxe_max_aggregation_size = TPA_AGG_SIZE;
14160 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
14161 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
14165 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
14166 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
14167 bxe_autogreeen = 0;
14170 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
14171 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
14175 /* pull in user settings */
14177 sc->interrupt_mode = bxe_interrupt_mode;
14178 sc->max_rx_bufs = bxe_max_rx_bufs;
14179 sc->hc_rx_ticks = bxe_hc_rx_ticks;
14180 sc->hc_tx_ticks = bxe_hc_tx_ticks;
14181 sc->max_aggregation_size = bxe_max_aggregation_size;
14182 sc->mrrs = bxe_mrrs;
14183 sc->autogreeen = bxe_autogreeen;
14184 sc->udp_rss = bxe_udp_rss;
14186 if (bxe_interrupt_mode == INTR_MODE_INTX) {
14187 sc->num_queues = 1;
14188 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
14190 min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
14192 if (sc->num_queues > mp_ncpus) {
14193 sc->num_queues = mp_ncpus;
14197 BLOGD(sc, DBG_LOAD,
14200 "interrupt_mode=%d "
14205 "max_aggregation_size=%d "
14210 sc->interrupt_mode,
14215 sc->max_aggregation_size,
14222 bxe_media_detect(struct bxe_softc *sc)
14224 uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
14225 switch (sc->link_params.phy[phy_idx].media_type) {
14226 case ELINK_ETH_PHY_SFPP_10G_FIBER:
14227 case ELINK_ETH_PHY_XFP_FIBER:
14228 BLOGI(sc, "Found 10Gb Fiber media.\n");
14229 sc->media = IFM_10G_SR;
14231 case ELINK_ETH_PHY_SFP_1G_FIBER:
14232 BLOGI(sc, "Found 1Gb Fiber media.\n");
14233 sc->media = IFM_1000_SX;
14235 case ELINK_ETH_PHY_KR:
14236 case ELINK_ETH_PHY_CX4:
14237 BLOGI(sc, "Found 10GBase-CX4 media.\n");
14238 sc->media = IFM_10G_CX4;
14240 case ELINK_ETH_PHY_DA_TWINAX:
14241 BLOGI(sc, "Found 10Gb Twinax media.\n");
14242 sc->media = IFM_10G_TWINAX;
14244 case ELINK_ETH_PHY_BASE_T:
14245 if (sc->link_params.speed_cap_mask[0] &
14246 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
14247 BLOGI(sc, "Found 10GBase-T media.\n");
14248 sc->media = IFM_10G_T;
14250 BLOGI(sc, "Found 1000Base-T media.\n");
14251 sc->media = IFM_1000_T;
14254 case ELINK_ETH_PHY_NOT_PRESENT:
14255 BLOGI(sc, "Media not present.\n");
14258 case ELINK_ETH_PHY_UNSPECIFIED:
14260 BLOGI(sc, "Unknown media!\n");
14266 #define GET_FIELD(value, fname) \
14267 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
14268 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
14269 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
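/*
 * Usage sketch for the accessors above, with a hypothetical field FOO
 * where FOO_MASK is 0x000000f0 and FOO_SHIFT is 4:
 *
 *   GET_FIELD(0x0250, FOO) -> ((0x0250 & 0x000000f0) >> 4) == 0x5
 *
 * IGU_FID() and IGU_VEC() apply the same mask-and-shift pattern to the
 * FID and vector fields of an IGU CAM entry.
 */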
14272 bxe_get_igu_cam_info(struct bxe_softc *sc)
14274 int pfid = SC_FUNC(sc);
14277 uint8_t fid, igu_sb_cnt = 0;
14279 sc->igu_base_sb = 0xff;
14281 if (CHIP_INT_MODE_IS_BC(sc)) {
14282 int vn = SC_VN(sc);
14283 igu_sb_cnt = sc->igu_sb_cnt;
14284 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
14286 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
14287 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
14291 /* IGU in normal mode - read CAM */
14292 for (igu_sb_id = 0;
14293 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
14295 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
14296 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
14299 fid = IGU_FID(val);
14300 if ((fid & IGU_FID_ENCODE_IS_PF)) {
14301 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
14304 if (IGU_VEC(val) == 0) {
14305 /* default status block */
14306 sc->igu_dsb_id = igu_sb_id;
14308 if (sc->igu_base_sb == 0xff) {
14309 sc->igu_base_sb = igu_sb_id;
14317 * Due to the new PF resource allocation by MFW T7.4 and above, it's possible
14318 * that the number of CAM entries will not equal the value advertised in PCI.
14319 * The driver should use the minimum of the two as the actual status block count.
14322 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
14324 if (igu_sb_cnt == 0) {
14325 BLOGE(sc, "CAM configuration error\n");
14333 * Gather various information from the device config space, the device itself,
14334 * shmem, and the user input.
14337 bxe_get_device_info(struct bxe_softc *sc)
14342 /* Get the data for the device */
14343 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
14344 sc->devinfo.device_id = pci_get_device(sc->dev);
14345 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
14346 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
14348 /* get the chip revision (chip metal comes from pci config space) */
14349 sc->devinfo.chip_id =
14350 sc->link_params.chip_id =
14351 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
14352 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
14353 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
14354 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
14356 /* force 57811 according to MISC register */
14357 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14358 if (CHIP_IS_57810(sc)) {
14359 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14360 (sc->devinfo.chip_id & 0x0000ffff));
14361 } else if (CHIP_IS_57810_MF(sc)) {
14362 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14363 (sc->devinfo.chip_id & 0x0000ffff));
14365 sc->devinfo.chip_id |= 0x1;
14368 BLOGD(sc, DBG_LOAD,
14369 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14370 sc->devinfo.chip_id,
14371 ((sc->devinfo.chip_id >> 16) & 0xffff),
14372 ((sc->devinfo.chip_id >> 12) & 0xf),
14373 ((sc->devinfo.chip_id >> 4) & 0xff),
14374 ((sc->devinfo.chip_id >> 0) & 0xf));
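/*
 * Illustrative decode of the chip_id word assembled above:
 *
 *   bits [31:16] chip number    bits [15:12] chip rev
 *   bits [11:4]  chip metal     bits [3:0]   bond id
 *
 * A hypothetical chip_id of 0x168e1034 would therefore print as
 * num=0x168e rev=0x1 metal=0x03 bond=0x4.
 */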
14376 val = (REG_RD(sc, 0x2874) & 0x55);
14377 if ((sc->devinfo.chip_id & 0x1) ||
14378 (CHIP_IS_E1(sc) && val) ||
14379 (CHIP_IS_E1H(sc) && (val == 0x55))) {
14380 sc->flags |= BXE_ONE_PORT_FLAG;
14381 BLOGD(sc, DBG_LOAD, "single port device\n");
14384 /* set the doorbell size */
14385 sc->doorbell_size = (1 << BXE_DB_SHIFT);
14387 /* determine whether the device is in 2 port or 4 port mode */
14388 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14389 if (CHIP_IS_E2E3(sc)) {
14391 * Read port4mode_en_ovwr[0]:
14392 * If 1, four port mode is in port4mode_en_ovwr[1].
14393 * If 0, four port mode is in port4mode_en[0].
14395 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14397 val = ((val >> 1) & 1);
14399 val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14402 sc->devinfo.chip_port_mode =
14403 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14405 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
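/*
 * Example of the override logic above (register values hypothetical):
 * reading 0x3 from PORT4MODE_EN_OVWR means the override is valid
 * (bit 0) and selects 4-port mode (bit 1); reading 0x0 defers the
 * decision to PORT4MODE_EN[0].
 */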
14408 /* get the function and path info for the device */
14409 bxe_get_function_num(sc);
14411 /* get the shared memory base address */
14412 sc->devinfo.shmem_base =
14413 sc->link_params.shmem_base =
14414 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14415 sc->devinfo.shmem2_base =
14416 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14417 MISC_REG_GENERIC_CR_0));
14419 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14420 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14422 if (!sc->devinfo.shmem_base) {
14423 /* this should ONLY prevent upcoming shmem reads */
14424 BLOGI(sc, "MCP not active\n");
14425 sc->flags |= BXE_NO_MCP_FLAG;
14429 /* make sure the shared memory contents are valid */
14430 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14431 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14432 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14433 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14436 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14438 /* get the bootcode version */
14439 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14440 snprintf(sc->devinfo.bc_ver_str,
14441 sizeof(sc->devinfo.bc_ver_str),
14443 ((sc->devinfo.bc_ver >> 24) & 0xff),
14444 ((sc->devinfo.bc_ver >> 16) & 0xff),
14445 ((sc->devinfo.bc_ver >> 8) & 0xff));
14446 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14448 /* get the multifunction configuration shmem base address */
14449 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14450 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14452 /* clean indirect addresses as they're not used */
14453 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14455 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14456 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14457 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14458 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14459 if (CHIP_IS_E1x(sc)) {
14460 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14461 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14462 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14463 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14467 * Enable internal target-read (in case we are probed after PF
14468 * FLR). Must be done prior to any BAR read access. Only for 57712 and up.
14471 if (!CHIP_IS_E1x(sc)) {
14472 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14476 /* get the nvram size */
14477 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14478 sc->devinfo.flash_size =
14479 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14480 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
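/*
 * Size-decode sketch (assuming NVRAM_1MB_SIZE is 1Mbit, i.e. 0x20000
 * bytes): the low bits of NVM_CFG4 select a power-of-two multiple of
 * that unit, so a field value of 3 would yield 0x20000 << 3 ==
 * 0x100000 bytes (1MB) of flash.
 */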
14482 /* get PCI capabilities */
14483 bxe_probe_pci_caps(sc);
14485 bxe_set_power_state(sc, PCI_PM_D0);
14487 /* get various configuration parameters from shmem */
14488 bxe_get_shmem_info(sc);
14490 if (sc->devinfo.pcie_msix_cap_reg != 0) {
14491 val = pci_read_config(sc->dev,
14492 (sc->devinfo.pcie_msix_cap_reg +
14495 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14497 sc->igu_sb_cnt = 1;
14500 sc->igu_base_addr = BAR_IGU_INTMEM;
14502 /* initialize IGU parameters */
14503 if (CHIP_IS_E1x(sc)) {
14504 sc->devinfo.int_block = INT_BLOCK_HC;
14505 sc->igu_dsb_id = DEF_SB_IGU_ID;
14506 sc->igu_base_sb = 0;
14508 sc->devinfo.int_block = INT_BLOCK_IGU;
14510 /* do not allow device reset during IGU info processing */
14511 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14513 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14515 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14518 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14520 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14521 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14522 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14524 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14529 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14530 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14531 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14536 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14537 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14538 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14540 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14543 rc = bxe_get_igu_cam_info(sc);
14545 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14553 * Get base FW non-default (fast path) status block ID. This value is
14554 * used to initialize the fw_sb_id saved on the fp/queue structure to
14555 * determine the id used by the FW.
14557 if (CHIP_IS_E1x(sc)) {
14558 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14561 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14562 * the same queue are indicated on the same IGU SB). So we prefer
14563 * FW and IGU SBs to be the same value.
14565 sc->base_fw_ndsb = sc->igu_base_sb;
14568 BLOGD(sc, DBG_LOAD,
14569 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14570 sc->igu_dsb_id, sc->igu_base_sb,
14571 sc->igu_sb_cnt, sc->base_fw_ndsb);
14573 elink_phy_probe(&sc->link_params);
14579 bxe_link_settings_supported(struct bxe_softc *sc,
14580 uint32_t switch_cfg)
14582 uint32_t cfg_size = 0;
14584 uint8_t port = SC_PORT(sc);
14586 /* aggregation of supported attributes of all external phys */
14587 sc->port.supported[0] = 0;
14588 sc->port.supported[1] = 0;
14590 switch (sc->link_params.num_phys) {
14592 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14596 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14600 if (sc->link_params.multi_phy_config &
14601 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14602 sc->port.supported[1] =
14603 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14604 sc->port.supported[0] =
14605 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14607 sc->port.supported[0] =
14608 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14609 sc->port.supported[1] =
14610 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14616 if (!(sc->port.supported[0] || sc->port.supported[1])) {
14617 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14619 dev_info.port_hw_config[port].external_phy_config),
14621 dev_info.port_hw_config[port].external_phy_config2));
14625 if (CHIP_IS_E3(sc))
14626 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14628 switch (switch_cfg) {
14629 case ELINK_SWITCH_CFG_1G:
14630 sc->port.phy_addr =
14631 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14633 case ELINK_SWITCH_CFG_10G:
14634 sc->port.phy_addr =
14635 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14638 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14639 sc->port.link_config[0]);
14644 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14646 /* mask what we support according to speed_cap_mask per configuration */
14647 for (idx = 0; idx < cfg_size; idx++) {
14648 if (!(sc->link_params.speed_cap_mask[idx] &
14649 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14650 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14653 if (!(sc->link_params.speed_cap_mask[idx] &
14654 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14655 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14658 if (!(sc->link_params.speed_cap_mask[idx] &
14659 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14660 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14663 if (!(sc->link_params.speed_cap_mask[idx] &
14664 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14665 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14668 if (!(sc->link_params.speed_cap_mask[idx] &
14669 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14670 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14673 if (!(sc->link_params.speed_cap_mask[idx] &
14674 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14675 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14678 if (!(sc->link_params.speed_cap_mask[idx] &
14679 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14680 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14683 if (!(sc->link_params.speed_cap_mask[idx] &
14684 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14685 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14689 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14690 sc->port.supported[0], sc->port.supported[1]);
14694 bxe_link_settings_requested(struct bxe_softc *sc)
14696 uint32_t link_config;
14698 uint32_t cfg_size = 0;
14700 sc->port.advertising[0] = 0;
14701 sc->port.advertising[1] = 0;
14703 switch (sc->link_params.num_phys) {
14713 for (idx = 0; idx < cfg_size; idx++) {
14714 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14715 link_config = sc->port.link_config[idx];
14717 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14718 case PORT_FEATURE_LINK_SPEED_AUTO:
14719 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14720 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14721 sc->port.advertising[idx] |= sc->port.supported[idx];
14722 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14723 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14724 sc->port.advertising[idx] |=
14725 (ELINK_SUPPORTED_100baseT_Half |
14726 ELINK_SUPPORTED_100baseT_Full);
14728 /* force 10G, no AN */
14729 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14730 sc->port.advertising[idx] |=
14731 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14736 case PORT_FEATURE_LINK_SPEED_10M_FULL:
14737 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14738 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14739 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14742 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14743 "speed_cap_mask=0x%08x\n",
14744 link_config, sc->link_params.speed_cap_mask[idx]);
14749 case PORT_FEATURE_LINK_SPEED_10M_HALF:
14750 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14751 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14752 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14753 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14756 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14757 "speed_cap_mask=0x%08x\n",
14758 link_config, sc->link_params.speed_cap_mask[idx]);
14763 case PORT_FEATURE_LINK_SPEED_100M_FULL:
14764 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14765 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14766 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14769 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14770 "speed_cap_mask=0x%08x\n",
14771 link_config, sc->link_params.speed_cap_mask[idx]);
14776 case PORT_FEATURE_LINK_SPEED_100M_HALF:
14777 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14778 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14779 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14780 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14783 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14784 "speed_cap_mask=0x%08x\n",
14785 link_config, sc->link_params.speed_cap_mask[idx]);
14790 case PORT_FEATURE_LINK_SPEED_1G:
14791 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14792 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14793 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14796 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14797 "speed_cap_mask=0x%08x\n",
14798 link_config, sc->link_params.speed_cap_mask[idx]);
14803 case PORT_FEATURE_LINK_SPEED_2_5G:
14804 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14805 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14806 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14809 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14810 "speed_cap_mask=0x%08x\n",
14811 link_config, sc->link_params.speed_cap_mask[idx]);
14816 case PORT_FEATURE_LINK_SPEED_10G_CX4:
14817 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14818 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14819 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14822 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14823 "speed_cap_mask=0x%08x\n",
14824 link_config, sc->link_params.speed_cap_mask[idx]);
14829 case PORT_FEATURE_LINK_SPEED_20G:
14830 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14834 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14835 "speed_cap_mask=0x%08x\n",
14836 link_config, sc->link_params.speed_cap_mask[idx]);
14837 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14838 sc->port.advertising[idx] = sc->port.supported[idx];
14842 sc->link_params.req_flow_ctrl[idx] =
14843 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14845 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14846 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14847 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14849 bxe_set_requested_fc(sc);
14853 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14854 "req_flow_ctrl=0x%x advertising=0x%x\n",
14855 sc->link_params.req_line_speed[idx],
14856 sc->link_params.req_duplex[idx],
14857 sc->link_params.req_flow_ctrl[idx],
14858 sc->port.advertising[idx]);
14863 bxe_get_phy_info(struct bxe_softc *sc)
14865 uint8_t port = SC_PORT(sc);
14866 uint32_t config = sc->port.config;
14869 /* shmem data already read in bxe_get_shmem_info() */
14871 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14872 "link_config0=0x%08x\n",
14873 sc->link_params.lane_config,
14874 sc->link_params.speed_cap_mask[0],
14875 sc->port.link_config[0]);
14877 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14878 bxe_link_settings_requested(sc);
14880 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14881 sc->link_params.feature_config_flags |=
14882 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14883 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14884 sc->link_params.feature_config_flags &=
14885 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14886 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14887 sc->link_params.feature_config_flags |=
14888 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14891 /* configure link feature according to nvram value */
14893 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14894 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14895 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14896 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14897 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14898 ELINK_EEE_MODE_ENABLE_LPI |
14899 ELINK_EEE_MODE_OUTPUT_TIME);
14901 sc->link_params.eee_mode = 0;
14904 /* get the media type */
14905 bxe_media_detect(sc);
14909 bxe_get_params(struct bxe_softc *sc)
14911 /* get user tunable params */
14912 bxe_get_tunable_params(sc);
14914 /* select the RX and TX ring sizes */
14915 sc->tx_ring_size = TX_BD_USABLE;
14916 sc->rx_ring_size = RX_BD_USABLE;
14918 /* XXX disable WoL */
14923 bxe_set_modes_bitmap(struct bxe_softc *sc)
14925 uint32_t flags = 0;
14927 if (CHIP_REV_IS_FPGA(sc)) {
14928 SET_FLAGS(flags, MODE_FPGA);
14929 } else if (CHIP_REV_IS_EMUL(sc)) {
14930 SET_FLAGS(flags, MODE_EMUL);
14932 SET_FLAGS(flags, MODE_ASIC);
14935 if (CHIP_IS_MODE_4_PORT(sc)) {
14936 SET_FLAGS(flags, MODE_PORT4);
14938 SET_FLAGS(flags, MODE_PORT2);
14941 if (CHIP_IS_E2(sc)) {
14942 SET_FLAGS(flags, MODE_E2);
14943 } else if (CHIP_IS_E3(sc)) {
14944 SET_FLAGS(flags, MODE_E3);
14945 if (CHIP_REV(sc) == CHIP_REV_Ax) {
14946 SET_FLAGS(flags, MODE_E3_A0);
14947 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14948 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14953 SET_FLAGS(flags, MODE_MF);
14954 switch (sc->devinfo.mf_info.mf_mode) {
14955 case MULTI_FUNCTION_SD:
14956 SET_FLAGS(flags, MODE_MF_SD);
14958 case MULTI_FUNCTION_SI:
14959 SET_FLAGS(flags, MODE_MF_SI);
14961 case MULTI_FUNCTION_AFEX:
14962 SET_FLAGS(flags, MODE_MF_AFEX);
14966 SET_FLAGS(flags, MODE_SF);
14969 #if defined(__LITTLE_ENDIAN)
14970 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14971 #else /* __BIG_ENDIAN */
14972 SET_FLAGS(flags, MODE_BIG_ENDIAN);
14975 INIT_MODE_FLAGS(sc) = flags;
14979 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14981 struct bxe_fastpath *fp;
14982 bus_addr_t busaddr;
14983 int max_agg_queues;
14985 bus_size_t max_size;
14986 bus_size_t max_seg_size;
14991 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14993 /* allocate the parent bus DMA tag */
14994 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14996 0, /* boundary limit */
14997 BUS_SPACE_MAXADDR, /* restricted low */
14998 BUS_SPACE_MAXADDR, /* restricted hi */
14999 NULL, /* addr filter() */
15000 NULL, /* addr filter() arg */
15001 BUS_SPACE_MAXSIZE_32BIT, /* max map size */
15002 BUS_SPACE_UNRESTRICTED, /* num discontinuous */
15003 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
15006 NULL, /* lock() arg */
15007 &sc->parent_dma_tag); /* returned dma tag */
15009 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
15013 /************************/
15014 /* DEFAULT STATUS BLOCK */
15015 /************************/
15017 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
15018 &sc->def_sb_dma, "default status block") != 0) {
15020 bus_dma_tag_destroy(sc->parent_dma_tag);
15024 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
15030 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
15031 &sc->eq_dma, "event queue") != 0) {
15033 bxe_dma_free(sc, &sc->def_sb_dma);
15035 bus_dma_tag_destroy(sc->parent_dma_tag);
15039 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
15045 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
15046 &sc->sp_dma, "slow path") != 0) {
15048 bxe_dma_free(sc, &sc->eq_dma);
15050 bxe_dma_free(sc, &sc->def_sb_dma);
15052 bus_dma_tag_destroy(sc->parent_dma_tag);
15056 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
15058 /*******************/
15059 /* SLOW PATH QUEUE */
15060 /*******************/
15062 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
15063 &sc->spq_dma, "slow path queue") != 0) {
15065 bxe_dma_free(sc, &sc->sp_dma);
15067 bxe_dma_free(sc, &sc->eq_dma);
15069 bxe_dma_free(sc, &sc->def_sb_dma);
15071 bus_dma_tag_destroy(sc->parent_dma_tag);
15075 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
15077 /***************************/
15078 /* FW DECOMPRESSION BUFFER */
15079 /***************************/
15081 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
15082 "fw decompression buffer") != 0) {
15084 bxe_dma_free(sc, &sc->spq_dma);
15086 bxe_dma_free(sc, &sc->sp_dma);
15088 bxe_dma_free(sc, &sc->eq_dma);
15090 bxe_dma_free(sc, &sc->def_sb_dma);
15092 bus_dma_tag_destroy(sc->parent_dma_tag);
15096 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
15099 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
15101 bxe_dma_free(sc, &sc->gz_buf_dma);
15103 bxe_dma_free(sc, &sc->spq_dma);
15105 bxe_dma_free(sc, &sc->sp_dma);
15107 bxe_dma_free(sc, &sc->eq_dma);
15109 bxe_dma_free(sc, &sc->def_sb_dma);
15111 bus_dma_tag_destroy(sc->parent_dma_tag);
15119 /* allocate DMA memory for each fastpath structure */
15120 for (i = 0; i < sc->num_queues; i++) {
15125 /*******************/
15126 /* FP STATUS BLOCK */
15127 /*******************/
15129 snprintf(buf, sizeof(buf), "fp %d status block", i);
15130 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
15131 &fp->sb_dma, buf) != 0) {
15132 /* XXX unwind and free previous fastpath allocations */
15133 BLOGE(sc, "Failed to alloc %s\n", buf);
15136 if (CHIP_IS_E2E3(sc)) {
15137 fp->status_block.e2_sb =
15138 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
15140 fp->status_block.e1x_sb =
15141 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
15145 /******************/
15146 /* FP TX BD CHAIN */
15147 /******************/
15149 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
15150 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
15151 &fp->tx_dma, buf) != 0) {
15152 /* XXX unwind and free previous fastpath allocations */
15153 BLOGE(sc, "Failed to alloc %s\n", buf);
15156 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
15159 /* link together the tx bd chain pages */
15160 for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
15161 /* index into the tx bd chain array to last entry per page */
15162 struct eth_tx_next_bd *tx_next_bd =
15163 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
15164 /* point to the next page and wrap from last page */
15165 busaddr = (fp->tx_dma.paddr +
15166 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
15167 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
15168 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
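/*
 * Ring-wrap sketch for the loop above: if TX_BD_NUM_PAGES were 4, the
 * last BD of page 0 would point at page 1, page 1 at page 2, page 2 at
 * page 3, and page 3 (j == 4, so 4 % 4 == 0) back at page 0, closing
 * the ring.
 */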
15171 /******************/
15172 /* FP RX BD CHAIN */
15173 /******************/
15175 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
15176 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
15177 &fp->rx_dma, buf) != 0) {
15178 /* XXX unwind and free previous fastpath allocations */
15179 BLOGE(sc, "Failed to alloc %s\n", buf);
15182 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
15185 /* link together the rx bd chain pages */
15186 for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
15187 /* index into the rx bd chain array to last entry per page */
15188 struct eth_rx_bd *rx_bd =
15189 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
15190 /* point to the next page and wrap from last page */
15191 busaddr = (fp->rx_dma.paddr +
15192 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
15193 rx_bd->addr_hi = htole32(U64_HI(busaddr));
15194 rx_bd->addr_lo = htole32(U64_LO(busaddr));
15197 /*******************/
15198 /* FP RX RCQ CHAIN */
15199 /*******************/
15201 snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
15202 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
15203 &fp->rcq_dma, buf) != 0) {
15204 /* XXX unwind and free previous fastpath allocations */
15205 BLOGE(sc, "Failed to alloc %s\n", buf);
15208 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
15211 /* link together the rcq chain pages */
15212 for (j = 1; j <= RCQ_NUM_PAGES; j++) {
15213 /* index into the rcq chain array to last entry per page */
15214 struct eth_rx_cqe_next_page *rx_cqe_next =
15215 (struct eth_rx_cqe_next_page *)
15216 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
15217 /* point to the next page and wrap from last page */
15218 busaddr = (fp->rcq_dma.paddr +
15219 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
15220 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
15221 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
15224 /*******************/
15225 /* FP RX SGE CHAIN */
15226 /*******************/
15228 snprintf(buf, sizeof(buf), "fp %d sge chain", i);
15229 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
15230 &fp->rx_sge_dma, buf) != 0) {
15231 /* XXX unwind and free previous fastpath allocations */
15232 BLOGE(sc, "Failed to alloc %s\n", buf);
15235 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
15238 /* link together the sge chain pages */
15239 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
15240 /* index into the sge chain array to last entry per page */
15241 struct eth_rx_sge *rx_sge =
15242 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
15243 /* point to the next page and wrap from last page */
15244 busaddr = (fp->rx_sge_dma.paddr +
15245 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
15246 rx_sge->addr_hi = htole32(U64_HI(busaddr));
15247 rx_sge->addr_lo = htole32(U64_LO(busaddr));
15250 /***********************/
15251 /* FP TX MBUF DMA MAPS */
15252 /***********************/
15254 /* set required sizes before mapping to conserve resources */
15255 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
15256 max_size = BXE_TSO_MAX_SIZE;
15257 max_segments = BXE_TSO_MAX_SEGMENTS;
15258 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
15260 max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
15261 max_segments = BXE_MAX_SEGMENTS;
15262 max_seg_size = MCLBYTES;
15265 /* create a dma tag for the tx mbufs */
15266 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15268 0, /* boundary limit */
15269 BUS_SPACE_MAXADDR, /* restricted low */
15270 BUS_SPACE_MAXADDR, /* restricted hi */
15271 NULL, /* addr filter() */
15272 NULL, /* addr filter() arg */
15273 max_size, /* max map size */
15274 max_segments, /* num discontinuous */
15275 max_seg_size, /* max seg size */
15278 NULL, /* lock() arg */
15279 &fp->tx_mbuf_tag); /* returned dma tag */
15281 /* XXX unwind and free previous fastpath allocations */
15282 BLOGE(sc, "Failed to create dma tag for "
15283 "'fp %d tx mbufs' (%d)\n",
15288 /* create dma maps for each of the tx mbuf clusters */
15289 for (j = 0; j < TX_BD_TOTAL; j++) {
15290 if (bus_dmamap_create(fp->tx_mbuf_tag,
15292 &fp->tx_mbuf_chain[j].m_map)) {
15293 /* XXX unwind and free previous fastpath allocations */
15294 BLOGE(sc, "Failed to create dma map for "
15295 "'fp %d tx mbuf %d' (%d)\n",
15301 /***********************/
15302 /* FP RX MBUF DMA MAPS */
15303 /***********************/
15305 /* create a dma tag for the rx mbufs */
15306 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15308 0, /* boundary limit */
15309 BUS_SPACE_MAXADDR, /* restricted low */
15310 BUS_SPACE_MAXADDR, /* restricted hi */
15311 NULL, /* addr filter() */
15312 NULL, /* addr filter() arg */
15313 MJUM9BYTES, /* max map size */
15314 1, /* num discontinuous */
15315 MJUM9BYTES, /* max seg size */
15318 NULL, /* lock() arg */
15319 &fp->rx_mbuf_tag); /* returned dma tag */
15321 /* XXX unwind and free previous fastpath allocations */
15322 BLOGE(sc, "Failed to create dma tag for "
15323 "'fp %d rx mbufs' (%d)\n",
15328 /* create dma maps for each of the rx mbuf clusters */
15329 for (j = 0; j < RX_BD_TOTAL; j++) {
15330 if (bus_dmamap_create(fp->rx_mbuf_tag,
15332 &fp->rx_mbuf_chain[j].m_map)) {
15333 /* XXX unwind and free previous fastpath allocations */
15334 BLOGE(sc, "Failed to create dma map for "
15335 "'fp %d rx mbuf %d' (%d)\n",
15341 /* create dma map for the spare rx mbuf cluster */
15342 if (bus_dmamap_create(fp->rx_mbuf_tag,
15344 &fp->rx_mbuf_spare_map)) {
15345 /* XXX unwind and free previous fastpath allocations */
15346 BLOGE(sc, "Failed to create dma map for "
15347 "'fp %d spare rx mbuf' (%d)\n",
15352 /***************************/
15353 /* FP RX SGE MBUF DMA MAPS */
15354 /***************************/
15356 /* create a dma tag for the rx sge mbufs */
15357 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15359 0, /* boundary limit */
15360 BUS_SPACE_MAXADDR, /* restricted low */
15361 BUS_SPACE_MAXADDR, /* restricted hi */
15362 NULL, /* addr filter() */
15363 NULL, /* addr filter() arg */
15364 BCM_PAGE_SIZE, /* max map size */
15365 1, /* num discontinuous */
15366 BCM_PAGE_SIZE, /* max seg size */
15369 NULL, /* lock() arg */
15370 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15372 /* XXX unwind and free previous fastpath allocations */
15373 BLOGE(sc, "Failed to create dma tag for "
15374 "'fp %d rx sge mbufs' (%d)\n",
15379 /* create dma maps for the rx sge mbuf clusters */
15380 for (j = 0; j < RX_SGE_TOTAL; j++) {
15381 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15383 &fp->rx_sge_mbuf_chain[j].m_map)) {
15384 /* XXX unwind and free previous fastpath allocations */
15385 BLOGE(sc, "Failed to create dma map for "
15386 "'fp %d rx sge mbuf %d' (%d)\n",
15392 /* create dma map for the spare rx sge mbuf cluster */
15393 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15395 &fp->rx_sge_mbuf_spare_map)) {
15396 /* XXX unwind and free previous fastpath allocations */
15397 BLOGE(sc, "Failed to create dma map for "
15398 "'fp %d spare rx sge mbuf' (%d)\n",
15403 /***************************/
15404 /* FP RX TPA MBUF DMA MAPS */
15405 /***************************/
15407 /* create dma maps for the rx tpa mbuf clusters */
15408 max_agg_queues = MAX_AGG_QS(sc);
15410 for (j = 0; j < max_agg_queues; j++) {
15411 if (bus_dmamap_create(fp->rx_mbuf_tag,
15413 &fp->rx_tpa_info[j].bd.m_map)) {
15414 /* XXX unwind and free previous fastpath allocations */
15415 BLOGE(sc, "Failed to create dma map for "
15416 "'fp %d rx tpa mbuf %d' (%d)\n",
15422 /* create dma map for the spare rx tpa mbuf cluster */
15423 if (bus_dmamap_create(fp->rx_mbuf_tag,
15425 &fp->rx_tpa_info_mbuf_spare_map)) {
15426 /* XXX unwind and free previous fastpath allocations */
15427 BLOGE(sc, "Failed to create dma map for "
15428 "'fp %d spare rx tpa mbuf' (%d)\n",
15433 bxe_init_sge_ring_bit_mask(fp);
15440 bxe_free_hsi_mem(struct bxe_softc *sc)
15442 struct bxe_fastpath *fp;
15443 int max_agg_queues;
15446 if (sc->parent_dma_tag == NULL) {
15447 return; /* assume nothing was allocated */
15450 for (i = 0; i < sc->num_queues; i++) {
15453 /*******************/
15454 /* FP STATUS BLOCK */
15455 /*******************/
15457 bxe_dma_free(sc, &fp->sb_dma);
15458 memset(&fp->status_block, 0, sizeof(fp->status_block));
15460 /******************/
15461 /* FP TX BD CHAIN */
15462 /******************/
15464 bxe_dma_free(sc, &fp->tx_dma);
15465 fp->tx_chain = NULL;
15467 /******************/
15468 /* FP RX BD CHAIN */
15469 /******************/
15471 bxe_dma_free(sc, &fp->rx_dma);
15472 fp->rx_chain = NULL;
15474 /*******************/
15475 /* FP RX RCQ CHAIN */
15476 /*******************/
15478 bxe_dma_free(sc, &fp->rcq_dma);
15479 fp->rcq_chain = NULL;
15481 /*******************/
15482 /* FP RX SGE CHAIN */
15483 /*******************/
15485 bxe_dma_free(sc, &fp->rx_sge_dma);
15486 fp->rx_sge_chain = NULL;
15488 /***********************/
15489 /* FP TX MBUF DMA MAPS */
15490 /***********************/
15492 if (fp->tx_mbuf_tag != NULL) {
15493 for (j = 0; j < TX_BD_TOTAL; j++) {
15494 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15495 bus_dmamap_unload(fp->tx_mbuf_tag,
15496 fp->tx_mbuf_chain[j].m_map);
15497 bus_dmamap_destroy(fp->tx_mbuf_tag,
15498 fp->tx_mbuf_chain[j].m_map);
15502 bus_dma_tag_destroy(fp->tx_mbuf_tag);
15503 fp->tx_mbuf_tag = NULL;
15506 /***********************/
15507 /* FP RX MBUF DMA MAPS */
15508 /***********************/
15510 if (fp->rx_mbuf_tag != NULL) {
15511 for (j = 0; j < RX_BD_TOTAL; j++) {
15512 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15513 bus_dmamap_unload(fp->rx_mbuf_tag,
15514 fp->rx_mbuf_chain[j].m_map);
15515 bus_dmamap_destroy(fp->rx_mbuf_tag,
15516 fp->rx_mbuf_chain[j].m_map);
15520 if (fp->rx_mbuf_spare_map != NULL) {
15521 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15522 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15525 /***************************/
15526 /* FP RX TPA MBUF DMA MAPS */
15527 /***************************/
15529 max_agg_queues = MAX_AGG_QS(sc);
15531 for (j = 0; j < max_agg_queues; j++) {
15532 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15533 bus_dmamap_unload(fp->rx_mbuf_tag,
15534 fp->rx_tpa_info[j].bd.m_map);
15535 bus_dmamap_destroy(fp->rx_mbuf_tag,
15536 fp->rx_tpa_info[j].bd.m_map);
15540 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15541 bus_dmamap_unload(fp->rx_mbuf_tag,
15542 fp->rx_tpa_info_mbuf_spare_map);
15543 bus_dmamap_destroy(fp->rx_mbuf_tag,
15544 fp->rx_tpa_info_mbuf_spare_map);
15547 bus_dma_tag_destroy(fp->rx_mbuf_tag);
15548 fp->rx_mbuf_tag = NULL;
15551 /***************************/
15552 /* FP RX SGE MBUF DMA MAPS */
15553 /***************************/
15555 if (fp->rx_sge_mbuf_tag != NULL) {
15556 for (j = 0; j < RX_SGE_TOTAL; j++) {
15557 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15558 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15559 fp->rx_sge_mbuf_chain[j].m_map);
15560 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15561 fp->rx_sge_mbuf_chain[j].m_map);
15565 if (fp->rx_sge_mbuf_spare_map != NULL) {
15566 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15567 fp->rx_sge_mbuf_spare_map);
15568 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15569 fp->rx_sge_mbuf_spare_map);
15572 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15573 fp->rx_sge_mbuf_tag = NULL;
15577 /***************************/
15578 /* FW DECOMPRESSION BUFFER */
15579 /***************************/
15581 bxe_dma_free(sc, &sc->gz_buf_dma);
15583 free(sc->gz_strm, M_DEVBUF);
15584 sc->gz_strm = NULL;
15586 /*******************/
15587 /* SLOW PATH QUEUE */
15588 /*******************/
15590 bxe_dma_free(sc, &sc->spq_dma);
15597 bxe_dma_free(sc, &sc->sp_dma);
15604 bxe_dma_free(sc, &sc->eq_dma);
15607 /************************/
15608 /* DEFAULT STATUS BLOCK */
15609 /************************/
15611 bxe_dma_free(sc, &sc->def_sb_dma);
15614 bus_dma_tag_destroy(sc->parent_dma_tag);
15615 sc->parent_dma_tag = NULL;
15619 * A previous driver DMAE transaction may have occurred when the pre-boot
15620 * stage ended and boot began. This would invalidate the addresses of the
15621 * transaction, resulting in the was-error bit being set in the PCI and
15622 * causing all hw-to-host PCIe transactions to time out. If this happened we
15623 * want to clear the interrupt which detected this from the pglueb and the was-done bit
15626 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15630 if (!CHIP_IS_E1x(sc)) {
15631 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15632 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15633 BLOGD(sc, DBG_LOAD,
15634 "Clearing 'was-error' bit that was set in pglueb");
15635 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15641 bxe_prev_mcp_done(struct bxe_softc *sc)
15643 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15644 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15646 BLOGE(sc, "MCP response failure, aborting\n");
15653 static struct bxe_prev_list_node *
15654 bxe_prev_path_get_entry(struct bxe_softc *sc)
15656 struct bxe_prev_list_node *tmp;
15658 LIST_FOREACH(tmp, &bxe_prev_list, node) {
15659 if ((sc->pcie_bus == tmp->bus) &&
15660 (sc->pcie_device == tmp->slot) &&
15661 (SC_PATH(sc) == tmp->path)) {
15670 bxe_prev_is_path_marked(struct bxe_softc *sc)
15672 struct bxe_prev_list_node *tmp;
15675 mtx_lock(&bxe_prev_mtx);
15677 tmp = bxe_prev_path_get_entry(sc);
15680 BLOGD(sc, DBG_LOAD,
15681 "Path %d/%d/%d was marked by AER\n",
15682 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15685 BLOGD(sc, DBG_LOAD,
15686 "Path %d/%d/%d was already cleaned from previous drivers\n",
15687 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15691 mtx_unlock(&bxe_prev_mtx);
15697 bxe_prev_mark_path(struct bxe_softc *sc,
15698 uint8_t after_undi)
15700 struct bxe_prev_list_node *tmp;
15702 mtx_lock(&bxe_prev_mtx);
15704 /* Check whether the entry for this path already exists */
15705 tmp = bxe_prev_path_get_entry(sc);
15708 BLOGD(sc, DBG_LOAD,
15709 "Re-marking AER in path %d/%d/%d\n",
15710 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15712 BLOGD(sc, DBG_LOAD,
15713 "Removing AER indication from path %d/%d/%d\n",
15714 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15718 mtx_unlock(&bxe_prev_mtx);
15722 mtx_unlock(&bxe_prev_mtx);
15724 /* Create an entry for this path and add it */
15725 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15726 (M_NOWAIT | M_ZERO));
15728 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15732 tmp->bus = sc->pcie_bus;
15733 tmp->slot = sc->pcie_device;
15734 tmp->path = SC_PATH(sc);
15736 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15738 mtx_lock(&bxe_prev_mtx);
15740 BLOGD(sc, DBG_LOAD,
15741 "Marked path %d/%d/%d - finished previous unload\n",
15742 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15743 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15745 mtx_unlock(&bxe_prev_mtx);
15751 bxe_do_flr(struct bxe_softc *sc)
15755 /* only E2 and onwards support FLR */
15756 if (CHIP_IS_E1x(sc)) {
15757 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15761 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15762 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15763 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15764 sc->devinfo.bc_ver);
15768 /* Wait for the Transaction Pending bit to clear */
15769 for (i = 0; i < 4; i++) {
15771 DELAY(((1 << (i - 1)) * 100) * 1000);
15774 if (!bxe_is_pcie_pending(sc)) {
15779 BLOGE(sc, "PCIE transaction is not cleared, "
15780 "proceeding with reset anyway\n");
15784 BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15785 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15790 struct bxe_mac_vals {
15791 uint32_t xmac_addr;
15793 uint32_t emac_addr;
15795 uint32_t umac_addr;
15797 uint32_t bmac_addr;
15798 uint32_t bmac_val[2];
15802 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15803 struct bxe_mac_vals *vals)
15805 uint32_t val, base_addr, offset, mask, reset_reg;
15806 uint8_t mac_stopped = FALSE;
15807 uint8_t port = SC_PORT(sc);
15808 uint32_t wb_data[2];
15810 /* reset addresses as they also mark which values were changed */
15811 vals->bmac_addr = 0;
15812 vals->umac_addr = 0;
15813 vals->xmac_addr = 0;
15814 vals->emac_addr = 0;
15816 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15818 if (!CHIP_IS_E3(sc)) {
15819 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15820 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15821 if ((mask & reset_reg) && val) {
15822 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15823 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15824 : NIG_REG_INGRESS_BMAC0_MEM;
15825 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15826 : BIGMAC_REGISTER_BMAC_CONTROL;
15829 * use rd/wr since we cannot use dmae. This is safe
15830 * since MCP won't access the bus due to the request
15831 * to unload, and no function on the path can be
15832 * loaded at this time.
15834 wb_data[0] = REG_RD(sc, base_addr + offset);
15835 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15836 vals->bmac_addr = base_addr + offset;
15837 vals->bmac_val[0] = wb_data[0];
15838 vals->bmac_val[1] = wb_data[1];
15839 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15840 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15841 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15844 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15845 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15846 vals->emac_val = REG_RD(sc, vals->emac_addr);
15847 REG_WR(sc, vals->emac_addr, 0);
15848 mac_stopped = TRUE;
15850 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15851 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15852 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15853 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15854 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15855 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15856 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15857 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15858 REG_WR(sc, vals->xmac_addr, 0);
15859 mac_stopped = TRUE;
15862 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15863 if (mask & reset_reg) {
15864 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15865 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15866 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15867 vals->umac_val = REG_RD(sc, vals->umac_addr);
15868 REG_WR(sc, vals->umac_addr, 0);
15869 mac_stopped = TRUE;
15878 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15879 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
15880 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
15881 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
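/*
 * Pack/unpack sketch for the macros above: the producer word holds the
 * BD producer in bits [31:16] and the RCQ producer in bits [15:0], so
 * BXE_PREV_UNDI_PROD(0x0010, 0x0020) == 0x00200010, from which
 * BXE_PREV_UNDI_RCQ() and BXE_PREV_UNDI_BD() recover 0x0010 and 0x0020.
 */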
15884 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15889 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15891 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15892 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15894 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15895 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15897 BLOGD(sc, DBG_LOAD,
15898 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15903 bxe_prev_unload_common(struct bxe_softc *sc)
15905 uint32_t reset_reg, tmp_reg = 0, rc;
15906 uint8_t prev_undi = FALSE;
15907 struct bxe_mac_vals mac_vals;
15908 uint32_t timer_count = 1000;
15912 * It is possible that a previous function received the 'common' answer but
15913 * has not loaded yet, therefore creating a scenario in which multiple
15914 * functions receive 'common' on the same path.
15916 BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15918 memset(&mac_vals, 0, sizeof(mac_vals));
15920 if (bxe_prev_is_path_marked(sc)) {
15921 return (bxe_prev_mcp_done(sc));
15924 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15926 /* Reset should be performed after BRB is emptied */
15927 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15928 /* Close the MAC Rx to prevent BRB from filling up */
15929 bxe_prev_unload_close_mac(sc, &mac_vals);
15931 /* close LLH filters towards the BRB */
15932 elink_set_rx_filter(&sc->link_params, 0);
15935 * Check if the UNDI driver was previously loaded.
15936 * UNDI driver initializes CID offset for normal bell to 0x7
15938 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15939 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15940 if (tmp_reg == 0x7) {
15941 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15943 /* clear the UNDI indication */
15944 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15945 /* clear possible idle check errors */
15946 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15950 /* wait until BRB is empty */
15951 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15952 while (timer_count) {
15953 prev_brb = tmp_reg;
15955 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15960 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15962 /* reset timer as long as BRB actually gets emptied */
15963 if (prev_brb > tmp_reg) {
15964 timer_count = 1000;
15969 /* If UNDI resides in memory, manually increment it */
15971 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15977 if (!timer_count) {
15978 BLOGE(sc, "Failed to empty BRB\n");
15982 /* No packets are in the pipeline, path is ready for reset */
15983 bxe_reset_common(sc);
15985 if (mac_vals.xmac_addr) {
15986 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15988 if (mac_vals.umac_addr) {
15989 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15991 if (mac_vals.emac_addr) {
15992 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15994 if (mac_vals.bmac_addr) {
15995 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15996 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15999 rc = bxe_prev_mark_path(sc, prev_undi);
16001 bxe_prev_mcp_done(sc);
16005 return (bxe_prev_mcp_done(sc));
16009 bxe_prev_unload_uncommon(struct bxe_softc *sc)
16013 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
16015 /* Test if previous unload process was already finished for this path */
16016 if (bxe_prev_is_path_marked(sc)) {
16017 return (bxe_prev_mcp_done(sc));
16020 BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
16023 * If function has FLR capabilities, and existing FW version matches
16024 * the one required, then FLR will be sufficient to clean any residue
16025 * left by previous driver
16027 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
16029 /* fw version is good */
16030 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
16031 rc = bxe_do_flr(sc);
16035 /* FLR was performed */
16036 BLOGD(sc, DBG_LOAD, "FLR successful\n");
16040 BLOGD(sc, DBG_LOAD, "Could not FLR\n");
16042 /* Close the MCP request, return failure */
16043 rc = bxe_prev_mcp_done(sc);
16045 rc = BXE_PREV_WAIT_NEEDED;
16052 bxe_prev_unload(struct bxe_softc *sc)
16054 int time_counter = 10;
16055 uint32_t fw, hw_lock_reg, hw_lock_val;
16059 * Clear HW from errors which may have resulted from an interrupted
16060 * DMAE transaction.
16062 bxe_prev_interrupted_dmae(sc);
16064 /* Release previously held locks */
16066 (SC_FUNC(sc) <= 5) ?
16067 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
16068 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
16070 hw_lock_val = (REG_RD(sc, hw_lock_reg));
16072 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
16073 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
16074 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
16075 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
16077 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
16078 REG_WR(sc, hw_lock_reg, 0xffffffff);
16080 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
16083 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
16084 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
16085 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
16089 /* Lock MCP using an unload request */
16090 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
16092 BLOGE(sc, "MCP response failure, aborting\n");
16097 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
16098 rc = bxe_prev_unload_common(sc);
16102 /* non-common reply from MCP might require looping */
16103 rc = bxe_prev_unload_uncommon(sc);
16104 if (rc != BXE_PREV_WAIT_NEEDED) {
16109 } while (--time_counter);
16111 if (!time_counter || rc) {
16112 BLOGE(sc, "Failed to unload previous driver!\n");
16120 bxe_dcbx_set_state(struct bxe_softc *sc,
16122 uint32_t dcbx_enabled)
16124 if (!CHIP_IS_E1x(sc)) {
16125 sc->dcb_state = dcb_on;
16126 sc->dcbx_enabled = dcbx_enabled;
16128 sc->dcb_state = FALSE;
16129 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
16131 BLOGD(sc, DBG_LOAD,
16132 "DCB state [%s:%s]\n",
16133 dcb_on ? "ON" : "OFF",
16134 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
16135 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
16136 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
16137 "on-chip with negotiation" : "invalid");
16140 /* must be called after sriov-enable */
16142 bxe_set_qm_cid_count(struct bxe_softc *sc)
16144 int cid_count = BXE_L2_MAX_CID(sc);
16146 if (IS_SRIOV(sc)) {
16147 cid_count += BXE_VF_CIDS;
16150 if (CNIC_SUPPORT(sc)) {
16151 cid_count += CNIC_CID_MAX;
16154 return (roundup(cid_count, QM_CID_ROUND));
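/*
 * Rounding sketch (assuming QM_CID_ROUND is 1024): a cid_count of 1
 * rounds up to 1024 and a cid_count of 1025 rounds up to 2048; the QM
 * consumes CID space in chunks of this size.
 */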
16158 bxe_init_multi_cos(struct bxe_softc *sc)
16162 uint32_t pri_map = 0; /* XXX change to user config */
16164 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
16165 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
16166 if (cos < sc->max_cos) {
16167 sc->prio_to_cos[pri] = cos;
16169 BLOGW(sc, "Invalid COS %d for priority %d "
16170 "(max COS is %d), setting to 0\n",
16171 cos, pri, (sc->max_cos - 1));
16172 sc->prio_to_cos[pri] = 0;
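/*
 * pri_map encoding sketch: priority p occupies nibble [4p+3:4p] of the
 * map. A hypothetical pri_map of 0x00003210 would assign priorities
 * 0..3 to COS 0..3 and leave priorities 4..7 on COS 0.
 */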
16178 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
16180 struct bxe_softc *sc;
16184 error = sysctl_handle_int(oidp, &result, 0, req);
16186 if (error || !req->newptr) {
16192 sc = (struct bxe_softc *)arg1;
16194 BLOGI(sc, "... dumping driver state ...\n");
16195 temp = SHMEM2_RD(sc, temperature_in_half_celsius);
16196 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
16203 bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS)
16205 struct bxe_softc *sc;
16209 error = sysctl_handle_int(oidp, &result, 0, req);
16211 if (error || !req->newptr) {
16216 sc = (struct bxe_softc *)arg1;
16218 BLOGI(sc, "... grcdump start ...\n");
16220 BLOGI(sc, "... grcdump done ...\n");
static int
bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
{
    struct bxe_softc *sc = (struct bxe_softc *)arg1;
    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
    uint32_t *offset;
    uint64_t value = 0;
    int index = (int)arg2;

    if (index >= BXE_NUM_ETH_STATS) {
        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
        return (-1);
    }

    offset = (eth_stats + bxe_eth_stats_arr[index].offset);

    switch (bxe_eth_stats_arr[index].size) {
    case 4:
        value = (uint64_t)*offset;
        break;
    case 8:
        value = HILO_U64(*offset, *(offset + 1));
        break;
    default:
        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
              index, bxe_eth_stats_arr[index].size);
        return (-1);
    }

    return (sysctl_handle_64(oidp, &value, 0, req));
}
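/*
 * Note on HILO_U64 above: 8-byte statistics are stored as two 32-bit
 * words, and the macro (as the name suggests) combines a high word and a
 * low word, with the first word in the stats block treated as the high
 * half, so the handler can hand a single 64-bit value to sysctl.
 */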
static int
bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
{
    struct bxe_softc *sc = (struct bxe_softc *)arg1;
    uint32_t *eth_stats;
    uint32_t *offset;
    uint64_t value = 0;
    uint32_t q_stat = (uint32_t)arg2;
    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
    uint32_t index = (q_stat & 0xffff);

    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;

    if (index >= BXE_NUM_ETH_Q_STATS) {
        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
        return (-1);
    }

    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);

    switch (bxe_eth_q_stats_arr[index].size) {
    case 4:
        value = (uint64_t)*offset;
        break;
    case 8:
        value = HILO_U64(*offset, *(offset + 1));
        break;
    default:
        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
              index, bxe_eth_q_stats_arr[index].size);
        return (-1);
    }

    return (sysctl_handle_64(oidp, &value, 0, req));
}
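/*
 * The per-queue handler receives both indices packed into arg2: the
 * fastpath (queue) index in the upper 16 bits and the statistic index in
 * the lower 16 bits. For example, queue 2 / statistic 5 is encoded as
 * ((2 << 16) | 5) == 0x00020005; see the matching encode in
 * bxe_add_sysctls() below.
 */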
static void
bxe_add_sysctls(struct bxe_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *queue_top, *queue;
    struct sysctl_oid_list *queue_top_children, *queue_children;
    char queue_num_buf[32];
    uint32_t q_stat;
    int i, j;

    ctx = device_get_sysctl_ctx(sc->dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
                      "version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
                      "bootcode version");

    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
             BCM_5710_FW_MAJOR_VERSION,
             BCM_5710_FW_MINOR_VERSION,
             BCM_5710_FW_REVISION_VERSION,
             BCM_5710_FW_ENGINEERING_VERSION);
    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
                      CTLFLAG_RD, &sc->fw_ver_str, 0,
                      "firmware version");

    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
                                                                "Unknown"));
    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
                      CTLFLAG_RD, &sc->mf_mode_str, 0,
                      "multifunction mode");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
                    "multifunction vnics per port");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
                      CTLFLAG_RD, &sc->mac_addr_str, 0,
                      "mac address");

    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
                                              "???GT/s"),
        sc->devinfo.pcie_link_width);
    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
                      CTLFLAG_RD, &sc->pci_link_str, 0,
                      "pci link status");

    sc->debug = bxe_debug;
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
                    CTLFLAG_RW, &sc->debug, 0,
                    "debug logging mode");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump",
                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
                    bxe_sysctl_trigger_grcdump, "IU",
                    "set by driver when a grcdump is needed");

    sc->grcdump_done = 0;
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
                    CTLFLAG_RW, &sc->grcdump_done, 0,
                    "set by driver when grcdump is done");

    sc->rx_budget = bxe_rx_budget;
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
                    CTLFLAG_RW, &sc->rx_budget, 0,
                    "rx processing budget");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
                    bxe_sysctl_state, "IU", "dump driver state");

    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
                        bxe_eth_stats_arr[i].string,
                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
                        bxe_sysctl_eth_stat, "LU",
                        bxe_eth_stats_arr[i].string);
    }

    /* add a new parent node for all queues "dev.bxe.#.queue" */
    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
                                CTLFLAG_RD, NULL, "queue");
    queue_top_children = SYSCTL_CHILDREN(queue_top);

    for (i = 0; i < sc->num_queues; i++) {
        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
                                queue_num_buf, CTLFLAG_RD, NULL,
                                "single queue");
        queue_children = SYSCTL_CHILDREN(queue);

        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
            q_stat = ((i << 16) | j);
            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
                            bxe_eth_q_stats_arr[j].string,
                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
                            bxe_sysctl_eth_q_stat, "LU",
                            bxe_eth_q_stats_arr[j].string);
        }
    }
}
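/*
 * With the tree built above, the resulting OIDs look roughly like the
 * following (unit 0 and queue 1 shown as an example; the statistic names
 * come from bxe_eth_stats_arr and bxe_eth_q_stats_arr):
 *
 *   dev.bxe.0.version
 *   dev.bxe.0.rx_budget
 *   dev.bxe.0.<stat name>
 *   dev.bxe.0.queue.1.<queue stat name>
 */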
/*
 * Device attach function.
 *
 * Allocates device resources, performs secondary chip identification, and
 * initializes driver instance variables. This function is called from driver
 * load after a successful probe.
 *
 * Returns:
 *   0 = Success, >0 = Failure
 */
static int
bxe_attach(device_t dev)
{
    struct bxe_softc *sc;

    sc = device_get_softc(dev);

    BLOGD(sc, DBG_LOAD, "Starting attach...\n");

    sc->state = BXE_STATE_CLOSED;

    sc->dev  = dev;
    sc->unit = device_get_unit(dev);

    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);

    sc->pcie_bus    = pci_get_bus(dev);
    sc->pcie_device = pci_get_slot(dev);
    sc->pcie_func   = pci_get_function(dev);

    /* enable bus master capability */
    pci_enable_busmaster(dev);

    /* get the BARs */
    if (bxe_allocate_bars(sc) != 0) {
        return (ENXIO);
    }

    /* initialize the mutexes */
    bxe_init_mutexes(sc);

    /* prepare the periodic callout */
    callout_init(&sc->periodic_callout, 0);

    /* prepare the chip taskqueue */
    sc->chip_tq_flags = CHIP_TQ_NONE;
    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
             "bxe%d_chip_tq", sc->unit);
    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
                                   taskqueue_thread_enqueue,
                                   &sc->chip_tq);
    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
                            "%s", sc->chip_tq_name);

    /* get device info and set params */
    if (bxe_get_device_info(sc) != 0) {
        BLOGE(sc, "getting device info\n");
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* get final misc params */
    bxe_get_params(sc);

    /* set the default MTU (changed via ifconfig) */
    sc->mtu = ETHERMTU;

    bxe_set_modes_bitmap(sc);

    /*
     * XXX
     * If in AFEX mode and the function is configured for FCoE
     * then bail... no L2 allowed.
     */

    /* get phy settings from shmem and 'and' against admin settings */
    bxe_get_phy_info(sc);

    /* initialize the FreeBSD ifnet interface */
    if (bxe_init_ifnet(sc) != 0) {
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    if (bxe_add_cdev(sc) != 0) {
        if (sc->ifnet != NULL) {
            ether_ifdetach(sc->ifnet);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* allocate device interrupts */
    if (bxe_interrupt_alloc(sc) != 0) {
        bxe_del_cdev(sc);
        if (sc->ifnet != NULL) {
            ether_ifdetach(sc->ifnet);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* allocate ilt */
    if (bxe_alloc_ilt_mem(sc) != 0) {
        bxe_interrupt_free(sc);
        bxe_del_cdev(sc);
        if (sc->ifnet != NULL) {
            ether_ifdetach(sc->ifnet);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* allocate the host hardware/software hsi structures */
    if (bxe_alloc_hsi_mem(sc) != 0) {
        bxe_free_ilt_mem(sc);
        bxe_interrupt_free(sc);
        bxe_del_cdev(sc);
        if (sc->ifnet != NULL) {
            ether_ifdetach(sc->ifnet);
        }
        ifmedia_removeall(&sc->ifmedia);
        bxe_release_mutexes(sc);
        bxe_deallocate_bars(sc);
        pci_disable_busmaster(dev);
        return (ENXIO);
    }

    /* need to reset chip if UNDI was active */
    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
        /* init fw_seq */
        sc->fw_seq =
            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
             DRV_MSG_SEQ_NUMBER_MASK);
        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
        bxe_prev_unload(sc);
    }

#if 1
    /* XXX */
    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
#else
    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
        bxe_dcbx_init_params(sc);
    } else {
        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
    }
#endif

    /* calculate qm_cid_count */
    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);

    /* initialize default priority to class-of-service mapping */
    bxe_init_multi_cos(sc);

    bxe_add_sysctls(sc);

    return (0);
}
/*
 * Device detach function.
 *
 * Stops the controller, resets the controller, and releases resources.
 *
 * Returns:
 *   0 = Success, >0 = Failure
 */
static int
bxe_detach(device_t dev)
{
    struct bxe_softc *sc;
    struct ifnet *ifp;

    sc = device_get_softc(dev);

    BLOGD(sc, DBG_LOAD, "Starting detach...\n");

    ifp = sc->ifnet;
    if (ifp != NULL && ifp->if_vlantrunk != NULL) {
        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
        return (EBUSY);
    }

    bxe_del_cdev(sc);

    /* stop the periodic callout */
    bxe_periodic_stop(sc);

    /* stop the chip taskqueue */
    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
    if (sc->chip_tq) {
        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
        taskqueue_free(sc->chip_tq);
        sc->chip_tq = NULL;
    }

    /* stop and reset the controller if it was open */
    if (sc->state != BXE_STATE_CLOSED) {
        BXE_CORE_LOCK(sc);
        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
        BXE_CORE_UNLOCK(sc);
    }

    /* release the network interface */
    if (ifp != NULL) {
        ether_ifdetach(ifp);
    }
    ifmedia_removeall(&sc->ifmedia);

    /* XXX do the following based on driver state... */

    /* free the host hardware/software hsi structures */
    bxe_free_hsi_mem(sc);

    /* free ilt */
    bxe_free_ilt_mem(sc);

    /* release the interrupts */
    bxe_interrupt_free(sc);

    /* Release the mutexes */
    bxe_release_mutexes(sc);

    /* Release the PCIe BAR mapped memory */
    bxe_deallocate_bars(sc);

    /* Release the FreeBSD interface. */
    if (sc->ifnet != NULL) {
        if_free(sc->ifnet);
    }

    pci_disable_busmaster(dev);

    return (0);
}
/*
 * Device shutdown function.
 *
 * Stops and resets the controller.
 *
 * Returns:
 *   0 = Success
 */
static int
bxe_shutdown(device_t dev)
{
    struct bxe_softc *sc;

    sc = device_get_softc(dev);

    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");

    /* stop the periodic callout */
    bxe_periodic_stop(sc);

    BXE_CORE_LOCK(sc);
    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
    BXE_CORE_UNLOCK(sc);

    return (0);
}
static void
bxe_igu_ack_sb(struct bxe_softc *sc,
               uint8_t          igu_sb_id,
               uint8_t          segment,
               uint16_t         index,
               uint8_t          op,
               uint8_t          update)
{
    uint32_t igu_addr = sc->igu_base_addr;
    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
}
static void
bxe_igu_clear_sb_gen(struct bxe_softc *sc,
                     uint8_t          func,
                     uint8_t          idu_sb_id,
                     uint8_t          is_pf)
{
    uint32_t data, ctl, cnt = 100;
    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
    uint32_t igu_addr_ctl  = IGU_REG_COMMAND_REG_CTRL;
    uint32_t igu_addr_ack  = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
    uint32_t sb_bit        = 1 << (idu_sb_id%32);
    uint32_t func_encode   = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
    uint32_t addr_encode   = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

    /* Not supported in BC mode */
    if (CHIP_INT_MODE_IS_BC(sc)) {
        return;
    }

    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
            IGU_REGULAR_CLEANUP_SET |
            IGU_REGULAR_BCLEANUP);

    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));

    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
          data, igu_addr_data);
    REG_WR(sc, igu_addr_data, data);

    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    mb();

    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
          ctl, igu_addr_ctl);
    REG_WR(sc, igu_addr_ctl, ctl);

    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    mb();

    /* wait for clean up to finish */
    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
        DELAY(20000);
    }

    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
        BLOGD(sc, DBG_LOAD,
              "Unable to finish IGU cleanup: "
              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
    }
}
static void
bxe_igu_clear_sb(struct bxe_softc *sc,
                 uint8_t          idu_sb_id)
{
    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
}
/*******************/
/* ECORE CALLBACKS */
/*******************/
static void
bxe_reset_common(struct bxe_softc *sc)
{
    uint32_t val = 0x1400;

    /* reset_common */
    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);

    if (CHIP_IS_E3(sc)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
}
static void
bxe_common_init_phy(struct bxe_softc *sc)
{
    uint32_t shmem_base[2];
    uint32_t shmem2_base[2];

    /* Avoid common init in case MFW supports LFA */
    if (SHMEM2_RD(sc, size) >
        (uint32_t)offsetof(struct shmem2_region,
                           lfa_host_addr[SC_PORT(sc)])) {
        return;
    }

    shmem_base[0]  = sc->devinfo.shmem_base;
    shmem2_base[0] = sc->devinfo.shmem2_base;

    if (!CHIP_IS_E1x(sc)) {
        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
    }

    bxe_acquire_phy_lock(sc);
    elink_common_init_phy(sc, shmem_base, shmem2_base,
                          sc->devinfo.chip_id, 0);
    bxe_release_phy_lock(sc);
}
static void
bxe_pf_disable(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    val &= ~IGU_PF_CONF_FUNC_EN;

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
}
static void
bxe_init_pxp(struct bxe_softc *sc)
{
    uint16_t devctl;
    int r_order, w_order;

    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);

    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);

    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);

    if (sc->mrrs == -1) {
        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
    } else {
        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
        r_order = sc->mrrs;
    }

    ecore_init_pxp_arb(sc, r_order, w_order);
}
static uint32_t
bxe_get_pretend_reg(struct bxe_softc *sc)
{
    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
    return (base + (SC_ABS_FUNC(sc)) * stride);
}
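/*
 * Worked example: the per-function pretend registers are laid out at a
 * fixed stride, so for absolute function 3 the register address is
 * PXP2_REG_PGL_PRETEND_FUNC_F0 + 3 * (F1 - F0), i.e. the fourth entry in
 * the F0..F7 array.
 */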
/*
 * Called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0..7.
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination.
 */
static int
bxe_pretend_func(struct bxe_softc *sc,
                 uint16_t         pretend_func_val)
{
    uint32_t pretend_reg;

    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
        return (-1);
    }

    /* get my own pretend register */
    pretend_reg = bxe_get_pretend_reg(sc);
    REG_WR(sc, pretend_reg, pretend_func_val);
    REG_RD(sc, pretend_reg);
    return (0);
}
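/*
 * Usage note: callers always bracket pretended accesses with a second
 * call that restores the function's own identity, e.g.:
 *
 *   bxe_pretend_func(sc, abs_func_id);      (speak as another function)
 *   ...register accesses on its behalf...
 *   bxe_pretend_func(sc, SC_ABS_FUNC(sc));  (restore our own identity)
 *
 * as done in the timer-workaround and common-init flows below.
 */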
static void
bxe_iov_init_dmae(struct bxe_softc *sc)
{
    BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF");

    if (!IS_SRIOV(sc)) {
        return;
    }

    REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int
bxe_iov_init_ilt(struct bxe_softc *sc,
                 uint16_t         line)
{
    int i;
    struct ecore_ilt *ilt = sc->ilt;

    if (!IS_SRIOV(sc)) {
        return (line);
    }

    /* set vfs ilt lines */
    for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS; i++) {
        struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc, i);
        ilt->lines[line+i].page         = hw_cxt->addr;
        ilt->lines[line+i].page_mapping = hw_cxt->mapping;
        ilt->lines[line+i].size         = hw_cxt->size; /* doesn't matter */
    }

    return (line + i);
}
static void
bxe_iov_init_dq(struct bxe_softc *sc)
{
    if (!IS_SRIOV(sc)) {
        return;
    }

    /* Set the DQ such that the CID reflect the abs_vfid */
    REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0);
    REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

    /*
     * Set VFs starting CID. If it is > 0 the preceding CIDs belong to
     * the PF L2 queues.
     */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

    /* The VF window size is the log2 of the max number of CIDs per VF */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

    /*
     * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
     * the PF doorbell size although the 2 are independent.
     */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST,
           BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

    /*
     * No security checks for now -
     * configure single rule (out of 16) mask = 0x1, value = 0x0,
     * CID range 0 - 0x1ffff
     */
    REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1);
    REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

    /* set the number of VF allowed doorbells to the full DQ range */
    REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

    /* set the VF doorbell threshold */
    REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
/* send a NIG loopback debug packet */
static void
bxe_lb_pckt(struct bxe_softc *sc)
{
    uint32_t wb_write[3];

    /* Ethernet source and destination addresses */
    wb_write[0] = 0x55555555;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x20; /* SOP */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

    /* NON-IP protocol */
    wb_write[0] = 0x09000000;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/*
 * Some of the internal memories are not directly readable from the driver.
 * To test them we send debug packets.
 */
static int
bxe_int_mem_test(struct bxe_softc *sc)
{
    int factor;
    int count, i;
    uint32_t val = 0;

    if (CHIP_REV_IS_FPGA(sc)) {
        factor = 120;
    } else if (CHIP_REV_IS_EMUL(sc)) {
        factor = 200;
    } else {
        factor = 1;
    }

    /* disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send Ethernet packet */
    bxe_lb_pckt(sc);

    /* TODO do i reset NIG statistic? */
    /* Wait until NIG register shows 1 packet of size 0x10 */
    count = 1000 * factor;
    while (count) {
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0x10) {
            break;
        }
        DELAY(10000);
        count--;
    }
    if (val != 0x10) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-1);
    }

    /* wait until PRS register shows 1 packet */
    count = (1000 * factor);
    while (count) {
        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
        if (val == 1) {
            break;
        }
        DELAY(10000);
        count--;
    }
    if (val != 0x1) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
        return (-2);
    }

    /* Reset and init BRB, PRS */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);

    /* Disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* Write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send 10 Ethernet packets */
    for (i = 0; i < 10; i++) {
        bxe_lb_pckt(sc);
    }

    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
    count = (1000 * factor);
    while (count) {
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0xb0) {
            break;
        }
        DELAY(10000);
        count--;
    }
    if (val != 0xb0) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-3);
    }

    /* Wait until PRS register shows 2 packets */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 2) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* Write 1 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

    /* Wait until PRS register shows 3 packets */
    DELAY(10000 * factor);

    /* Wait until NIG register shows 1 packet of size 0x10 */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 3) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* clear NIG EOP FIFO */
    for (i = 0; i < 11; i++) {
        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
    }

    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
    if (val != 1) {
        BLOGE(sc, "clear of NIG failed\n");
        return (-4);
    }

    /* Reset and init BRB, PRS, NIG */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    if (!CNIC_SUPPORT(sc)) {
        /* set NIC mode */
        REG_WR(sc, PRS_REG_NIC_MODE, 1);
    }

    /* Enable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);

    return (0);
}
static void
bxe_setup_fan_failure_detection(struct bxe_softc *sc)
{
    uint32_t val;
    uint8_t port;
    uint8_t is_required = 0;

    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
           SHARED_HW_CFG_FAN_FAILURE_MASK);

    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
        is_required = 1;
    }
    /*
     * The fan failure mechanism is usually related to the PHY type since
     * the power consumption of the board is affected by the PHY. Currently,
     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
     */
    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
        for (port = PORT_0; port < PORT_MAX; port++) {
            is_required |= elink_fan_failure_det_req(sc,
                                                     sc->devinfo.shmem_base,
                                                     sc->devinfo.shmem2_base,
                                                     port);
        }
    }

    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);

    if (is_required == 0) {
        return;
    }

    /* Fan failure is indicated by SPIO 5 */
    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);

    /* set to active low mode */
    val = REG_RD(sc, MISC_REG_SPIO_INT);
    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
    REG_WR(sc, MISC_REG_SPIO_INT, val);

    /* enable interrupt to signal the IGU */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    val |= MISC_SPIO_SPIO5;
    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
}
static void
bxe_enable_blocks_attention(struct bxe_softc *sc)
{
    uint32_t val;

    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
    } else {
        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
    }
    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
    /*
     * mask read length error interrupts in brb for parser
     * (parsing unit and 'checksum and crc' unit)
     * these errors are legal (PU reads fixed length and CAC can cause
     * read length error on truncated packets)
     */
    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
    /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
    /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
    /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
    /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
    /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
    /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */

    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
    if (!CHIP_IS_E1x(sc)) {
        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
    }
    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);

    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
    /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */

    if (!CHIP_IS_E1x(sc)) {
        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
    }

    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
    /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
}
/**
 * bxe_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @sc: driver handle
 */
static int
bxe_init_hw_common(struct bxe_softc *sc)
{
    uint8_t abs_func_id;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
          SC_ABS_FUNC(sc));

    /*
     * take the RESET lock to protect undi_unload flow from accessing
     * registers while we are resetting the chip
     */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    bxe_reset_common(sc);

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);

    val = 0xfffc;
    if (CHIP_IS_E3(sc)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
    BLOGD(sc, DBG_LOAD, "after misc block init\n");

    if (!CHIP_IS_E1x(sc)) {
        /*
         * 4-port mode or 2-port mode we need to turn off master-enable for
         * everyone. After that we turn it back on for self. So, we disregard
         * multi-function, and always disable all functions on the given path,
         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
         */
        for (abs_func_id = SC_PATH(sc);
             abs_func_id < (E2_FUNC_MAX * 2);
             abs_func_id += 2) {
            if (abs_func_id == SC_ABS_FUNC(sc)) {
                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
                continue;
            }

            bxe_pretend_func(sc, abs_func_id);

            /* clear pf enable */
            bxe_pf_disable(sc);

            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
        }
    }

    BLOGD(sc, DBG_LOAD, "after pf disable\n");

    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);

    if (CHIP_IS_E1(sc)) {
        /*
         * enable HW interrupt from PXP on USDM overflow
         * bit 16 on INT_MASK_0
         */
        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    }

    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
    bxe_init_pxp(sc);

#ifdef __BIG_ENDIAN
    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
    /* make sure this value is 0 */
    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);

    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

    ecore_ilt_init_page_size(sc, INITOP_SET);

    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
    }

    /* let the HW do its magic... */
    DELAY(100000);

    /* finish PXP init */
    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 CFG failed\n");
        return (-1);
    }
    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 RD_INIT failed\n");
        return (-1);
    }

    BLOGD(sc, DBG_LOAD, "after pxp init\n");

    /*
     * Timer bug workaround for E2 only. We need to set the entire ILT to have
     * entries with value "0" and valid bit on. This needs to be done by the
     * first PF that is loaded in a path (i.e. common phase)
     */
    if (!CHIP_IS_E1x(sc)) {
        /*
         * In E2 there is a bug in the timers block that can cause function 6 / 7
         * (i.e. vnic3) to start even if it is marked as "scan-off".
         * This occurs when a different function (func2,3) is being marked
         * as "scan-off". Real-life scenario for example: if a driver is being
         * load-unloaded while func6,7 are down. This will cause the timer to access
         * the ilt, translate to a logical address and send a request to read/write.
         * Since the ilt for the function that is down is not valid, this will cause
         * a translation error which is unrecoverable.
         * The Workaround is intended to make sure that when this happens nothing
         * fatal will occur. The workaround:
         * 1. First PF driver which loads on a path will:
         *    a. After taking the chip out of reset, by using pretend,
         *       it will write "0" to the following registers of
         *       the other vnics:
         *       REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
         *       REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
         *       REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
         *       And for itself it will write '1' to
         *       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
         *       dmae-operations (writing to pram for example.)
         *       note: can be done for only function 6,7 but cleaner this
         *             way.
         *    b. Write zero+valid to the entire ILT.
         *    c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
         *       VNIC3 (of that port). The range allocated will be the
         *       entire ILT. This is needed to prevent ILT range error.
         * 2. Any PF driver load flow:
         *    a. ILT update with the physical addresses of the allocated
         *       logical pages.
         *    b. Wait 20msec. - note that this timeout is needed to make
         *       sure there are no requests in one of the PXP internal
         *       queues with "old" ILT addresses.
         *    c. PF enable in the PGLC.
         *    d. Clear the was_error of the PF in the PGLC. (could have
         *       occurred while driver was down)
         *    e. PF enable in the CFC (WEAK + STRONG)
         *    f. Timers scan enable
         * 3. PF driver unload flow:
         *    a. Clear the Timers scan_en.
         *    b. Polling for scan_on=0 for that PF.
         *    c. Clear the PF enable bit in the PXP.
         *    d. Clear the PF enable in the CFC (WEAK + STRONG)
         *    e. Write zero+valid to all ILT entries (The valid bit must
         *       stay set)
         *    f. If this is VNIC 3 of a port then also init
         *       first_timers_ilt_entry to zero and last_timers_ilt_entry
         *       to the last entry in the ILT.
         *
         * Notes:
         * Currently the PF error in the PGLC is non-recoverable.
         * In the future there will be a recovery routine for this error.
         * Currently attention is masked.
         * Having an MCP lock on the load/unload process does not guarantee that
         * there is no Timer disable during Func6/7 enable. This is because the
         * Timers scan is currently being cleared by the MCP on FLR.
         * Step 2.d can be done only for PF6/7 and the driver can also check if
         * there is error before clearing it. But the flow above is simpler and
         * more general.
         * All ILT entries are written by zero+valid and not just PF6/7
         * ILT entries since in the future the ILT entries allocation for
         * PF-s might be dynamic.
         */
        struct ilt_client_info ilt_cli;
        struct ecore_ilt ilt;

        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        memset(&ilt, 0, sizeof(struct ecore_ilt));

        /* initialize dummy TM client */
        ilt_cli.start      = 0;
        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        /*
         * Step 1: set zeroes to all ilt page entries with valid bit on
         * Step 2: set the timers first/last ilt entry to point
         * to the entire range to prevent ILT range error for 3rd/4th
         * vnic (this code assumes existence of the vnic)
         *
         * both steps performed by call to ecore_ilt_client_init_op()
         * with dummy TM client
         *
         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
         * and its brother are split registers
         */
        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
        bxe_pretend_func(sc, SC_ABS_FUNC(sc));

        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
    }

    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);

    if (!CHIP_IS_E1x(sc)) {
        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);

        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);

        /* let the HW do its magic... */
        do {
            DELAY(200000);
            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
        } while (factor-- && (val != 1));

        if (val != 1) {
            BLOGE(sc, "ATC_INIT failed\n");
            return (-1);
        }
    }

    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");

    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);

    bxe_iov_init_dmae(sc);

    /* clean the DMAE memory */
    sc->dmae_ready = 1;
    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);

    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);

    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);

    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);

    /* QM queues pointers table */
    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);

    /* soft reset pulse */
    REG_WR(sc, QM_REG_SOFT_RESET, 1);
    REG_WR(sc, QM_REG_SOFT_RESET, 0);

    if (CNIC_SUPPORT(sc))
        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);

    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
    if (!CHIP_REV_IS_SLOW(sc)) {
        /* enable hw interrupt from doorbell Q */
        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
    }

    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);

    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);

    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
    }

    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /*
             * configure that AFEX and VLAN headers must be
             * received in AFEX mode
             */
            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
        } else {
            /*
             * Bit-map indicating which L2 hdrs may appear
             * after the basic Ethernet header
             */
            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
        }
    }

    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);

    if (!CHIP_IS_E1x(sc)) {
        /* reset VFC memories */
        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);
        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);

        DELAY(20000);
    }

    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);

    /* sync semi rtc */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           0x80000000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
           0x80000000);

    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);

    if (!CHIP_IS_E1x(sc)) {
        if (IS_MF_AFEX(sc)) {
            /*
             * configure that AFEX and VLAN headers must be
             * sent in AFEX mode
             */
            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
        } else {
            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
        }
    }

    REG_WR(sc, SRC_REG_SOFT_RST, 1);

    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);

    if (CNIC_SUPPORT(sc)) {
        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
    }
    REG_WR(sc, SRC_REG_SOFT_RST, 0);

    if (sizeof(union cdu_context) != 1024) {
        /* we currently assume that a context is 1024 bytes */
        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
              (long)sizeof(union cdu_context));
    }

    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
    val = (4 << 24) + (0 << 12) + 1024;
    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);

    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);

    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
    /* enable context validation interrupt from CFC */
    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);

    /* set the thresholds to prevent CFC/CDU race */
    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);

    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
    }

    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);

    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2814, 0xffffffff);
    REG_WR(sc, 0x3820, 0xffffffff);

    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
    }

    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);

    if (!CHIP_IS_E1(sc)) {
        /* in E3 this done in per-port section */
        if (!CHIP_IS_E3(sc))
            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
    }

    if (CHIP_IS_E1H(sc)) {
        /* not applicable for E2 (and above ...) */
        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
    }

    if (CHIP_REV_IS_SLOW(sc)) {
        DELAY(200000);
    }

    /* finish CFC init */
    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC LL_INIT failed\n");
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC AC_INIT failed\n");
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC CAM_INIT failed\n");
        return (-1);
    }
    REG_WR(sc, CFC_REG_DEBUG0, 0);

    if (CHIP_IS_E1(sc)) {
        /* read NIG statistic to see if this is our first up since powerup */
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);

        /* do internal memory self test */
        if ((val == 0) && bxe_int_mem_test(sc)) {
            BLOGE(sc, "internal mem self test failed\n");
            return (-1);
        }
    }

    bxe_setup_fan_failure_detection(sc);

    /* clear PXP2 attentions */
    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

    bxe_enable_blocks_attention(sc);

    if (!CHIP_REV_IS_SLOW(sc)) {
        ecore_enable_blocks_parity(sc);
    }

    if (!BXE_NOMCP(sc)) {
        if (CHIP_IS_E1x(sc)) {
            bxe_common_init_phy(sc);
        }
    }

    return (0);
}
/**
 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @sc: driver handle
 */
static int
bxe_init_hw_common_chip(struct bxe_softc *sc)
{
    int rc = bxe_init_hw_common(sc);

    if (rc) {
        return (rc);
    }

    /* In E2 2-PORT mode, same ext phy is used for the two paths */
    if (!BXE_NOMCP(sc)) {
        bxe_common_init_phy(sc);
    }

    return (0);
}
static int
bxe_init_hw_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
    uint32_t low, high;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    /*
     * Timers bug workaround: disables the pf_master bit in pglue at
     * common phase, we need to enable it here before any dmae access are
     * attempted. Therefore we manually added the enable-master to the
     * port phase (it also happens in the function phase)
     */
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
    }

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);

    /* QM cid (connection) count */
    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_TM, init_phase);
        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
    }

    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);

    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
        if (IS_MF(sc)) {
            low = (BXE_ONE_PORT(sc) ? 160 : 246);
        } else if (sc->mtu > 4096) {
            if (BXE_ONE_PORT(sc)) {
                low = 160;
            } else {
                val = sc->mtu;
                /* (24*1024 + val*4)/256 */
                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
            }
        } else {
            low = (BXE_ONE_PORT(sc) ? 80 : 160);
        }
        high = (low + 56); /* 14*1024/256 */
        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
    }
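    /*
     * Worked example for the non-MF, two-port case above: with an MTU of
     * 9000 bytes, low = 96 + 9000/64 + 1 = 237 (integer division, plus one
     * because 9000 % 64 != 0) and high = 237 + 56 = 293, both expressed in
     * 256-byte BRB blocks.
     */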
    if (CHIP_IS_MODE_4_PORT(sc)) {
        REG_WR(sc, SC_PORT(sc) ?
               BRB1_REG_MAC_GUARANTIED_1 :
               BRB1_REG_MAC_GUARANTIED_0, 40);
    }

    ecore_init_block(sc, BLOCK_PRS, init_phase);
    if (CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /* configure headers for AFEX mode */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
        } else {
            /*
             * Ovlan exists only if we are in multi-function +
             * switch-dependent mode, in switch-independent there
             * is no ovlan headers
             */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
        }
    }

    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);

    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);

    ecore_init_block(sc, BLOCK_PBF, init_phase);

    if (CHIP_IS_E1x(sc)) {
        /* configure PBF to work without PAUSE mtu 9000 */
        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

        /* update threshold */
        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

        /* probe changes */
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
        DELAY(50);
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
    }

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_SRC, init_phase);
    }

    ecore_init_block(sc, BLOCK_CDU, init_phase);
    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    }
    ecore_init_block(sc, BLOCK_HC, init_phase);

    ecore_init_block(sc, BLOCK_IGU, init_phase);

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
    /*
     * init aeu_mask_attn_func_0/1:
     * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
     * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
     *   bits 4-7 are used for "per vn group attention"
     */
    val = IS_MF(sc) ? 0xF7 : 0x7;
    /* Enable DCBX attention for all but E1 */
    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

    ecore_init_block(sc, BLOCK_NIG, init_phase);

    if (!CHIP_IS_E1x(sc)) {
        /*
         * Bit-map indicating which L2 hdrs may appear after the
         * basic Ethernet header
         */
        if (IS_MF_AFEX(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
        } else {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC,
                   IS_MF_SD(sc) ? 7 : 6);
        }

        if (CHIP_IS_E3(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_LLH1_MF_MODE :
                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
        }
    }

    if (!CHIP_IS_E3(sc)) {
        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
    }

    if (!CHIP_IS_E1(sc)) {
        /* 0x2 disable mf_ov, 0x1 enable */
        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
               (IS_MF_SD(sc) ? 0x1 : 0x2));

        if (!CHIP_IS_E1x(sc)) {
            val = 0;
            switch (sc->devinfo.mf_info.mf_mode) {
            case MULTI_FUNCTION_SD:
                val = 1;
                break;
            case MULTI_FUNCTION_SI:
            case MULTI_FUNCTION_AFEX:
                val = 2;
                break;
            }

            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
                        NIG_REG_LLH0_CLS_TYPE), val);
        }

        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
    }

    /* If SPIO5 is set to generate interrupts, enable it for this port */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    if (val & MISC_SPIO_SPIO5) {
        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
        val = REG_RD(sc, reg_addr);
        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_addr, val);
    }

    return (0);
}
static uint32_t
bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
                       uint32_t         reg,
                       uint32_t         expected,
                       uint32_t         poll_count)
{
    uint32_t cur_cnt = poll_count;
    uint32_t val;

    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
        DELAY(FLR_WAIT_INTERVAL);
    }

    return (val);
}

static int
bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
                              uint32_t         reg,
                              char             *msg,
                              uint32_t         poll_cnt)
{
    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);

    if (val != 0) {
        BLOGE(sc, "%s usage count=%d\n", msg, val);
        return (1);
    }

    return (0);
}
/* Common routines with VF FLR cleanup */

static uint32_t
bxe_flr_clnup_poll_count(struct bxe_softc *sc)
{
    /* adjust polling timeout */
    if (CHIP_REV_IS_EMUL(sc)) {
        return (FLR_POLL_CNT * 2000);
    }

    if (CHIP_REV_IS_FPGA(sc)) {
        return (FLR_POLL_CNT * 120);
    }

    return (FLR_POLL_CNT);
}
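/*
 * Each poll iteration in bxe_flr_clnup_reg_poll() waits a fixed
 * FLR_WAIT_INTERVAL, so scaling the iteration count (2000x on emulation,
 * 120x on FPGA) stretches the total FLR timeout in wall-clock terms
 * without touching the per-poll delay; ASIC parts use the unscaled budget.
 */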
static int
bxe_poll_hw_usage_counters(struct bxe_softc *sc,
                           uint32_t         poll_cnt)
{
    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
                                      "CFC PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      DORQ_REG_PF_USAGE_CNT,
                                      "DQ PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
                                      "QM PF usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
                                      "Timers VNIC usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
                                      "Timers NUM_SCANS usage counter timed out",
                                      poll_cnt)) {
        return (1);
    }

    /* Wait DMAE PF usage counter to zero */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
                                      "DMAE command register timed out",
                                      poll_cnt)) {
        return (1);
    }

    return (0);
}
#define OP_GEN_PARAM(param)                                             \
    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
#define OP_GEN_TYPE(type)                                               \
    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
#define OP_GEN_AGG_VECT(index)                                          \
    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
static int
bxe_send_final_clnup(struct bxe_softc *sc,
                     uint8_t          clnup_func,
                     uint32_t         poll_cnt)
{
    uint32_t op_gen_command = 0;
    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
    int ret = 0;

    if (REG_RD(sc, comp_addr)) {
        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
        return (1);
    }

    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);

    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
        BLOGE(sc, "FW final cleanup did not succeed\n");
        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
              (REG_RD(sc, comp_addr)));
        bxe_panic(sc, ("FLR cleanup failed\n"));
        return (1);
    }

    /* Zero completion for next FLR */
    REG_WR(sc, comp_addr, 0);

    return (ret);
}
static void
bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
                       struct pbf_pN_buf_regs *regs,
                       uint32_t               poll_count)
{
    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
    uint32_t cur_cnt = poll_count;

    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
    crd = crd_start = REG_RD(sc, regs->crd);
    init_crd = REG_RD(sc, regs->init_crd);

    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

    while ((crd != init_crd) &&
           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
            (init_crd - crd_start))) {
        if (cur_cnt--) {
            DELAY(FLR_WAIT_INTERVAL);
            crd = REG_RD(sc, regs->crd);
            crd_freed = REG_RD(sc, regs->crd_freed);
        } else {
            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
            break;
        }
    }

    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static void
bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
                       struct pbf_pN_cmd_regs *regs,
                       uint32_t               poll_count)
{
    uint32_t occup, to_free, freed, freed_start;
    uint32_t cur_cnt = poll_count;

    occup = to_free = REG_RD(sc, regs->lines_occup);
    freed = freed_start = REG_RD(sc, regs->lines_freed);

    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

    while (occup &&
           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
        if (cur_cnt--) {
            DELAY(FLR_WAIT_INTERVAL);
            occup = REG_RD(sc, regs->lines_occup);
            freed = REG_RD(sc, regs->lines_freed);
        } else {
            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
            break;
        }
    }

    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
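/*
 * Note on the (uint32_t)((int32_t)freed - (int32_t)freed_start) idiom in
 * the two helpers above: the freed-lines and freed-credit counters are
 * free-running and may wrap, so the delta is computed in signed
 * arithmetic and cast back to unsigned, which yields the correct
 * distance even across a 32-bit rollover.
 */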
static void
bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
{
    struct pbf_pN_cmd_regs cmd_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q0 :
            PBF_REG_P0_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
            PBF_REG_P0_TQ_LINES_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q1 :
            PBF_REG_P1_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
            PBF_REG_P1_TQ_LINES_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_LB_Q :
            PBF_REG_P4_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
            PBF_REG_P4_TQ_LINES_FREED_CNT}
    };

    struct pbf_pN_buf_regs buf_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q0 :
            PBF_REG_P0_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q0 :
            PBF_REG_P0_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q1 :
            PBF_REG_P1_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q1 :
            PBF_REG_P1_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_LB_Q :
            PBF_REG_P4_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_LB_Q :
            PBF_REG_P4_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
            PBF_REG_P4_INTERNAL_CRD_FREED_CNT}
    };

    int i;

    /* Verify the command queues are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
    }

    /* Verify the transmission buffers are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
    }
}
static void
bxe_hw_enable_status(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, PBF_REG_DISABLE_PF);
    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
          val);
}
static int
bxe_pf_flr_clnup(struct bxe_softc *sc)
{
    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);

    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));

    /* Re-enable PF target read access */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

    /* Poll HW usage counters */
    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
        return (-1);
    }

    /* Zero the igu 'trailing edge' and 'leading edge' */

    /* Send the FW cleanup command */
    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
        return (-1);
    }

    /* ATC cleanup */

    /* Verify TX hw is flushed */
    bxe_tx_hw_flushed(sc, poll_cnt);

    /* Wait 100ms (not adjusted according to platform) */
    DELAY(100000);

    /* Verify no pending pci transactions */
    if (bxe_is_pcie_pending(sc)) {
        BLOGE(sc, "PCIE Transactions still pending\n");
    }

    /* Debug */
    bxe_hw_enable_status(sc);

    /*
     * Master enable - Due to WB DMAE writes performed before this
     * register is re-initialized as part of the regular function init
     */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

    return (0);
}
18347 return (0);
18348 }
18351 static void
18352 bxe_init_searcher(struct bxe_softc *sc)
18353 {
18354 int port = SC_PORT(sc);
18355 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM);
18356 /* The T1 hash bits value determines the number of T1 entries. */
18357 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
18358 }
18361 static int
18362 bxe_init_hw_func(struct bxe_softc *sc)
18363 {
18364 int port = SC_PORT(sc);
18365 int func = SC_FUNC(sc);
18366 int init_phase = PHASE_PF0 + func;
18367 struct ecore_ilt *ilt = sc->ilt;
18368 uint16_t cdu_ilt_start;
18369 uint32_t addr, val;
18370 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18371 int i, main_mem_width, rc;
18373 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18376 if (!CHIP_IS_E1x(sc)) {
18377 rc = bxe_pf_flr_clnup(sc);
18378 if (rc) {
18379 BLOGE(sc, "FLR cleanup failed!\n");
18380 // XXX bxe_fw_dump(sc);
18381 // XXX bxe_idle_chk(sc);
18382 return (rc);
18383 }
18384 }
18386 /* set MSI reconfigure capability */
18387 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18388 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18389 val = REG_RD(sc, addr);
18390 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18391 REG_WR(sc, addr, val);
18392 }
18394 ecore_init_block(sc, BLOCK_PXP, init_phase);
18395 ecore_init_block(sc, BLOCK_PXP2, init_phase);
18398 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18401 if (IS_SRIOV(sc)) {
18402 cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS;
18403 }
18404 cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start);
18406 #if (BXE_FIRST_VF_CID > 0)
18407 /*
18408 * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precede
18409 * those of the VFs, so start line should be reset
18410 */
18411 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18412 #endif
18415 for (i = 0; i < L2_ILT_LINES(sc); i++) {
18416 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18417 ilt->lines[cdu_ilt_start + i].page_mapping =
18418 sc->context[i].vcxt_dma.paddr;
18419 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18420 }
18421 ecore_ilt_init_op(sc, INITOP_SET);
18424 if (!CONFIGURE_NIC_MODE(sc)) {
18425 bxe_init_searcher(sc);
18426 REG_WR(sc, PRS_REG_NIC_MODE, 0);
18427 BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n");
18428 } else {
18430 /* Set NIC mode */
18432 REG_WR(sc, PRS_REG_NIC_MODE, 1);
18433 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18434 }
18436 if (!CHIP_IS_E1x(sc)) {
18437 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18439 /* Turn on a single ISR mode in IGU if driver is going to use
18440 * INT#x or MSI
18441 */
18442 if (sc->interrupt_mode != INTR_MODE_MSIX) {
18443 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18444 }
18446 /*
18447 * Timers workaround bug: function init part.
18448 * Need to wait 20msec after initializing ILT,
18449 * needed to make sure there are no requests in
18450 * one of the PXP internal queues with "old" ILT addresses
18451 */
18453 DELAY(20000);
18454 /*
18455 * Master enable - Due to WB DMAE writes performed before this
18456 * register is re-initialized as part of the regular function
18457 * init
18458 */
18459 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18460 /* Enable the function in IGU */
18461 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18462 }
18464 sc->dmae_ready = 1;
18466 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18468 if (!CHIP_IS_E1x(sc))
18469 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18471 ecore_init_block(sc, BLOCK_ATC, init_phase);
18472 ecore_init_block(sc, BLOCK_DMAE, init_phase);
18473 ecore_init_block(sc, BLOCK_NIG, init_phase);
18474 ecore_init_block(sc, BLOCK_SRC, init_phase);
18475 ecore_init_block(sc, BLOCK_MISC, init_phase);
18476 ecore_init_block(sc, BLOCK_TCM, init_phase);
18477 ecore_init_block(sc, BLOCK_UCM, init_phase);
18478 ecore_init_block(sc, BLOCK_CCM, init_phase);
18479 ecore_init_block(sc, BLOCK_XCM, init_phase);
18480 ecore_init_block(sc, BLOCK_TSEM, init_phase);
18481 ecore_init_block(sc, BLOCK_USEM, init_phase);
18482 ecore_init_block(sc, BLOCK_CSEM, init_phase);
18483 ecore_init_block(sc, BLOCK_XSEM, init_phase);
18485 if (!CHIP_IS_E1x(sc))
18486 REG_WR(sc, QM_REG_PF_EN, 1);
18488 if (!CHIP_IS_E1x(sc)) {
18489 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18490 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18491 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18492 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18493 }
18494 ecore_init_block(sc, BLOCK_QM, init_phase);
18496 ecore_init_block(sc, BLOCK_TM, init_phase);
18497 ecore_init_block(sc, BLOCK_DORQ, init_phase);
18499 bxe_iov_init_dq(sc);
18501 ecore_init_block(sc, BLOCK_BRB1, init_phase);
18502 ecore_init_block(sc, BLOCK_PRS, init_phase);
18503 ecore_init_block(sc, BLOCK_TSDM, init_phase);
18504 ecore_init_block(sc, BLOCK_CSDM, init_phase);
18505 ecore_init_block(sc, BLOCK_USDM, init_phase);
18506 ecore_init_block(sc, BLOCK_XSDM, init_phase);
18507 ecore_init_block(sc, BLOCK_UPB, init_phase);
18508 ecore_init_block(sc, BLOCK_XPB, init_phase);
18509 ecore_init_block(sc, BLOCK_PBF, init_phase);
18510 if (!CHIP_IS_E1x(sc))
18511 REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18513 ecore_init_block(sc, BLOCK_CDU, init_phase);
18515 ecore_init_block(sc, BLOCK_CFC, init_phase);
18517 if (!CHIP_IS_E1x(sc))
18518 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18520 if (IS_MF(sc)) {
18521 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18522 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18523 }
18525 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18527 /* HC init per function */
18528 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18529 if (CHIP_IS_E1H(sc)) {
18530 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18532 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18533 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18534 }
18535 ecore_init_block(sc, BLOCK_HC, init_phase);
18536 } else {
18538 int num_segs, sb_idx, prod_offset;
18540 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18542 if (!CHIP_IS_E1x(sc)) {
18543 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18544 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18545 }
18547 ecore_init_block(sc, BLOCK_IGU, init_phase);
18549 if (!CHIP_IS_E1x(sc)) {
18550 int dsb_idx = 0;
18552 /*
18553 * E2 mode: address 0-135 match to the mapping memory;
18554 * 136 - PF0 default prod; 137 - PF1 default prod;
18555 * 138 - PF2 default prod; 139 - PF3 default prod;
18556 * 140 - PF0 attn prod; 141 - PF1 attn prod;
18557 * 142 - PF2 attn prod; 143 - PF3 attn prod;
18558 * 144-147 reserved.
18560 * E1.5 mode - In backward compatible mode;
18561 * for non default SB; each even line in the memory
18562 * holds the U producer and each odd line holds
18563 * the C producer. The first 128 producers are for
18564 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18565 * producers are for the DSB for each PF.
18566 * Each PF has five segments: (the order inside each
18567 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18568 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18569 * 144-147 attn prods;
18570 */
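/*
 * Illustrative reading of the table above (editorial example, not
 * executed logic): in E2 "normal" mode PF2's default producer sits at
 * memory line 138 and its attention producer at line 142, so they live
 * at IGU_REG_PROD_CONS_MEMORY + 138*4 and IGU_REG_PROD_CONS_MEMORY +
 * 142*4 respectively.
 */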
18571 /* non-default-status-blocks */
18572 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18573 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
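/*
 * Worked example (illustrative, assuming two producer segments per
 * non-default status block): SB (igu_base_sb + sb_idx) then owns the
 * consecutive producer lines starting at 2 * (igu_base_sb + sb_idx),
 * and the loop below clears each line at IGU_REG_PROD_CONS_MEMORY +
 * line*4.
 */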
18574 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18575 prod_offset = (sc->igu_base_sb + sb_idx) *
18576 num_segs;
18578 for (i = 0; i < num_segs; i++) {
18579 addr = IGU_REG_PROD_CONS_MEMORY +
18580 (prod_offset + i) * 4;
18581 REG_WR(sc, addr, 0);
18582 }
18583 /* send consumer update with value 0 */
18584 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18585 USTORM_ID, 0, IGU_INT_NOP, 1);
18586 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18587 }
18589 /* default-status-blocks */
18590 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18591 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18593 if (CHIP_IS_MODE_4_PORT(sc))
18594 dsb_idx = SC_FUNC(sc);
18595 else
18596 dsb_idx = SC_VN(sc);
18598 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18599 IGU_BC_BASE_DSB_PROD + dsb_idx :
18600 IGU_NORM_BASE_DSB_PROD + dsb_idx);
18602 /*
18603 * igu prods come in chunks of E1HVN_MAX (4) -
18604 * it does not matter what the current chip mode is
18605 */
18606 for (i = 0; i < (num_segs * E1HVN_MAX);
18607 i++) {
18608 addr = IGU_REG_PROD_CONS_MEMORY +
18609 (prod_offset + i)*4;
18610 REG_WR(sc, addr, 0);
18611 }
18612 /* send consumer update with 0 */
18613 if (CHIP_INT_MODE_IS_BC(sc)) {
18614 bxe_ack_sb(sc, sc->igu_dsb_id,
18615 USTORM_ID, 0, IGU_INT_NOP, 1);
18616 bxe_ack_sb(sc, sc->igu_dsb_id,
18617 CSTORM_ID, 0, IGU_INT_NOP, 1);
18618 bxe_ack_sb(sc, sc->igu_dsb_id,
18619 XSTORM_ID, 0, IGU_INT_NOP, 1);
18620 bxe_ack_sb(sc, sc->igu_dsb_id,
18621 TSTORM_ID, 0, IGU_INT_NOP, 1);
18622 bxe_ack_sb(sc, sc->igu_dsb_id,
18623 ATTENTION_ID, 0, IGU_INT_NOP, 1);
18624 } else {
18625 bxe_ack_sb(sc, sc->igu_dsb_id,
18626 USTORM_ID, 0, IGU_INT_NOP, 1);
18627 bxe_ack_sb(sc, sc->igu_dsb_id,
18628 ATTENTION_ID, 0, IGU_INT_NOP, 1);
18629 }
18630 bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18632 /* !!! these should become driver const once
18633 rf-tool supports split-68 const */
18634 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18635 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18636 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18637 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18638 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18639 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18640 }
18641 }
18643 /* Reset PCIE errors for debug */
18644 REG_WR(sc, 0x2114, 0xffffffff);
18645 REG_WR(sc, 0x2120, 0xffffffff);
18647 if (CHIP_IS_E1x(sc)) {
18648 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18649 main_mem_base = HC_REG_MAIN_MEMORY +
18650 SC_PORT(sc) * (main_mem_size * 4);
18651 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18652 main_mem_width = 8;
18654 val = REG_RD(sc, main_mem_prty_clr);
18655 if (val != 0) {
18656 BLOGD(sc, DBG_LOAD,
18657 "Parity errors in HC block during function init (0x%x)!\n",
18661 /* Clear "false" parity errors in MSI-X table */
18662 for (i = main_mem_base;
18663 i < main_mem_base + main_mem_size * 4;
18664 i += main_mem_width) {
18665 bxe_read_dmae(sc, i, main_mem_width / 4);
18666 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18667 i, main_mem_width / 4);
18668 }
18669 /* Clear HC parity attention */
18670 REG_RD(sc, main_mem_prty_clr);
18671 }
18674 /* Enable STORMs SP logging */
18675 REG_WR8(sc, BAR_USTRORM_INTMEM +
18676 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18677 REG_WR8(sc, BAR_TSTRORM_INTMEM +
18678 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18679 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18680 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18681 REG_WR8(sc, BAR_XSTRORM_INTMEM +
18682 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18685 elink_phy_probe(&sc->link_params);
18687 return (0);
18688 }
18690 static void
18691 bxe_link_reset(struct bxe_softc *sc)
18692 {
18693 if (!BXE_NOMCP(sc)) {
18694 bxe_acquire_phy_lock(sc);
18695 elink_lfa_reset(&sc->link_params, &sc->link_vars);
18696 bxe_release_phy_lock(sc);
18697 } else {
18698 if (!CHIP_REV_IS_SLOW(sc)) {
18699 BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18700 }
18701 }
18702 }
18704 static void
18705 bxe_reset_port(struct bxe_softc *sc)
18706 {
18707 int port = SC_PORT(sc);
18708 uint32_t val;
18710 /* reset physical Link */
18711 bxe_link_reset(sc);
18713 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18715 /* Do not rcv packets to BRB */
18716 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18717 /* Do not direct rcv packets that are not for MCP to the BRB */
18718 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18719 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18721 /* Configure AEU */
18722 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18724 DELAY(100000);
18726 /* Check for BRB port occupancy */
18727 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18728 if (val != 0) {
18729 BLOGD(sc, DBG_LOAD,
18730 "BRB1 is not empty, %d blocks are occupied\n", val);
18733 /* TODO: Close Doorbell port? */
18734 }
18736 static void
18737 bxe_ilt_wr(struct bxe_softc *sc,
18738 uint32_t index,
18739 bus_addr_t addr)
18740 {
18741 uint32_t reg;
18742 uint32_t wb_write[2];
18744 if (CHIP_IS_E1(sc)) {
18745 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18746 } else {
18747 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18748 }
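/*
 * Editorial note, illustrative only: each on-chip address-table entry
 * is 64 bits wide (hence the index*8 offsets above); the two dwords
 * built by ONCHIP_ADDR1()/ONCHIP_ADDR2() below are pushed together in
 * a single DMAE write.
 */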
18750 wb_write[0] = ONCHIP_ADDR1(addr);
18751 wb_write[1] = ONCHIP_ADDR2(addr);
18752 REG_WR_DMAE(sc, reg, wb_write, 2);
18753 }
18755 static void
18756 bxe_clear_func_ilt(struct bxe_softc *sc,
18757 uint32_t func)
18758 {
18759 uint32_t i, base = FUNC_ILT_BASE(func);
18760 for (i = base; i < base + ILT_PER_FUNC; i++) {
18761 bxe_ilt_wr(sc, i, 0);
18762 }
18763 }
18765 static void
18766 bxe_reset_func(struct bxe_softc *sc)
18767 {
18768 struct bxe_fastpath *fp;
18769 int port = SC_PORT(sc);
18770 int func = SC_FUNC(sc);
18771 int i;
18773 /* Disable the function in the FW */
18774 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18775 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18776 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18777 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18780 FOR_EACH_ETH_QUEUE(sc, i) {
18781 fp = &sc->fp[i];
18782 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18783 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18784 SB_DISABLED);
18785 }
18788 if (CNIC_LOADED(sc)) {
18790 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18791 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
18792 (bxe_cnic_fw_sb_id(sc)), SB_DISABLED);
18793 }
18797 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18798 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18799 SB_DISABLED);
18801 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18802 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18803 }
18805 /* Configure IGU */
18806 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18807 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18808 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18809 } else {
18810 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18811 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18812 }
18814 if (CNIC_LOADED(sc)) {
18815 /* Disable Timer scan */
18816 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18817 /*
18818 * Wait for at least 10ms and up to 2 seconds for the timers
18819 * scan to complete
18820 */
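/* Assuming the 10ms poll interval below, 200 iterations give the 2 second upper bound. */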
18821 for (i = 0; i < 200; i++) {
18822 DELAY(10000);
18823 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18824 break;
18825 }
18826 }
18829 bxe_clear_func_ilt(sc, func);
18831 /*
18832 * Timers workaround bug for E2: if this is vnic-3,
18833 * we need to set the entire ILT range for these timers.
18834 */
18835 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18836 struct ilt_client_info ilt_cli;
18837 /* use dummy TM client */
18838 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18839 ilt_cli.start = 0;
18840 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18841 ilt_cli.client_num = ILT_CLIENT_TM;
18843 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18844 }
18846 /* This assumes that reset_port() was called before reset_func(). */
18847 if (!CHIP_IS_E1x(sc)) {
18848 bxe_pf_disable(sc);
18849 }
18851 sc->dmae_ready = 0;
18852 }
18854 static void
18855 bxe_gunzip_init(struct bxe_softc *sc)
18856 {
18857 return;
18858 }
18860 static void
18861 bxe_gunzip_end(struct bxe_softc *sc)
18862 {
18863 return;
18864 }
18866 static int
18867 bxe_init_firmware(struct bxe_softc *sc)
18868 {
18869 if (CHIP_IS_E1(sc)) {
18870 ecore_init_e1_firmware(sc);
18871 sc->iro_array = e1_iro_arr;
18872 } else if (CHIP_IS_E1H(sc)) {
18873 ecore_init_e1h_firmware(sc);
18874 sc->iro_array = e1h_iro_arr;
18875 } else if (!CHIP_IS_E1x(sc)) {
18876 ecore_init_e2_firmware(sc);
18877 sc->iro_array = e2_iro_arr;
18878 } else {
18879 BLOGE(sc, "Unsupported chip revision\n");
18880 return (-1);
18881 }
18883 return (0);
18884 }
18886 static void
18887 bxe_release_firmware(struct bxe_softc *sc)
18888 {
18889 return;
18890 }
18894 ecore_gunzip(struct bxe_softc *sc,
18895 const uint8_t *zbuf,
18896 int len)
18897 {
18898 /* XXX : Implement... */
18899 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18904 ecore_reg_wr_ind(struct bxe_softc *sc,
18905 uint32_t addr,
18906 uint32_t val)
18907 {
18908 bxe_reg_wr_ind(sc, addr, val);
18909 }
18911 void
18912 ecore_write_dmae_phys_len(struct bxe_softc *sc,
18913 bus_addr_t phys_addr,
18914 uint32_t addr,
18915 uint32_t len)
18916 {
18917 bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18918 }
18920 void
18921 ecore_storm_memset_struct(struct bxe_softc *sc,
18922 uint32_t addr,
18923 size_t size,
18924 uint32_t *data)
18925 {
18926 uint32_t i;
18927 for (i = 0; i < size/4; i++) {
18928 REG_WR(sc, addr + (i * 4), data[i]);
18929 }
18930 }
18933 /*
18934 * character device - ioctl interface definitions
18935 */
18938 #include "bxe_dump.h"
18939 #include "bxe_ioctl.h"
18940 #include <sys/conf.h>
18942 static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18943 struct thread *td);
18945 static struct cdevsw bxe_cdevsw = {
18946 .d_version = D_VERSION,
18947 .d_ioctl = bxe_eioctl,
18948 .d_name = "bxecnic",
18949 };
18951 #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
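/*
 * Illustrative consequence of the macro above: on E2/E3 parts the path
 * is the low bit of the PCI function, so functions 0,2,4,6 map to
 * path 0 and functions 1,3,5,7 to path 1, while E1/E1H devices always
 * use path 0.
 */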
18954 #define DUMP_ALL_PRESETS 0x1FFF
18955 #define DUMP_MAX_PRESETS 13
18956 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18957 #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18958 #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18959 #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18960 #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18962 #define IS_REG_IN_PRESET(presets, idx) \
18963 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
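/*
 * Example: preset index 3 maps to bit 2, so a register whose presets
 * field contains 0x0004 satisfies IS_REG_IN_PRESET(0x0004, 3), and
 * DUMP_ALL_PRESETS (0x1FFF) matches every preset 1..13.
 */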
18966 static int
18967 bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18968 {
18969 if (CHIP_IS_E1(sc))
18970 return dump_num_registers[0][preset-1];
18971 else if (CHIP_IS_E1H(sc))
18972 return dump_num_registers[1][preset-1];
18973 else if (CHIP_IS_E2(sc))
18974 return dump_num_registers[2][preset-1];
18975 else if (CHIP_IS_E3A0(sc))
18976 return dump_num_registers[3][preset-1];
18977 else if (CHIP_IS_E3B0(sc))
18978 return dump_num_registers[4][preset-1];
18979 else
18980 return (0);
18981 }
18983 static int
18984 bxe_get_total_regs_len32(struct bxe_softc *sc)
18985 {
18986 uint32_t preset_idx;
18987 int regdump_len32 = 0;
18990 /* Calculate the total preset regs length */
18991 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18992 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18993 }
18995 return regdump_len32;
18996 }
18998 static const uint32_t *
18999 __bxe_get_page_addr_ar(struct bxe_softc *sc)
19000 {
19001 if (CHIP_IS_E2(sc))
19002 return page_vals_e2;
19003 else if (CHIP_IS_E3(sc))
19004 return page_vals_e3;
19005 else
19006 return (NULL);
19007 }
19009 static uint32_t
19010 __bxe_get_page_reg_num(struct bxe_softc *sc)
19011 {
19012 if (CHIP_IS_E2(sc))
19013 return PAGE_MODE_VALUES_E2;
19014 else if (CHIP_IS_E3(sc))
19015 return PAGE_MODE_VALUES_E3;
19016 else
19017 return (0);
19018 }
19020 static const uint32_t *
19021 __bxe_get_page_write_ar(struct bxe_softc *sc)
19022 {
19023 if (CHIP_IS_E2(sc))
19024 return page_write_regs_e2;
19025 else if (CHIP_IS_E3(sc))
19026 return page_write_regs_e3;
19027 else
19028 return (NULL);
19029 }
19031 static uint32_t
19032 __bxe_get_page_write_num(struct bxe_softc *sc)
19033 {
19034 if (CHIP_IS_E2(sc))
19035 return PAGE_WRITE_REGS_E2;
19036 else if (CHIP_IS_E3(sc))
19037 return PAGE_WRITE_REGS_E3;
19038 else
19039 return (0);
19040 }
19042 static const struct reg_addr *
19043 __bxe_get_page_read_ar(struct bxe_softc *sc)
19044 {
19045 if (CHIP_IS_E2(sc))
19046 return page_read_regs_e2;
19047 else if (CHIP_IS_E3(sc))
19048 return page_read_regs_e3;
19049 else
19050 return (NULL);
19051 }
19053 static uint32_t
19054 __bxe_get_page_read_num(struct bxe_softc *sc)
19055 {
19056 if (CHIP_IS_E2(sc))
19057 return PAGE_READ_REGS_E2;
19058 else if (CHIP_IS_E3(sc))
19059 return PAGE_READ_REGS_E3;
19060 else
19061 return (0);
19062 }
19064 static bool
19065 bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
19066 {
19067 if (CHIP_IS_E1(sc))
19068 return IS_E1_REG(reg_info->chips);
19069 else if (CHIP_IS_E1H(sc))
19070 return IS_E1H_REG(reg_info->chips);
19071 else if (CHIP_IS_E2(sc))
19072 return IS_E2_REG(reg_info->chips);
19073 else if (CHIP_IS_E3A0(sc))
19074 return IS_E3A0_REG(reg_info->chips);
19075 else if (CHIP_IS_E3B0(sc))
19076 return IS_E3B0_REG(reg_info->chips);
19077 else
19078 return (0);
19079 }
19081 static bool
19082 bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
19083 {
19084 if (CHIP_IS_E1(sc))
19085 return IS_E1_REG(wreg_info->chips);
19086 else if (CHIP_IS_E1H(sc))
19087 return IS_E1H_REG(wreg_info->chips);
19088 else if (CHIP_IS_E2(sc))
19089 return IS_E2_REG(wreg_info->chips);
19090 else if (CHIP_IS_E3A0(sc))
19091 return IS_E3A0_REG(wreg_info->chips);
19092 else if (CHIP_IS_E3B0(sc))
19093 return IS_E3B0_REG(wreg_info->chips);
19094 else
19095 return (0);
19096 }
19098 /*
19099 * bxe_read_pages_regs - read "paged" registers
19101 * @sc device handle
19102 * @p output buffer
19104 * Reads "paged" memories: memories that may only be read by first writing to a
19105 * specific address ("write address") and then reading from a specific address
19106 * ("read address"). There may be more than one write address per "page" and
19107 * more than one read address per write address.
19108 */
19109 static void
19110 bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
19111 {
19112 uint32_t i, j, k, n;
19114 /* addresses of the paged registers */
19115 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
19116 /* number of paged registers */
19117 int num_pages = __bxe_get_page_reg_num(sc);
19118 /* write addresses */
19119 const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
19120 /* number of write addresses */
19121 int write_num = __bxe_get_page_write_num(sc);
19122 /* read addresses info */
19123 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
19124 /* number of read addresses */
19125 int read_num = __bxe_get_page_read_num(sc);
19126 uint32_t addr, size;
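/*
 * Sketch of the loop nest below (editorial summary): each page value is
 * latched into each write (page-select) register in turn, and after
 * every such write all preset-matching read addresses are walked,
 * pulling 'size' dwords from each (addr, addr+4, ...,
 * addr+(size-1)*4).
 */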
19128 for (i = 0; i < num_pages; i++) {
19129 for (j = 0; j < write_num; j++) {
19130 REG_WR(sc, write_addr[j], page_addr[i]);
19132 for (k = 0; k < read_num; k++) {
19133 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
19134 size = read_addr[k].size;
19135 for (n = 0; n < size; n++) {
19136 addr = read_addr[k].addr + n*4;
19137 *p++ = REG_RD(sc, addr);
19138 }
19139 }
19140 }
19141 }
19142 }
19143 }
19146 static int
19148 bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
19149 {
19150 uint32_t i, j, addr;
19151 const struct wreg_addr *wreg_addr_p = NULL;
19153 if (CHIP_IS_E1(sc))
19154 wreg_addr_p = &wreg_addr_e1;
19155 else if (CHIP_IS_E1H(sc))
19156 wreg_addr_p = &wreg_addr_e1h;
19157 else if (CHIP_IS_E2(sc))
19158 wreg_addr_p = &wreg_addr_e2;
19159 else if (CHIP_IS_E3A0(sc))
19160 wreg_addr_p = &wreg_addr_e3;
19161 else if (CHIP_IS_E3B0(sc))
19162 wreg_addr_p = &wreg_addr_e3b0;
19166 /* Read the idle_chk registers */
19167 for (i = 0; i < IDLE_REGS_COUNT; i++) {
19168 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
19169 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
19170 for (j = 0; j < idle_reg_addrs[i].size; j++)
19171 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
19172 }
19173 }
19175 /* Read the regular registers */
19176 for (i = 0; i < REGS_COUNT; i++) {
19177 if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
19178 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
19179 for (j = 0; j < reg_addrs[i].size; j++)
19180 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
19181 }
19182 }
19184 /* Read the CAM registers */
19185 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
19186 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
19187 for (i = 0; i < wreg_addr_p->size; i++) {
19188 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
19190 /* In case of wreg_addr register, read additional
19191 registers from read_regs array
19192 */
19193 for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
19194 addr = *(wreg_addr_p->read_regs);
19195 *p++ = REG_RD(sc, addr + j*4);
19196 }
19197 }
19200 /* Paged registers are supported in E2 & E3 only */
19201 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
19202 /* Read "paged" registers */
19203 bxe_read_pages_regs(sc, p, preset);
19204 }
19206 return (0);
19207 }
19209 static int
19210 bxe_grc_dump(struct bxe_softc *sc)
19211 {
19212 int rval = 0;
19213 uint32_t preset_idx;
19214 uint8_t *buf;
19215 uint32_t size;
19216 struct dump_header *d_hdr;
19218 if (sc->grcdump_done)
19219 return (rval);
19221 ecore_disable_blocks_parity(sc);
19223 buf = sc->grc_dump;
19224 d_hdr = sc->grc_dump;
19226 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
19227 d_hdr->version = BNX2X_DUMP_VERSION;
19228 d_hdr->preset = DUMP_ALL_PRESETS;
19230 if (CHIP_IS_E1(sc)) {
19231 d_hdr->dump_meta_data = DUMP_CHIP_E1;
19232 } else if (CHIP_IS_E1H(sc)) {
19233 d_hdr->dump_meta_data = DUMP_CHIP_E1H;
19234 } else if (CHIP_IS_E2(sc)) {
19235 d_hdr->dump_meta_data = DUMP_CHIP_E2 |
19236 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
19237 } else if (CHIP_IS_E3A0(sc)) {
19238 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
19239 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
19240 } else if (CHIP_IS_E3B0(sc)) {
19241 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
19242 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
19243 }
19245 buf += sizeof(struct dump_header);
19247 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
19249 /* Skip presets with IOR */
19250 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
19251 (preset_idx == 11))
19252 continue;
19254 rval = bxe_get_preset_regs(sc, sc->grc_dump, preset_idx);
19255 if (rval)
19256 break;
19259 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
19260 buf += size;
19261 }
19264 ecore_clear_blocks_parity(sc);
19265 ecore_enable_blocks_parity(sc);
19267 sc->grcdump_done = 1;
19268 return (rval);
19269 }
19271 static int
19272 bxe_add_cdev(struct bxe_softc *sc)
19273 {
19274 int grc_dump_size;
19276 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19277 sizeof(struct dump_header);
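/*
 * Sizing note (editorial): one dword-counted register image per preset,
 * times 4 for bytes, plus room for the dump_header that bxe_grc_dump()
 * writes first.
 */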
19279 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
19281 if (sc->grc_dump == NULL)
19282 return (-1);
19284 sc->ioctl_dev = make_dev(&bxe_cdevsw,
19285 sc->ifnet->if_dunit,
19286 UID_ROOT,
19287 GID_WHEEL,
19288 0600,
19289 "%s",
19290 if_name(sc->ifnet));
19292 if (sc->ioctl_dev == NULL) {
19294 free(sc->grc_dump, M_DEVBUF);
19295 return (-1);
19296 }
19299 sc->ioctl_dev->si_drv1 = sc;
19301 return (0);
19302 }
19304 static void
19305 bxe_del_cdev(struct bxe_softc *sc)
19306 {
19307 if (sc->ioctl_dev != NULL)
19308 destroy_dev(sc->ioctl_dev);
19310 if (sc->grc_dump != NULL)
19311 free(sc->grc_dump, M_DEVBUF);
19312 return;
19313 }
19316 static int
19317 bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19318 struct thread *td)
19319 {
19320 struct bxe_softc *sc;
19321 int rval = 0;
19322 int grc_dump_size;
19323 bxe_grcdump_t *dump = NULL;
19326 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19327 return (ENXIO);
19331 dump = (bxe_grcdump_t *)data;
19333 switch(cmd) {
19335 case BXE_GRC_DUMP_SIZE:
19336 dump->pci_func = sc->pcie_func;
19337 dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19338 sizeof(struct dump_header);
19339 break;
19341 case BXE_GRC_DUMP:
19343 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19344 sizeof(struct dump_header);
19346 if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) ||
19347 (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) {
19348 rval = EINVAL;
19349 break;
19350 }
19351 dump->grcdump_dwords = grc_dump_size >> 2;
19352 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19353 sc->grcdump_done = 0;