/*
 * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.75"

#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"
/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif
#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
struct pbf_pN_buf_regs {

struct pbf_pN_cmd_regs {
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64

static struct bxe_device_type bxe_devs[] = {
        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57710 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57711 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57711E 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57712 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57712 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57712 VF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57800 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57800 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57800 VF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57810 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57810 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57810 VF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57811 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57811 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57811 VF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57840 4x10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57840 2x20GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57840 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "Broadcom NetXtreme II BCM57840 VF 10GbE"
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
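
/*
 * Example usage (not part of this file): the module is typically loaded
 * at boot by adding the following to /boot/loader.conf, or at runtime
 * with "kldload if_bxe" (the module name may vary by FreeBSD release):
 *
 *   if_bxe_load="YES"
 */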
/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);

struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t aer; /* XXX automatic error recovery */

static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
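
/*
 * Example (not from this file): the knobs above are boot-time tunables
 * (CTLFLAG_RDTUN/CTLFLAG_TUN), so they are normally set in
 * /boot/loader.conf before the driver attaches, e.g.:
 *
 *   hw.bxe.interrupt_mode=2   # prefer MSI-X, fall back to MSI/INTx
 *   hw.bxe.queue_count=4      # four RSS queues
 *   hw.bxe.hc_rx_ticks=25     # Rx interrupt coalescing (usecs)
 */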
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                 \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                 \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
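
/*
 * Worked example: these macros turn a byte offset into a 32-bit word
 * index. If total_bytes_received_hi sat at byte offset 8 within
 * struct bxe_eth_stats (an assumption for illustration only), then
 * STATS_OFFSET32(total_bytes_received_hi) == 8 / 4 == 2, i.e. the
 * third dword of the structure.
 */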
static const struct {
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
        8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
        8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
        8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
        8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
        8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
        8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
        8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
        8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
        8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
        8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
        4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
        4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
        8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
        8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
        8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
        8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
        8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
        8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
        4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
        8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
        8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
        8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
        8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
        8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
        8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
        8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
        8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
        8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
        8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
        8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
        8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
        8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
        8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
        8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
        8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
        8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
        8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
        8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
        8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
        8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(recoverable_error),
        4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error),
        4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
    { STATS_OFFSET32(eee_tx_lpi),
        4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
        4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
        4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
        4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
        4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
        4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
        4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
        4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
        4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
        4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
        4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
        4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
        4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
        4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
        4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
        4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
        4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
        4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
        4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
    { STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
        4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"},
    { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
        4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"},
    { STATS_OFFSET32(tx_chain_lost_mbuf),
        4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
        4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
        4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
        4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
        4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
        4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
        4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}
static const struct {
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
        8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
        8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
        8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
        8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
        8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
        8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
        8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
        8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
    { Q_STATS_OFFSET32(rx_calls),
    { Q_STATS_OFFSET32(rx_pkts),
    { Q_STATS_OFFSET32(rx_tpa_pkts),
    { Q_STATS_OFFSET32(rx_soft_errors),
        4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
        4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
        4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
        4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
        4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
    { Q_STATS_OFFSET32(tx_soft_errors),
        4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
        4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
        4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
        4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
        4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
        4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
        4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
        4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
        4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
        4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
        4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
        4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
        4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
        4, "tx_unsupported_tso_request_ipv6"},
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
        4, "tx_unsupported_tso_request_not_tcp"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
        4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
        4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
        4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
        4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
        4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
        4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
        4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
        4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
        4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
        4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
        4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
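
/*
 * Sketch (illustrative only, not this driver's actual export code):
 * the counts above allow the stats arrays to be walked generically,
 * e.g. when creating one sysctl per named statistic:
 *
 *   for (i = 0; i < BXE_NUM_ETH_STATS; i++)
 *       ... export bxe_eth_stats_arr[i].string ...
 */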
static void bxe_cmng_fns_init(struct bxe_softc *sc,
static int  bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
static void bxe_int_disable(struct bxe_softc *sc);
static int bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
static int bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int bxe_init_locked(struct bxe_softc *sc);
static int bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_rx_mode_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
    uint8_t current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
        return (crc32_result);

    for (byte = 0; byte < crc32_length; byte = byte + 1)
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);
            crc32_result = crc32_result << 1;
            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */

    /*
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
        temp |= crc32_result & 1;
        /* temp[31-bit] = crc32_result[bit] */

    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    uint32_t t0, t1, t2, t3;
    t0 = (0x000000ff & (temp >> 24));
    t1 = (0x0000ff00 & (temp >> 8));
    t2 = (0x00ff0000 & (temp << 8));
    t3 = (0xff000000 & (temp << 24));
    crc32_result = t0 | t1 | t2 | t3;

    crc32_result = ~crc32_result;

    return (crc32_result);
             volatile unsigned long *addr)
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);

bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
    atomic_set_acq_long(addr, (1 << nr));

bxe_clear_bit(int nr,
              volatile unsigned long *addr)
    atomic_clear_acq_long(addr, (1 << nr));

bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;

bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;

bxe_cmpxchg(volatile int *addr,
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
 * failures back to the caller.
 */
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    struct bxe_dma *dma = arg;

        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);

    dma->paddr = segs->ds_addr;

    BLOGD(dma->sc, DBG_LOAD,
          "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
          dma->msg, dma->vaddr, (void *)dma->paddr,
          dma->nseg, dma->size);
/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
bxe_dma_alloc(struct bxe_softc *sc,
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);

    memset(dma, 0, sizeof(*dma)); /* sanity */

    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));

    rc = bus_dmamap_load(dma->tag,
                         bxe_dma_map_addr, /* BLOGD in here */
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
    if (dma->size > 0) {
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);

    memset(dma, 0, sizeof(*dma));
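
/*
 * Usage sketch (hypothetical; 'example_block' is not a real softc
 * field): every successful bxe_dma_alloc() is paired with a
 * bxe_dma_free() at teardown.
 *
 *   struct bxe_dma example_block;
 *   memset(&example_block, 0, sizeof(example_block));
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &example_block, "example") != 0)
 *       return (ENOMEM);
 *   ...
 *   bxe_dma_free(sc, &example_block);
 */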
/*
 * These indirect read and write routines are used only during init.
 * The locking is handled by the MCP.
 */
bxe_reg_wr_ind(struct bxe_softc *sc,
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

bxe_reg_rd_ind(struct bxe_softc *sc,
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
bxe_acquire_hw_lock(struct bxe_softc *sc,
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);

        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {

    BLOGE(sc, "Resource lock timeout!\n");
bxe_release_hw_lock(struct bxe_softc *sc,
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);

        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);

    REG_WR(sc, hw_lock_control_reg, resource_bit);
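
/*
 * Usage sketch: callers always pair acquire/release around the critical
 * section; the NVRAM helpers below follow exactly this pattern.
 *
 *   bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
 *   ... touch the shared resource ...
 *   bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
 */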
/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
bxe_acquire_nvram_lock(struct bxe_softc *sc)
    int port = SC_PORT(sc);

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface\n");
bxe_release_nvram_lock(struct bxe_softc *sc)
    int port = SC_PORT(sc);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
bxe_enable_nvram_access(struct bxe_softc *sc)
    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));

bxe_disable_nvram_access(struct bxe_softc *sc)
    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
bxe_nvram_read_dword(struct bxe_softc *sc,
    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* wait for completion */

    for (i = 0; i < count; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);

    BLOGE(sc, "nvram read timeout expired\n");
bxe_nvram_read(struct bxe_softc *sc,
    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        ret_buf += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);

        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
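
/*
 * Usage sketch (hypothetical buffer; not from this file): read the
 * first 64 bytes of flash. Offset and length must be dword aligned.
 *
 *   uint8_t buf[64];
 *   if (bxe_nvram_read(sc, 0, buf, sizeof(buf)) != 0)
 *       BLOGE(sc, "flash read failed\n");
 */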
bxe_nvram_write_dword(struct bxe_softc *sc,
    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* wait for completion */

    for (i = 0; i < count; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {

    BLOGE(sc, "nvram write timeout expired\n");
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

bxe_nvram_write1(struct bxe_softc *sc,
    uint32_t align_offset;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
bxe_nvram_write(struct bxe_softc *sc,
    uint32_t written_so_far;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);
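
    /*
     * Writes are issued one dword at a time below. The
     * MCPR_NVM_COMMAND_FIRST/LAST flags bracket each flash page: LAST
     * is set on the final dword of the buffer or at a page boundary,
     * FIRST at the start of the next page.
     */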
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        data_buf += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
/* copy command into DMAE command memory and set DMAE command Go */
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_command *dmae,
    uint32_t cmd_offset;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));

    REG_WR(sc, dmae_reg_go_c[idx], 1);

bxe_dmae_opcode_add_comp(uint32_t opcode,
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
    return (opcode & ~DMAE_COMMAND_SRC_RESET);

bxe_dmae_opcode(struct bxe_softc *sc,
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;

        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_command *dmae,
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val = DMAE_COMP_VAL;
/* issue a DMAE command over the init channel and wait for completion */
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_command *dmae)
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    /* reset completion */

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        (sc->recovery_state != BXE_RECOVERY_DONE &&
         sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);

    BXE_DMAE_UNLOCK(sc);
bxe_read_dmae(struct bxe_softc *sc,
    struct dmae_command dmae;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,
    struct dmae_command dmae;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));

bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
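
    /*
     * Split the transfer into chunks of at most dmae_wr_max dwords:
     * 'len' counts dwords while 'offset' advances in bytes (hence the
     * "* 4" below); the final bxe_write_dmae() flushes the remainder.
     */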
    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
bxe_set_ctx_validation(struct bxe_softc *sc,
                       struct eth_context *cxt,
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);

bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));

    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);

bxe_update_coalesce_sb_index(struct bxe_softc *sc,
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
elink_cb_udelay(struct bxe_softc *sc,

elink_cb_reg_read(struct bxe_softc *sc,
    return (REG_RD(sc, reg_addr));

elink_cb_reg_write(struct bxe_softc *sc,
    REG_WR(sc, reg_addr, val);

elink_cb_reg_wb_write(struct bxe_softc *sc,
    REG_WR_DMAE(sc, offset, wb_write, len);

elink_cb_reg_wb_read(struct bxe_softc *sc,
    REG_RD_DMAE(sc, offset, wb_write, len);

elink_cb_path_id(struct bxe_softc *sc)
    return (SC_PATH(sc));

elink_cb_event_log(struct bxe_softc *sc,
                   const elink_log_id_t elink_log_id,
    va_start(ap, elink_log_id);
    _XXX_(sc, lm_log_id, ap);

    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
bxe_set_spio(struct bxe_softc *sc,
    /* Only 2 SPIOs are configurable */
    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
        BLOGE(sc, "Invalid SPIO 0x%x\n", spio);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    /* read SPIO and mask except the float bits */
    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

    case MISC_SPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
        /* clear FLOAT and set CLR */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_CLR_POS);

    case MISC_SPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
        /* clear FLOAT and set SET */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_SET_POS);

    case MISC_SPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);

    REG_WR(sc, MISC_REG_SPIO, spio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
bxe_gpio_read(struct bxe_softc *sc,
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);

    /* read GPIO value */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);

    /* get the requested pin value */
    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
bxe_gpio_write(struct bxe_softc *sc,
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
              "Set GPIO %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
              "Set GPIO %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
              "Set GPIO %d (shift %d) -> input\n",
              gpio_num, gpio_shift);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
bxe_gpio_mult_write(struct bxe_softc *sc,
    /* any port swapping should be handled by caller */

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);

        BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode);
        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
bxe_gpio_int_write(struct bxe_softc *sc,
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
elink_cb_gpio_read(struct bxe_softc *sc,
    return (bxe_gpio_read(sc, gpio_num, port));

elink_cb_gpio_write(struct bxe_softc *sc,
                    uint8_t mode, /* 0=low 1=high */
    return (bxe_gpio_write(sc, gpio_num, mode, port));

elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t mode) /* 0=low 1=high */
    return (bxe_gpio_mult_write(sc, pins, mode));

elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint8_t mode, /* 0=low 1=high */
    return (bxe_gpio_int_write(sc, gpio_num, mode, port));

elink_cb_notify_link_changed(struct bxe_softc *sc)
    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
/* send the MCP a request, block until there is a reply */
elink_cb_fw_command(struct bxe_softc *sc,
    int mb_idx = SC_FW_MB_IDX(sc);
    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          cnt*delay, rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;
        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);

    BXE_FWMB_UNLOCK(sc);

bxe_fw_command(struct bxe_softc *sc,
    return (elink_cb_fw_command(sc, command, param));
__storm_memset_dma_mapping(struct bxe_softc *sc,
    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));

storm_memset_spq_addr(struct bxe_softc *sc,
    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));

    __storm_memset_dma_mapping(sc, addr, mapping);

storm_memset_vf_to_pf(struct bxe_softc *sc,
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);

storm_memset_func_en(struct bxe_softc *sc,
    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);

storm_memset_eq_data(struct bxe_softc *sc,
                     struct event_ring_data *eq_data,
    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);

storm_memset_eq_prod(struct bxe_softc *sc,
    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));

    REG_WR16(sc, addr, eq_prod);
2343 * Post a slowpath command.
2345 * A slowpath command is used to propagate a configuration change through
2346 * the controller in a controlled manner, allowing each STORM processor and
2347 * other H/W blocks to phase in the change. The commands sent on the
2348 * slowpath are referred to as ramrods. Depending on the ramrod used the
2349 * completion of the ramrod will occur in different ways. Here's a
2350 * breakdown of ramrods and how they complete:
2352 * RAMROD_CMD_ID_ETH_PORT_SETUP
2353 * Used to setup the leading connection on a port. Completes on the
2354 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
2356 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2357 * Used to setup an additional connection on a port. Completes on the
2358 * RCQ of the multi-queue/RSS connection being initialized.
2360 * RAMROD_CMD_ID_ETH_STAT_QUERY
2361 * Used to force the storm processors to update the statistics database
2362 * in host memory. This ramrod is sent on the leading connection CID and
2363 * completes as an index increment of the CSTORM on the default status block.
2366 * RAMROD_CMD_ID_ETH_UPDATE
2367 * Used to update the state of the leading connection, usually to update
2368 * the RSS indirection table. Completes on the RCQ of the leading
2369 * connection. (Not currently used under FreeBSD until OS support becomes available.)
2372 * RAMROD_CMD_ID_ETH_HALT
2373 * Used when tearing down a connection prior to driver unload. Completes
2374 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2375 * use this on the leading connection.
2377 * RAMROD_CMD_ID_ETH_SET_MAC
2378 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
2379 * the RCQ of the leading connection.
2381 * RAMROD_CMD_ID_ETH_CFC_DEL
2382 * Used when tearing down a connection prior to driver unload. Completes
2383 * on the RCQ of the leading connection (since the current connection
2384 * has been completely removed from controller memory).
2386 * RAMROD_CMD_ID_ETH_PORT_DEL
2387 * Used to tear down the leading connection prior to driver unload,
2388 * typically fp[0]. Completes as an index increment of the CSTORM on the
2389 * default status block.
2391 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2392 * Used for connection offload. Completes on the RCQ of the multi-queue
2393 * RSS connection that is being offloaded. (Not currently used under FreeBSD.)
2396 * There can only be one command pending per function.
2399 * 0 = Success, !0 = Failure.
2402 /* must be called under the spq lock */
2404 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2406 struct eth_spe *next_spe = sc->spq_prod_bd;
2408 if (sc->spq_prod_bd == sc->spq_last_bd) {
2409 /* wrap back to the first eth_spq */
2410 sc->spq_prod_bd = sc->spq;
2411 sc->spq_prod_idx = 0;
2420 /* must be called under the spq lock */
2422 void bxe_sp_prod_update(struct bxe_softc *sc)
2424 int func = SC_FUNC(sc);
2427 * Make sure that BD data is updated before writing the producer.
2428 * BD data is written to the memory, the producer is read from the
2429 * memory, thus we need a full memory barrier to ensure the ordering.
2433 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2436 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2437 BUS_SPACE_BARRIER_WRITE);
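/*
 * Illustrative note: the full memory barrier described above orders the
 * BD stores against the SPQ producer store, and the bus_space_barrier()
 * call orders the memory-mapped producer write ahead of any subsequent
 * bus accesses.
 */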
2441 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2443 * @cmd: command to check
2444 * @cmd_type: command type
2447 int bxe_is_contextless_ramrod(int cmd,
2450 if ((cmd_type == NONE_CONNECTION_TYPE) ||
2451 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2452 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2453 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2454 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2455 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2456 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2464 * bxe_sp_post - place a single command on an SP ring
2466 * @sc: driver handle
2467 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2468 * @cid: SW CID the command is related to
2469 * @data_hi: command private data address (high 32 bits)
2470 * @data_lo: command private data address (low 32 bits)
2471 * @cmd_type: command type (e.g. NONE, ETH)
2473 * SP data is handled as if it's always an address pair, thus data fields are
2474 * not swapped to little endian in upper functions. Instead this function swaps
2475 * data as if it's two uint32 fields.
2478 bxe_sp_post(struct bxe_softc *sc,
2485 struct eth_spe *spe;
2489 common = bxe_is_contextless_ramrod(command, cmd_type);
2494 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2495 BLOGE(sc, "EQ ring is full!\n");
2500 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2501 BLOGE(sc, "SPQ ring is full!\n");
2507 spe = bxe_sp_get_next(sc);
2509 /* the CID needs the port number to be encoded in it */
2510 spe->hdr.conn_and_cmd_data =
2511 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));
2513 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
2515 /* TBD: Check if it works for VFs */
2516 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
2517 SPE_HDR_FUNCTION_ID);
2519 spe->hdr.type = htole16(type);
2521 spe->data.update_data_addr.hi = htole32(data_hi);
2522 spe->data.update_data_addr.lo = htole32(data_lo);
2525 * It's OK if the actual decrement is issued towards the memory
2526 * somewhere between the lock and unlock. Thus no further explicit
2527 * memory barrier is needed.
2530 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2532 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2535 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2536 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2537 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2539 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2541 (uint32_t)U64_HI(sc->spq_dma.paddr),
2542 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2549 atomic_load_acq_long(&sc->cq_spq_left),
2550 atomic_load_acq_long(&sc->eq_spq_left));
2552 bxe_sp_prod_update(sc);
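/*
 * Illustrative only (not compiled): a minimal sketch of posting a
 * contextless slowpath command with bxe_sp_post(). The command id used
 * here (RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE) and the use of the
 * func_rdata scratch area are assumptions for the example; the real
 * command id, CID, and data layout depend on the ramrod being issued.
 */
#if 0
static int
bxe_sp_post_example(struct bxe_softc *sc)
{
    bus_addr_t rdata_mapping = BXE_SP_MAPPING(sc, func_rdata);

    /* a NONE_CONNECTION_TYPE command completes on the EQ (contextless) */
    return (bxe_sp_post(sc,
                        RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, /* command */
                        0,                                    /* cid */
                        U64_HI(rdata_mapping),                /* data_hi */
                        U64_LO(rdata_mapping),                /* data_lo */
                        NONE_CONNECTION_TYPE));               /* cmd_type */
}
#endif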
2559 * bxe_debug_print_ind_table - prints the indirection table configuration.
2561 * @sc: driver handle
2562 * @p: pointer to rss configuration
2566 bxe_debug_print_ind_table(struct bxe_softc *sc,
2567 struct ecore_config_rss_params *p)
2571 BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n");
2572 BLOGD(sc, DBG_LOAD, " 0x0000: ");
2573 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
2574 BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]);
2576 /* Print 4 bytes in a line */
2577 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
2578 (((i + 1) & 0x3) == 0)) {
2579 BLOGD(sc, DBG_LOAD, "\n");
2580 BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1);
2584 BLOGD(sc, DBG_LOAD, "\n");
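/*
 * Example of the resulting log output (illustrative values only):
 *   Setting indirection table to:
 *    0x0000: 0x00 0x01 0x02 0x03
 *   0x0004: 0x04 0x05 0x06 0x07
 *   ...
 */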
2589 * FreeBSD Device probe function.
2591 * Compares the device found to the driver's list of supported devices and
2592 * reports back to the BSD loader whether this is the right driver for the device.
2593 * This is the driver entry function called from the "kldload" command.
2596 * BUS_PROBE_DEFAULT on success, positive value on failure.
2599 bxe_probe(device_t dev)
2601 struct bxe_softc *sc;
2602 struct bxe_device_type *t;
2604 uint16_t did, sdid, svid, vid;
2606 /* Find our device structure */
2607 sc = device_get_softc(dev);
2611 /* Get the data for the device to be probed. */
2612 vid = pci_get_vendor(dev);
2613 did = pci_get_device(dev);
2614 svid = pci_get_subvendor(dev);
2615 sdid = pci_get_subdevice(dev);
2618 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2619 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2621 /* Look through the list of known devices for a match. */
2622 while (t->bxe_name != NULL) {
2623 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2624 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2625 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2626 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2627 if (descbuf == NULL)
2630 /* Print out the device identity. */
2631 snprintf(descbuf, BXE_DEVDESC_MAX,
2632 "%s (%c%d) BXE v:%s\n", t->bxe_name,
2633 (((pci_read_config(dev, PCIR_REVID, 4) &
2635 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2636 BXE_DRIVER_VERSION);
2638 device_set_desc_copy(dev, descbuf);
2639 free(descbuf, M_TEMP);
2640 return (BUS_PROBE_DEFAULT);
2649 bxe_init_mutexes(struct bxe_softc *sc)
2651 #ifdef BXE_CORE_LOCK_SX
2652 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2653 "bxe%d_core_lock", sc->unit);
2654 sx_init(&sc->core_sx, sc->core_sx_name);
2656 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2657 "bxe%d_core_lock", sc->unit);
2658 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2661 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2662 "bxe%d_sp_lock", sc->unit);
2663 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2665 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2666 "bxe%d_dmae_lock", sc->unit);
2667 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2669 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2670 "bxe%d_phy_lock", sc->unit);
2671 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2673 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2674 "bxe%d_fwmb_lock", sc->unit);
2675 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2677 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2678 "bxe%d_print_lock", sc->unit);
2679 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2681 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2682 "bxe%d_stats_lock", sc->unit);
2683 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2685 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2686 "bxe%d_mcast_lock", sc->unit);
2687 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2691 bxe_release_mutexes(struct bxe_softc *sc)
2693 #ifdef BXE_CORE_LOCK_SX
2694 sx_destroy(&sc->core_sx);
2696 if (mtx_initialized(&sc->core_mtx)) {
2697 mtx_destroy(&sc->core_mtx);
2701 if (mtx_initialized(&sc->sp_mtx)) {
2702 mtx_destroy(&sc->sp_mtx);
2705 if (mtx_initialized(&sc->dmae_mtx)) {
2706 mtx_destroy(&sc->dmae_mtx);
2709 if (mtx_initialized(&sc->port.phy_mtx)) {
2710 mtx_destroy(&sc->port.phy_mtx);
2713 if (mtx_initialized(&sc->fwmb_mtx)) {
2714 mtx_destroy(&sc->fwmb_mtx);
2717 if (mtx_initialized(&sc->print_mtx)) {
2718 mtx_destroy(&sc->print_mtx);
2721 if (mtx_initialized(&sc->stats_mtx)) {
2722 mtx_destroy(&sc->stats_mtx);
2725 if (mtx_initialized(&sc->mcast_mtx)) {
2726 mtx_destroy(&sc->mcast_mtx);
2731 bxe_tx_disable(struct bxe_softc* sc)
2733 struct ifnet *ifp = sc->ifnet;
2735 /* tell the stack the driver is stopped and TX queue is full */
2737 ifp->if_drv_flags = 0;
2742 bxe_drv_pulse(struct bxe_softc *sc)
2744 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2745 sc->fw_drv_pulse_wr_seq);
2749 bxe_has_tx_work_unload(struct bxe_fastpath *fp)
2751 mb(); /* consumer and producer can change */
2752 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
2755 static inline uint16_t
2756 bxe_tx_avail(struct bxe_softc *sc,
2757 struct bxe_fastpath *fp)
2763 prod = fp->tx_bd_prod;
2764 cons = fp->tx_bd_cons;
2766 used = SUB_S16(prod, cons);
2769 KASSERT((used >= 0), ("used tx bds < 0"));
2770 KASSERT((used <= sc->tx_ring_size), ("used tx bds > tx_ring_size"));
2771 KASSERT(((sc->tx_ring_size - used) <= MAX_TX_AVAIL),
2772 ("invalid number of tx bds used"));
2775 return (int16_t)(sc->tx_ring_size) - used;
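/*
 * Worked example of the wraparound arithmetic above (illustrative):
 * with 16-bit indices, prod = 0x0005 and cons = 0xFFFB gives
 * SUB_S16(prod, cons) = (int16_t)(0x0005 - 0xFFFB) = 10, so ten BDs are
 * in use even though prod has numerically wrapped below cons.
 */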
2779 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2783 mb(); /* status block fields can change */
2784 hw_cons = le16toh(*fp->tx_cons_sb);
2785 return (hw_cons != fp->tx_pkt_cons);
2788 static inline uint8_t
2789 bxe_has_tx_work(struct bxe_fastpath *fp)
2791 /* expand this for multi-cos if ever supported */
2792 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2796 bxe_has_rx_work(struct bxe_fastpath *fp)
2798 uint16_t rx_cq_cons_sb;
2800 mb(); /* status block fields can change */
2801 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2802 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2804 return (fp->rx_cq_cons != rx_cq_cons_sb);
2808 bxe_sp_event(struct bxe_softc *sc,
2809 struct bxe_fastpath *fp,
2810 union eth_rx_cqe *rr_cqe)
2812 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2813 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2814 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2815 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2817 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2818 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2822 * If cid is within VF range, replace the slowpath object with the
2823 * one corresponding to this VF
2825 if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) {
2826 bxe_iov_set_queue_sp_obj(sc, cid, &q_obj);
2831 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2832 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2833 drv_cmd = ECORE_Q_CMD_UPDATE;
2836 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2837 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2838 drv_cmd = ECORE_Q_CMD_SETUP;
2841 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2842 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2843 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2846 case (RAMROD_CMD_ID_ETH_HALT):
2847 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2848 drv_cmd = ECORE_Q_CMD_HALT;
2851 case (RAMROD_CMD_ID_ETH_TERMINATE):
2852 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2853 drv_cmd = ECORE_Q_CMD_TERMINATE;
2856 case (RAMROD_CMD_ID_ETH_EMPTY):
2857 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2858 drv_cmd = ECORE_Q_CMD_EMPTY;
2862 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2863 command, fp->index);
2867 if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2868 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2870 * q_obj->complete_cmd() failure means that this was
2871 * an unexpected completion.
2873 * In this case we don't want to increase the sc->spq_left
2874 * because apparently we haven't sent this command in the first place.
2877 // bxe_panic(sc, ("Unexpected SP completion\n"));
2882 /* SRIOV: reschedule any 'in_progress' operations */
2883 bxe_iov_sp_event(sc, cid, TRUE);
2886 atomic_add_acq_long(&sc->cq_spq_left, 1);
2888 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2889 atomic_load_acq_long(&sc->cq_spq_left));
2892 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
2893 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) {
2895 * If Queue update ramrod is completed for last Queue in AFEX VIF set
2896 * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to
2897 * prevent case that both bits are cleared. At the end of load/unload
2898 * driver checks that sp_state is cleared and this order prevents
2901 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state);
2903 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state);
2905 /* schedule the sp task as MCP ack is required */
2906 bxe_schedule_sp_task(sc);
2912 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2913 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2914 * the current aggregation queue as in-progress.
2917 bxe_tpa_start(struct bxe_softc *sc,
2918 struct bxe_fastpath *fp,
2922 struct eth_fast_path_rx_cqe *cqe)
2924 struct bxe_sw_rx_bd tmp_bd;
2925 struct bxe_sw_rx_bd *rx_buf;
2926 struct eth_rx_bd *rx_bd;
2928 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2931 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2932 "cons=%d prod=%d\n",
2933 fp->index, queue, cons, prod);
2935 max_agg_queues = MAX_AGG_QS(sc);
2937 KASSERT((queue < max_agg_queues),
2938 ("fp[%02d] invalid aggr queue (%d >= %d)!",
2939 fp->index, queue, max_agg_queues));
2941 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2942 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2945 /* copy the existing mbuf and mapping from the TPA pool */
2946 tmp_bd = tpa_info->bd;
2948 if (tmp_bd.m == NULL) {
2949 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n",
2951 /* XXX Error handling? */
2955 /* change the TPA queue to the start state */
2956 tpa_info->state = BXE_TPA_STATE_START;
2957 tpa_info->placement_offset = cqe->placement_offset;
2958 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2959 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2960 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2962 fp->rx_tpa_queue_used |= (1 << queue);
2965 * If all the buffer descriptors are filled with mbufs then fill in
2966 * the current consumer index with a new BD. Else if a maximum Rx
2967 * buffer limit is imposed then fill in the next producer index.
2969 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2972 /* move the received mbuf and mapping to TPA pool */
2973 tpa_info->bd = fp->rx_mbuf_chain[cons];
2975 /* release any existing RX BD mbuf mappings */
2976 if (cons != index) {
2977 rx_buf = &fp->rx_mbuf_chain[cons];
2979 if (rx_buf->m_map != NULL) {
2980 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2981 BUS_DMASYNC_POSTREAD);
2982 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2986 * We get here when the maximum number of rx buffers is less than
2987 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2988 * it out here without concern of a memory leak.
2990 fp->rx_mbuf_chain[cons].m = NULL;
2993 /* update the Rx SW BD with the mbuf info from the TPA pool */
2994 fp->rx_mbuf_chain[index] = tmp_bd;
2996 /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2997 rx_bd = &fp->rx_chain[index];
2998 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2999 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
3003 * When a TPA aggregation is completed, loop through the individual mbufs
3004 * of the aggregation, combining them into a single mbuf which will be sent
3005 * up the stack. Refill all freed SGEs with mbufs as we go along.
3008 bxe_fill_frag_mbuf(struct bxe_softc *sc,
3009 struct bxe_fastpath *fp,
3010 struct bxe_sw_tpa_info *tpa_info,
3014 struct eth_end_agg_rx_cqe *cqe,
3017 struct mbuf *m_frag;
3018 uint32_t frag_len, frag_size, i;
3023 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
3026 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
3027 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
3029 /* make sure the aggregated frame is not too big to handle */
3030 if (pages > 8 * PAGES_PER_SGE) {
3031 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
3032 "pkt_len=%d len_on_bd=%d frag_size=%d\n",
3033 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
3034 tpa_info->len_on_bd, frag_size);
3035 bxe_panic(sc, ("sge page count error\n"));
3040 * Scan through the scatter gather list pulling individual mbufs into a
3041 * single mbuf for the host stack.
3043 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
3044 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
3047 * Firmware gives the indices of the SGE as if the ring is an array
3048 * (meaning that the "next" element will consume 2 indices).
3050 frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
3052 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
3053 "sge_idx=%d frag_size=%d frag_len=%d\n",
3054 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
3056 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3058 /* allocate a new mbuf for the SGE */
3059 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3061 /* Leave all remaining SGEs in the ring! */
3065 /* update the fragment length */
3066 m_frag->m_len = frag_len;
3068 /* concatenate the fragment to the head mbuf */
3070 fp->eth_q_stats.mbuf_alloc_sge--;
3072 /* update the TPA mbuf size and remaining fragment size */
3073 m->m_pkthdr.len += frag_len;
3074 frag_size -= frag_len;
3078 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
3079 fp->index, queue, frag_size);
3085 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
3089 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
3090 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
3092 for (j = 0; j < 2; j++) {
3093 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
3100 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
3102 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
3103 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
3106 * Clear the last two indices in each page. These are the indices that
3107 * correspond to the "next" element, hence will never be indicated and
3108 * should be removed from the calculations.
3110 bxe_clear_sge_mask_next_elems(fp);
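/*
 * Worked example (illustrative, assuming RX_SGE_TOTAL_PER_PAGE is 512):
 * bxe_clear_sge_mask_next_elems() clears bits 510/511, 1022/1023, and so
 * on -- the two trailing "next page" pointer slots of each SGE page,
 * which never carry buffers and must not count toward the producer
 * calculation.
 */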
3114 bxe_update_last_max_sge(struct bxe_fastpath *fp,
3117 uint16_t last_max = fp->last_max_sge;
3119 if (SUB_S16(idx, last_max) > 0) {
3120 fp->last_max_sge = idx;
3125 bxe_update_sge_prod(struct bxe_softc *sc,
3126 struct bxe_fastpath *fp,
3128 struct eth_end_agg_rx_cqe *cqe)
3130 uint16_t last_max, last_elem, first_elem;
3138 /* first mark all used pages */
3139 for (i = 0; i < sge_len; i++) {
3140 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
3141 RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[i])));
3145 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
3146 fp->index, sge_len - 1,
3147 le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
3149 /* assume that the last SGE index is the biggest */
3150 bxe_update_last_max_sge(fp,
3151 le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
3153 last_max = RX_SGE(fp->last_max_sge);
3154 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3155 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3157 /* if ring is not full */
3158 if (last_elem + 1 != first_elem) {
3162 /* now update the prod */
3163 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3164 if (__predict_true(fp->sge_mask[i])) {
3168 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3169 delta += BIT_VEC64_ELEM_SZ;
3173 fp->rx_sge_prod += delta;
3174 /* clear page-end entries */
3175 bxe_clear_sge_mask_next_elems(fp);
3179 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3180 fp->index, fp->last_max_sge, fp->rx_sge_prod);
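/*
 * Worked example (illustrative): assume BIT_VEC64_ELEM_SZ is 64 and
 * first_elem is 2. If sge_mask[2] reaches zero (all 64 SGEs in that
 * element were consumed by completed aggregations), the loop above
 * refills it with BIT_VEC64_ELEM_ONE_MASK and advances rx_sge_prod by
 * 64; the first element with any bit still set stops the scan.
 */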
3184 * The aggregation on the current TPA queue has completed. Pull the individual
3185 * mbuf fragments together into a single mbuf, perform all necessary checksum
3186 * calculations, and send the resulting mbuf to the stack.
3189 bxe_tpa_stop(struct bxe_softc *sc,
3190 struct bxe_fastpath *fp,
3191 struct bxe_sw_tpa_info *tpa_info,
3194 struct eth_end_agg_rx_cqe *cqe,
3197 struct ifnet *ifp = sc->ifnet;
3202 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3203 fp->index, queue, tpa_info->placement_offset,
3204 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3208 /* allocate a replacement before modifying existing mbuf */
3209 rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3211 /* drop the frame and log an error */
3212 fp->eth_q_stats.rx_soft_errors++;
3213 goto bxe_tpa_stop_exit;
3216 /* we have a replacement, fixup the current mbuf */
3217 m_adj(m, tpa_info->placement_offset);
3218 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3220 /* mark the checksums valid (taken care of by the firmware) */
3221 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3222 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3223 m->m_pkthdr.csum_data = 0xffff;
3224 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3229 /* aggregate all of the SGEs into a single mbuf */
3230 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3232 /* drop the packet and log an error */
3233 fp->eth_q_stats.rx_soft_errors++;
3236 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
3237 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3238 m->m_flags |= M_VLANTAG;
3241 /* assign the packet to this interface */
3242 m->m_pkthdr.rcvif = ifp;
3244 #if __FreeBSD_version >= 800000
3245 /* specify what RSS queue was used for this flow */
3246 m->m_pkthdr.flowid = fp->index;
3247 m->m_flags |= M_FLOWID;
3251 fp->eth_q_stats.rx_tpa_pkts++;
3253 /* pass the frame to the stack */
3254 (*ifp->if_input)(ifp, m);
3257 /* we passed an mbuf up the stack or dropped the frame */
3258 fp->eth_q_stats.mbuf_alloc_tpa--;
3262 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3263 fp->rx_tpa_queue_used &= ~(1 << queue);
3267 bxe_rxeof(struct bxe_softc *sc,
3268 struct bxe_fastpath *fp)
3270 struct ifnet *ifp = sc->ifnet;
3271 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3272 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3278 /* CQ "next element" is of the size of the regular element */
3279 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3280 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3284 bd_cons = fp->rx_bd_cons;
3285 bd_prod = fp->rx_bd_prod;
3286 bd_prod_fw = bd_prod;
3287 sw_cq_cons = fp->rx_cq_cons;
3288 sw_cq_prod = fp->rx_cq_prod;
3291 * Memory barrier necessary as speculative reads of the rx
3292 * buffer can be ahead of the index in the status block
3297 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3298 fp->index, hw_cq_cons, sw_cq_cons);
3300 while (sw_cq_cons != hw_cq_cons) {
3301 struct bxe_sw_rx_bd *rx_buf = NULL;
3302 union eth_rx_cqe *cqe;
3303 struct eth_fast_path_rx_cqe *cqe_fp;
3304 uint8_t cqe_fp_flags;
3305 enum eth_rx_cqe_type cqe_fp_type;
3307 struct mbuf *m = NULL;
3309 comp_ring_cons = RCQ(sw_cq_cons);
3310 bd_prod = RX_BD(bd_prod);
3311 bd_cons = RX_BD(bd_cons);
3313 cqe = &fp->rcq_chain[comp_ring_cons];
3314 cqe_fp = &cqe->fast_path_cqe;
3315 cqe_fp_flags = cqe_fp->type_error_flags;
3316 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3319 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3320 "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3321 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u\n",
3327 CQE_TYPE(cqe_fp_flags),
3329 cqe_fp->status_flags,
3330 le32toh(cqe_fp->rss_hash_result),
3331 le16toh(cqe_fp->vlan_tag),
3332 le16toh(cqe_fp->pkt_len_or_gro_seg_len));
3334 /* is this a slowpath msg? */
3335 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3336 bxe_sp_event(sc, fp, cqe);
3340 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3342 if (!CQE_TYPE_FAST(cqe_fp_type)) {
3343 struct bxe_sw_tpa_info *tpa_info;
3344 uint16_t frag_size, pages;
3349 if (!fp->tpa_enable &&
3350 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) {
3351 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n",
3352 CQE_TYPE(cqe_fp_type));
3356 if (CQE_TYPE_START(cqe_fp_type)) {
3357 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3358 bd_cons, bd_prod, cqe_fp);
3359 m = NULL; /* packet not ready yet */
3363 KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3364 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3366 queue = cqe->end_agg_cqe.queue_index;
3367 tpa_info = &fp->rx_tpa_info[queue];
3369 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3372 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3373 tpa_info->len_on_bd);
3374 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
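/*
 * e.g. (illustrative, assuming 4KB SGE pages): frag_size = 9000
 * aligns up to 12288, giving pages = 3.
 */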
3376 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3377 &cqe->end_agg_cqe, comp_ring_cons);
3379 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe);
3386 /* is this an error packet? */
3387 if (__predict_false(cqe_fp_flags &
3388 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3389 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3390 fp->eth_q_stats.rx_soft_errors++;
3394 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3395 pad = cqe_fp->placement_offset;
3399 if (__predict_false(m == NULL)) {
3400 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3401 bd_cons, fp->index);
3405 /* XXX double copy if packet length under a threshold */
3408 * If all the buffer descriptors are filled with mbufs then fill in
3409 * the current consumer index with a new BD. Else if a maximum Rx
3410 * buffer limit is imposed then fill in the next producer index.
3412 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3413 (sc->max_rx_bufs != RX_BD_USABLE) ?
3416 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3418 fp->eth_q_stats.rx_soft_errors++;
3420 if (sc->max_rx_bufs != RX_BD_USABLE) {
3421 /* copy this consumer index to the producer index */
3422 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3423 sizeof(struct bxe_sw_rx_bd));
3424 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3430 /* current mbuf was detached from the bd */
3431 fp->eth_q_stats.mbuf_alloc_rx--;
3433 /* we allocated a replacement mbuf, fixup the current one */
3435 m->m_pkthdr.len = m->m_len = len;
3437 /* assign the packet to this interface */
3438 m->m_pkthdr.rcvif = ifp;
3440 /* assume no hardware checksum has completed */
3441 m->m_pkthdr.csum_flags = 0;
3443 /* validate checksum if offload enabled */
3444 if (ifp->if_capenable & IFCAP_RXCSUM) {
3445 /* check for a valid IP frame */
3446 if (!(cqe->fast_path_cqe.status_flags &
3447 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3448 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3449 if (__predict_false(cqe_fp_flags &
3450 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3451 fp->eth_q_stats.rx_hw_csum_errors++;
3453 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3454 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3458 /* check for a valid TCP/UDP frame */
3459 if (!(cqe->fast_path_cqe.status_flags &
3460 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3461 if (__predict_false(cqe_fp_flags &
3462 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3463 fp->eth_q_stats.rx_hw_csum_errors++;
3465 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3466 m->m_pkthdr.csum_data = 0xFFFF;
3467 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3473 /* if there is a VLAN tag then flag that info */
3474 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) {
3475 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3476 m->m_flags |= M_VLANTAG;
3479 #if __FreeBSD_version >= 800000
3480 /* specify what RSS queue was used for this flow */
3481 m->m_pkthdr.flowid = fp->index;
3482 m->m_flags |= M_FLOWID;
3487 bd_cons = RX_BD_NEXT(bd_cons);
3488 bd_prod = RX_BD_NEXT(bd_prod);
3489 bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3491 /* pass the frame to the stack */
3492 if (__predict_true(m != NULL)) {
3495 (*ifp->if_input)(ifp, m);
3500 sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3501 sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3503 /* limit spinning on the queue */
3504 if (rx_pkts == sc->rx_budget) {
3505 fp->eth_q_stats.rx_budget_reached++;
3508 } /* while work to do */
3510 fp->rx_bd_cons = bd_cons;
3511 fp->rx_bd_prod = bd_prod_fw;
3512 fp->rx_cq_cons = sw_cq_cons;
3513 fp->rx_cq_prod = sw_cq_prod;
3515 /* Update producers */
3516 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3518 fp->eth_q_stats.rx_pkts += rx_pkts;
3519 fp->eth_q_stats.rx_calls++;
3521 BXE_FP_RX_UNLOCK(fp);
3523 return (sw_cq_cons != hw_cq_cons);
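/*
 * Illustrative note: a nonzero return above means the software CQ
 * consumer did not catch up with the hardware producer within rx_budget,
 * so the caller knows more receive work is pending and can reschedule
 * itself instead of re-enabling interrupts.
 */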
3527 bxe_free_tx_pkt(struct bxe_softc *sc,
3528 struct bxe_fastpath *fp,
3531 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3532 struct eth_tx_start_bd *tx_start_bd;
3533 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3537 /* unmap the mbuf from non-paged memory */
3538 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3540 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3541 nbd = le16toh(tx_start_bd->nbd) - 1;
3544 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) {
3545 bxe_panic(sc, ("BAD nbd!\n"));
3549 new_cons = (tx_buf->first_bd + nbd);
3552 struct eth_tx_bd *tx_data_bd;
3555 * The following code doesn't do anything, but is left here
3556 * for clarity on which BDs the new value of new_cons skipped.
3559 /* get the next bd */
3560 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3562 /* skip the parse bd */
3564 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3566 /* skip the TSO split header bd since they have no mapping */
3567 if (tx_buf->flags & BXE_TSO_SPLIT_BD) {
3569 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3572 /* now free frags */
3574 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd;
3576 bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
3582 if (__predict_true(tx_buf->m != NULL)) {
3584 fp->eth_q_stats.mbuf_alloc_tx--;
3586 fp->eth_q_stats.tx_chain_lost_mbuf++;
3590 tx_buf->first_bd = 0;
3595 /* transmit timeout watchdog */
3597 bxe_watchdog(struct bxe_softc *sc,
3598 struct bxe_fastpath *fp)
3602 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3603 BXE_FP_TX_UNLOCK(fp);
3607 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3609 BXE_FP_TX_UNLOCK(fp);
3611 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3612 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
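/*
 * Illustrative note: the early return above means the reset path runs
 * only on the 1 -> 0 transition of watchdog_timer. A timer of 0 is the
 * disarmed state (no pending transmits), and any decrement that leaves a
 * nonzero value means the timeout has not yet expired; bxe_txeof()
 * re-arms the timer to BXE_TX_TIMEOUT while transmits remain pending.
 */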
3617 /* processes transmit completions */
3619 bxe_txeof(struct bxe_softc *sc,
3620 struct bxe_fastpath *fp)
3622 struct ifnet *ifp = sc->ifnet;
3623 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3624 uint16_t tx_bd_avail;
3626 BXE_FP_TX_LOCK_ASSERT(fp);
3628 bd_cons = fp->tx_bd_cons;
3629 hw_cons = le16toh(*fp->tx_cons_sb);
3630 sw_cons = fp->tx_pkt_cons;
3632 while (sw_cons != hw_cons) {
3633 pkt_cons = TX_BD(sw_cons);
3636 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3637 fp->index, hw_cons, sw_cons, pkt_cons);
3639 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3644 fp->tx_pkt_cons = sw_cons;
3645 fp->tx_bd_cons = bd_cons;
3648 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3649 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3653 tx_bd_avail = bxe_tx_avail(sc, fp);
3655 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3656 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3658 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3661 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3662 /* reset the watchdog timer if there are pending transmits */
3663 fp->watchdog_timer = BXE_TX_TIMEOUT;
3666 /* clear watchdog when there are no pending transmits */
3667 fp->watchdog_timer = 0;
3673 bxe_drain_tx_queues(struct bxe_softc *sc)
3675 struct bxe_fastpath *fp;
3678 /* wait until all TX fastpath tasks have completed */
3679 for (i = 0; i < sc->num_queues; i++) {
3684 while (bxe_has_tx_work(fp)) {
3688 BXE_FP_TX_UNLOCK(fp);
3691 BLOGE(sc, "Timeout waiting for fp[%d] "
3692 "transmits to complete!\n", i);
3693 bxe_panic(sc, ("tx drain failure\n"));
3707 bxe_del_all_macs(struct bxe_softc *sc,
3708 struct ecore_vlan_mac_obj *mac_obj,
3710 uint8_t wait_for_comp)
3712 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3715 /* wait for completion of the requested command */
3716 if (wait_for_comp) {
3717 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3720 /* Set the mac type of addresses we want to clear */
3721 bxe_set_bit(mac_type, &vlan_mac_flags);
3723 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3725 BLOGE(sc, "Failed to delete MACs (%d)\n", rc);
3732 bxe_fill_accept_flags(struct bxe_softc *sc,
3734 unsigned long *rx_accept_flags,
3735 unsigned long *tx_accept_flags)
3737 /* Clear the flags first */
3738 *rx_accept_flags = 0;
3739 *tx_accept_flags = 0;
3742 case BXE_RX_MODE_NONE:
3744 * 'drop all' supersedes any accept flags that may have been
3745 * passed to the function.
3749 case BXE_RX_MODE_NORMAL:
3750 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3751 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3752 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3754 /* internal switching mode */
3755 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3756 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3757 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3761 case BXE_RX_MODE_ALLMULTI:
3762 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3763 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3764 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3766 /* internal switching mode */
3767 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3768 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3769 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3773 case BXE_RX_MODE_PROMISC:
3775 * According to the definition of SI mode, an iface in promisc mode
3776 * should receive matched and unmatched (in resolution of port)
3779 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3780 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3781 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3782 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3784 /* internal switching mode */
3785 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3786 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3789 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3791 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3797 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode);
3801 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3802 if (rx_mode != BXE_RX_MODE_NONE) {
3803 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3804 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3811 bxe_set_q_rx_mode(struct bxe_softc *sc,
3813 unsigned long rx_mode_flags,
3814 unsigned long rx_accept_flags,
3815 unsigned long tx_accept_flags,
3816 unsigned long ramrod_flags)
3818 struct ecore_rx_mode_ramrod_params ramrod_param;
3821 memset(&ramrod_param, 0, sizeof(ramrod_param));
3823 /* Prepare ramrod parameters */
3824 ramrod_param.cid = 0;
3825 ramrod_param.cl_id = cl_id;
3826 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3827 ramrod_param.func_id = SC_FUNC(sc);
3829 ramrod_param.pstate = &sc->sp_state;
3830 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3832 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3833 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3835 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3837 ramrod_param.ramrod_flags = ramrod_flags;
3838 ramrod_param.rx_mode_flags = rx_mode_flags;
3840 ramrod_param.rx_accept_flags = rx_accept_flags;
3841 ramrod_param.tx_accept_flags = tx_accept_flags;
3843 rc = ecore_config_rx_mode(sc, &ramrod_param);
3845 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode);
3853 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3855 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3856 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3859 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3865 bxe_set_bit(RAMROD_RX, &ramrod_flags);
3866 bxe_set_bit(RAMROD_TX, &ramrod_flags);
3868 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3869 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3870 rx_accept_flags, tx_accept_flags,
3874 /* returns the "mcp load_code" according to global load_count array */
3876 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3878 int path = SC_PATH(sc);
3879 int port = SC_PORT(sc);
3881 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3882 path, load_count[path][0], load_count[path][1],
3883 load_count[path][2]);
3884 load_count[path][0]++;
3885 load_count[path][1 + port]++;
3886 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3887 path, load_count[path][0], load_count[path][1],
3888 load_count[path][2]);
3889 if (load_count[path][0] == 1) {
3890 return (FW_MSG_CODE_DRV_LOAD_COMMON);
3891 } else if (load_count[path][1 + port] == 1) {
3892 return (FW_MSG_CODE_DRV_LOAD_PORT);
3894 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3898 /* returns the "mcp load_code" according to global load_count array */
3900 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3902 int port = SC_PORT(sc);
3903 int path = SC_PATH(sc);
3905 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3906 path, load_count[path][0], load_count[path][1],
3907 load_count[path][2]);
3908 load_count[path][0]--;
3909 load_count[path][1 + port]--;
3910 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3911 path, load_count[path][0], load_count[path][1],
3912 load_count[path][2]);
3913 if (load_count[path][0] == 0) {
3914 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3915 } else if (load_count[path][1 + port] == 0) {
3916 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3918 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
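/*
 * Illustrative example of the no-MCP load accounting above: per path,
 * load_count[] holds { total, port0, port1 }. The first function to
 * load on a path gets LOAD_COMMON and the first on a port gets
 * LOAD_PORT; everyone else gets LOAD_FUNCTION. Unload mirrors this,
 * with the last function on the path/port returning UNLOAD_COMMON or
 * UNLOAD_PORT respectively.
 */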
3922 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3924 bxe_send_unload_req(struct bxe_softc *sc,
3927 uint32_t reset_code = 0;
3929 int port = SC_PORT(sc);
3930 int path = SC_PATH(sc);
3933 /* Select the UNLOAD request mode */
3934 if (unload_mode == UNLOAD_NORMAL) {
3935 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3938 else if (sc->flags & BXE_NO_WOL_FLAG) {
3939 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
3940 } else if (sc->wol) {
3941 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3942 uint8_t *mac_addr = sc->dev->dev_addr;
3947 * The mac address is written to entries 1-4 to
3948 * preserve entry 0 which is used by the PMF
3950 uint8_t entry = (SC_VN(sc) + 1)*8;
3952 val = (mac_addr[0] << 8) | mac_addr[1];
3953 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val);
3955 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3956 (mac_addr[4] << 8) | mac_addr[5];
3957 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
3959 /* Enable the PME and clear the status */
3960 pmc = pci_read_config(sc->dev,
3961 (sc->devinfo.pcie_pm_cap_reg +
3964 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME;
3965 pci_write_config(sc->dev,
3966 (sc->devinfo.pcie_pm_cap_reg +
3970 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
3974 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3977 /* Send the request to the MCP */
3978 if (!BXE_NOMCP(sc)) {
3979 reset_code = bxe_fw_command(sc, reset_code, 0);
3981 reset_code = bxe_nic_unload_no_mcp(sc);
3984 return (reset_code);
3987 /* send UNLOAD_DONE command to the MCP */
3989 bxe_send_unload_done(struct bxe_softc *sc,
3992 uint32_t reset_param =
3993 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3995 /* Report UNLOAD_DONE to MCP */
3996 if (!BXE_NOMCP(sc)) {
3997 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
4002 bxe_func_wait_started(struct bxe_softc *sc)
4006 if (!sc->port.pmf) {
4011 * (assumption: No Attention from MCP at this stage)
4012 * PMF probably in the middle of TX disable/enable transaction
4013 * 1. Sync ISR for the default SB
4014 * 2. Sync SP queue - this guarantees us that attention handling started
4015 * 3. Wait until the TX disable/enable transaction completes
4017 * 1+2 guarantee that if the DCBX attention was scheduled it already changed
4018 * the pending bit of the transaction from STARTED-->TX_STOPPED; if we have
4019 * already received the completion for the transaction, the state is TX_STOPPED.
4020 * The state will return to STARTED after completion of the TX_STOPPED-->STARTED transition.
4024 /* XXX make sure default SB ISR is done */
4025 /* need a way to synchronize an irq (intr_mtx?) */
4027 /* XXX flush any work queues */
4029 while (ecore_func_get_state(sc, &sc->func_obj) !=
4030 ECORE_F_STATE_STARTED && tout--) {
4034 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
4036 * Failed to complete the transaction in a "good way".
4037 * Force both transactions with the CLR bit.
4039 struct ecore_func_state_params func_params = { NULL };
4041 BLOGE(sc, "Unexpected function state! "
4042 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
4044 func_params.f_obj = &sc->func_obj;
4045 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4047 /* STARTED-->TX_STOPPED */
4048 func_params.cmd = ECORE_F_CMD_TX_STOP;
4049 ecore_func_state_change(sc, &func_params);
4051 /* TX_STOPPED-->STARTED */
4052 func_params.cmd = ECORE_F_CMD_TX_START;
4053 return (ecore_func_state_change(sc, &func_params));
4060 bxe_stop_queue(struct bxe_softc *sc,
4063 struct bxe_fastpath *fp = &sc->fp[index];
4064 struct ecore_queue_state_params q_params = { NULL };
4067 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
4069 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
4070 /* We want to wait for completion in this context */
4071 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
4073 /* Stop the primary connection: */
4075 /* ...halt the connection */
4076 q_params.cmd = ECORE_Q_CMD_HALT;
4077 rc = ecore_queue_state_change(sc, &q_params);
4082 /* ...terminate the connection */
4083 q_params.cmd = ECORE_Q_CMD_TERMINATE;
4084 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
4085 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
4086 rc = ecore_queue_state_change(sc, &q_params);
4091 /* ...delete cfc entry */
4092 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
4093 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
4094 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
4095 return (ecore_queue_state_change(sc, &q_params));
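/*
 * Illustrative note: queue teardown is a three-step state machine --
 * HALT stops new traffic on the connection, TERMINATE flushes in-flight
 * work, and CFC_DEL releases the connection context. Each step above
 * completes synchronously because RAMROD_COMP_WAIT is set in the
 * ramrod flags.
 */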
4098 /* wait for the outstanding SP commands */
4099 static inline uint8_t
4100 bxe_wait_sp_comp(struct bxe_softc *sc,
4104 int tout = 5000; /* wait for 5 secs tops */
4108 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
4117 tmp = atomic_load_acq_long(&sc->sp_state);
4119 BLOGE(sc, "Filtering completion timed out: "
4120 "sp_state 0x%lx, mask 0x%lx\n",
4129 bxe_func_stop(struct bxe_softc *sc)
4131 struct ecore_func_state_params func_params = { NULL };
4134 /* prepare parameters for function state transitions */
4135 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4136 func_params.f_obj = &sc->func_obj;
4137 func_params.cmd = ECORE_F_CMD_STOP;
4140 * Try to stop the function the 'good way'. If it fails (in case
4141 * of a parity error during bxe_chip_cleanup()) and we are
4142 * not in a debug mode, perform a state transaction in order to
4143 * enable further HW_RESET transaction.
4145 rc = ecore_func_state_change(sc, &func_params);
4147 BLOGE(sc, "FUNC_STOP ramrod failed. "
4148 "Running a dry transaction\n");
4149 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4150 return (ecore_func_state_change(sc, &func_params));
4157 bxe_reset_hw(struct bxe_softc *sc,
4160 struct ecore_func_state_params func_params = { NULL };
4162 /* Prepare parameters for function state transitions */
4163 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4165 func_params.f_obj = &sc->func_obj;
4166 func_params.cmd = ECORE_F_CMD_HW_RESET;
4168 func_params.params.hw_init.load_phase = load_code;
4170 return (ecore_func_state_change(sc, &func_params));
4174 bxe_int_disable_sync(struct bxe_softc *sc,
4178 /* prevent the HW from sending interrupts */
4179 bxe_int_disable(sc);
4182 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4183 /* make sure all ISRs are done */
4185 /* XXX make sure sp_task is not running */
4186 /* cancel and flush work queues */
4190 bxe_chip_cleanup(struct bxe_softc *sc,
4191 uint32_t unload_mode,
4194 int port = SC_PORT(sc);
4195 struct ecore_mcast_ramrod_params rparam = { NULL };
4196 uint32_t reset_code;
4199 bxe_drain_tx_queues(sc);
4201 /* give HW time to discard old tx messages */
4204 /* Clean all ETH MACs */
4205 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4207 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4210 /* Clean up UC list */
4211 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4213 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4217 if (!CHIP_IS_E1(sc)) {
4218 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4221 /* Set "drop all" to stop Rx */
4224 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4225 * a race between the completion code and this code.
4229 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4230 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4232 bxe_set_storm_rx_mode(sc);
4235 /* Clean up multicast configuration */
4236 rparam.mcast_obj = &sc->mcast_obj;
4237 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4239 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4242 BXE_MCAST_UNLOCK(sc);
4244 // XXX bxe_iov_chip_cleanup(sc);
4247 * Send the UNLOAD_REQUEST to the MCP. This will return whether
4248 * this function should perform a FUNCTION, PORT, or COMMON HW reset.
4251 reset_code = bxe_send_unload_req(sc, unload_mode);
4254 * (assumption: No Attention from MCP at this stage)
4255 * PMF probably in the middle of TX disable/enable transaction
4257 rc = bxe_func_wait_started(sc);
4259 BLOGE(sc, "bxe_func_wait_started failed\n");
4263 * Close multi and leading connections
4264 * Completions for ramrods are collected in a synchronous way
4266 for (i = 0; i < sc->num_queues; i++) {
4267 if (bxe_stop_queue(sc, i)) {
4273 * If the SP settings didn't get completed so far - something
4274 * very wrong has happened.
4276 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4277 BLOGE(sc, "Common slow path ramrods got stuck!\n");
4282 rc = bxe_func_stop(sc);
4284 BLOGE(sc, "Function stop failed!\n");
4287 /* disable HW interrupts */
4288 bxe_int_disable_sync(sc, TRUE);
4290 /* detach interrupts */
4291 bxe_interrupt_detach(sc);
4293 /* Reset the chip */
4294 rc = bxe_reset_hw(sc, reset_code);
4296 BLOGE(sc, "Hardware reset failed\n");
4299 /* Report UNLOAD_DONE to MCP */
4300 bxe_send_unload_done(sc, keep_link);
4304 bxe_disable_close_the_gate(struct bxe_softc *sc)
4307 int port = SC_PORT(sc);
4310 "Disabling 'close the gates'\n");
4312 if (CHIP_IS_E1(sc)) {
4313 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4314 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4315 val = REG_RD(sc, addr);
4317 REG_WR(sc, addr, val);
4319 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4320 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4321 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4322 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4327 * Cleans the objects that have internal lists, without sending
4328 * ramrods. Should be run when interrupts are disabled.
4331 bxe_squeeze_objects(struct bxe_softc *sc)
4333 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4334 struct ecore_mcast_ramrod_params rparam = { NULL };
4335 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4338 /* Cleanup MACs' object first... */
4340 /* Wait for completion of the requested commands */
4341 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4342 /* Perform a dry cleanup */
4343 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4345 /* Clean ETH primary MAC */
4346 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4347 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4350 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4353 /* Cleanup UC list */
4355 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4356 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4359 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4362 /* Now clean mcast object... */
4364 rparam.mcast_obj = &sc->mcast_obj;
4365 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4367 /* Add a DEL command... */
4368 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4370 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4373 /* now wait until all pending commands are cleared */
4375 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4378 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4382 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4386 /* stop the controller */
4387 static __noinline int
4388 bxe_nic_unload(struct bxe_softc *sc,
4389 uint32_t unload_mode,
4392 uint8_t global = FALSE;
4395 BXE_CORE_LOCK_ASSERT(sc);
4397 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4399 /* mark driver as unloaded in shmem2 */
4400 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4401 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4402 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4403 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4406 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4407 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4409 * We can get here if the driver has been unloaded
4410 * during parity error recovery and is either waiting for a
4411 * leader to complete or for other functions to unload and
4412 * then ifconfig down has been issued. In this case we want to
4413 * unload and let other functions complete the recovery process.
4416 sc->recovery_state = BXE_RECOVERY_DONE;
4418 bxe_release_leader_lock(sc);
4421 BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4422 BLOGE(sc, "Can't unload in closed or error state\n");
4427 * Nothing to do during unload if the previous bxe_nic_load()
4428 * did not complete successfully - all resources are released.
4430 if ((sc->state == BXE_STATE_CLOSED) ||
4431 (sc->state == BXE_STATE_ERROR)) {
4435 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4441 sc->rx_mode = BXE_RX_MODE_NONE;
4442 /* XXX set rx mode ??? */
4445 /* set ALWAYS_ALIVE bit in shmem */
4446 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4450 bxe_stats_handle(sc, STATS_EVENT_STOP);
4451 bxe_save_statistics(sc);
4454 /* wait till consumers catch up with producers in all queues */
4455 bxe_drain_tx_queues(sc);
4457 /* if VF, indicate to the PF that this function is going down (the PF
4458 * will delete the SP elements and clear the initializations)
4461 ; /* bxe_vfpf_close_vf(sc); */
4462 } else if (unload_mode != UNLOAD_RECOVERY) {
4463 /* if this is a normal/close unload need to clean up chip */
4464 bxe_chip_cleanup(sc, unload_mode, keep_link);
4466 /* Send the UNLOAD_REQUEST to the MCP */
4467 bxe_send_unload_req(sc, unload_mode);
4470 * Prevent transactions to host from the functions on the
4471 * engine that doesn't reset global blocks in case of global
4472 * attention once global blocks are reset and gates are opened
4473 * (the engine on which the leader will perform the recovery
4476 if (!CHIP_IS_E1x(sc)) {
4480 /* disable HW interrupts */
4481 bxe_int_disable_sync(sc, TRUE);
4483 /* detach interrupts */
4484 bxe_interrupt_detach(sc);
4486 /* Report UNLOAD_DONE to MCP */
4487 bxe_send_unload_done(sc, FALSE);
4491 * At this stage no more interrupts will arrive, so we may safely clean
4492 * the queueable objects here in case they failed to get cleaned so far.
4495 bxe_squeeze_objects(sc);
4498 /* There should be no more pending SP commands at this stage */
4503 bxe_free_fp_buffers(sc);
4509 bxe_free_fw_stats_mem(sc);
4511 sc->state = BXE_STATE_CLOSED;
4514 * Check if there are pending parity attentions. If there are - set
4515 * RECOVERY_IN_PROGRESS.
4517 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4518 bxe_set_reset_in_progress(sc);
4520 /* Set RESET_IS_GLOBAL if needed */
4522 bxe_set_reset_global(sc);
4527 * The last driver must disable a "close the gate" if there is no
4528 * parity attention or "process kill" pending.
4530 if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4531 bxe_reset_is_done(sc, SC_PATH(sc))) {
4532 bxe_disable_close_the_gate(sc);
4535 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4541 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4542 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4545 bxe_ifmedia_update(struct ifnet *ifp)
4547 struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc;
4548 struct ifmedia *ifm;
4552 /* We only support Ethernet media type. */
4553 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4557 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4563 case IFM_10G_TWINAX:
4565 /* We don't support changing the media type. */
4566 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4567 IFM_SUBTYPE(ifm->ifm_media));
4575 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4578 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4580 struct bxe_softc *sc = ifp->if_softc;
4582 /* Report link down if the driver isn't running. */
4583 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4584 ifmr->ifm_active |= IFM_NONE;
4588 /* Setup the default interface info. */
4589 ifmr->ifm_status = IFM_AVALID;
4590 ifmr->ifm_active = IFM_ETHER;
4592 if (sc->link_vars.link_up) {
4593 ifmr->ifm_status |= IFM_ACTIVE;
4595 ifmr->ifm_active |= IFM_NONE;
4599 ifmr->ifm_active |= sc->media;
4601 if (sc->link_vars.duplex == DUPLEX_FULL) {
4602 ifmr->ifm_active |= IFM_FDX;
4604 ifmr->ifm_active |= IFM_HDX;
4609 bxe_ioctl_nvram(struct bxe_softc *sc,
4613 struct bxe_nvram_data nvdata_base;
4614 struct bxe_nvram_data *nvdata;
4618 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base));
4620 len = (sizeof(struct bxe_nvram_data) +
4624 if (len > sizeof(struct bxe_nvram_data)) {
4625 if ((nvdata = (struct bxe_nvram_data *)
4626 malloc(len, M_DEVBUF,
4627 (M_NOWAIT | M_ZERO))) == NULL) {
4628 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n");
4631 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data));
4633 nvdata = &nvdata_base;
4636 if (priv_op == BXE_IOC_RD_NVRAM) {
4637 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n",
4638 nvdata->offset, nvdata->len);
4639 error = bxe_nvram_read(sc,
4641 (uint8_t *)nvdata->value,
4643 copyout(nvdata, ifr->ifr_data, len);
4644 } else { /* BXE_IOC_WR_NVRAM */
4645 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n",
4646 nvdata->offset, nvdata->len);
4647 copyin(ifr->ifr_data, nvdata, len);
4648 error = bxe_nvram_write(sc,
4650 (uint8_t *)nvdata->value,
4654 if (len > sizeof(struct bxe_nvram_data)) {
4655 free(nvdata, M_DEVBUF);
4662 bxe_ioctl_stats_show(struct bxe_softc *sc,
4666 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN);
4667 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t));
4674 case BXE_IOC_STATS_SHOW_NUM:
4675 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data));
4676 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num =
4678 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len =
4682 case BXE_IOC_STATS_SHOW_STR:
4683 memset(ifr->ifr_data, 0, str_size);
4684 p_tmp = ifr->ifr_data;
4685 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4686 strcpy(p_tmp, bxe_eth_stats_arr[i].string);
4687 p_tmp += STAT_NAME_LEN;
4691 case BXE_IOC_STATS_SHOW_CNT:
4692 memset(ifr->ifr_data, 0, stats_size);
4693 p_tmp = ifr->ifr_data;
4694 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4695 offset = ((uint32_t *)&sc->eth_stats +
4696 bxe_eth_stats_arr[i].offset);
4697 switch (bxe_eth_stats_arr[i].size) {
4699 *((uint64_t *)p_tmp) = (uint64_t)*offset;
4702 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1));
4705 *((uint64_t *)p_tmp) = 0;
4707 p_tmp += sizeof(uint64_t);
4717 bxe_handle_chip_tq(void *context,
4720 struct bxe_softc *sc = (struct bxe_softc *)context;
4721 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4726 if ((sc->ifnet->if_flags & IFF_UP) &&
4727 !(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4728 /* start the interface */
4729 BLOGD(sc, DBG_LOAD, "Starting the interface...\n");
4731 bxe_init_locked(sc);
4732 BXE_CORE_UNLOCK(sc);
4737 if (!(sc->ifnet->if_flags & IFF_UP) &&
4738 (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4739 /* bring down the interface */
4740 BLOGD(sc, DBG_LOAD, "Stopping the interface...\n");
4741 bxe_periodic_stop(sc);
4743 bxe_stop_locked(sc);
4744 BXE_CORE_UNLOCK(sc);
4748 case CHIP_TQ_REINIT:
4749 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
4750 /* restart the interface */
4751 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4752 bxe_periodic_stop(sc);
4754 bxe_stop_locked(sc);
4755 bxe_init_locked(sc);
4756 BXE_CORE_UNLOCK(sc);
4766 * Handles any IOCTL calls from the operating system.
4769 * 0 = Success, >0 Failure
4772 bxe_ioctl(struct ifnet *ifp,
4776 struct bxe_softc *sc = ifp->if_softc;
4777 struct ifreq *ifr = (struct ifreq *)data;
4778 struct bxe_nvram_data *nvdata;
4784 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4785 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4790 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4793 if (sc->mtu == ifr->ifr_mtu) {
4794 /* nothing to change */
4798 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4799 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4800 ifr->ifr_mtu, mtu_min, mtu_max);
4805 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4806 (unsigned int)ifr->ifr_mtu);
4807 atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu,
4808 (unsigned long)ifr->ifr_mtu);
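/*
 * Note: the release-ordered stores above let the RX/TX fast paths read
 * sc->mtu without taking the core lock; the hardware itself is not
 * re-initialized here but (presumably via the reinit flag) through the
 * CHIP_TQ_REINIT dispatch at the bottom of bxe_ioctl().
 */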
4814 /* toggle the interface state up or down */
4815 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4817 /* check if the interface is up */
4818 if (ifp->if_flags & IFF_UP) {
4819 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4820 /* set the receive mode flags */
4821 bxe_set_rx_mode(sc);
4823 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_START);
4824 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
4827 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4828 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_STOP);
4829 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
4837 /* add/delete multicast addresses */
4838 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4840 /* check if the interface is up */
4841 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4842 /* set the receive mode flags */
4843 bxe_set_rx_mode(sc);
4849 /* find out which capabilities have changed */
4850 mask = (ifr->ifr_reqcap ^ ifp->if_capenable);
4852 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4855 /* toggle the LRO capabilities enable flag */
4856 if (mask & IFCAP_LRO) {
4857 ifp->if_capenable ^= IFCAP_LRO;
4858 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4859 (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF");
4863 /* toggle the TXCSUM checksum capabilities enable flag */
4864 if (mask & IFCAP_TXCSUM) {
4865 ifp->if_capenable ^= IFCAP_TXCSUM;
4866 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4867 (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF");
4868 if (ifp->if_capenable & IFCAP_TXCSUM) {
4869 ifp->if_hwassist = (CSUM_IP |
4876 ifp->if_hwassist = 0;
4880 /* toggle the RXCSUM checksum capabilities enable flag */
4881 if (mask & IFCAP_RXCSUM) {
4882 ifp->if_capenable ^= IFCAP_RXCSUM;
4883 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4884 (ifp->if_capenable & IFCAP_RXCSUM) ? "ON" : "OFF");
4885 if (ifp->if_capenable & IFCAP_RXCSUM) {
4886 ifp->if_hwassist = (CSUM_IP |
4893 ifp->if_hwassist = 0;
4897 /* toggle TSO4 capabilities enabled flag */
4898 if (mask & IFCAP_TSO4) {
4899 ifp->if_capenable ^= IFCAP_TSO4;
4900 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4901 (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF");
4904 /* toggle TSO6 capabilities enabled flag */
4905 if (mask & IFCAP_TSO6) {
4906 ifp->if_capenable ^= IFCAP_TSO6;
4907 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4908 (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF");
4911 /* toggle VLAN_HWTSO capabilities enabled flag */
4912 if (mask & IFCAP_VLAN_HWTSO) {
4913 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4914 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4915 (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4918 /* toggle VLAN_HWCSUM capabilities enabled flag */
4919 if (mask & IFCAP_VLAN_HWCSUM) {
4920 /* XXX investigate this... */
4921 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4925 /* toggle VLAN_MTU capabilities enable flag */
4926 if (mask & IFCAP_VLAN_MTU) {
4927 /* XXX investigate this... */
4928 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4932 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4933 if (mask & IFCAP_VLAN_HWTAGGING) {
4934 /* XXX investigate this... */
4935 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4939 /* toggle VLAN_HWFILTER capabilities enabled flag */
4940 if (mask & IFCAP_VLAN_HWFILTER) {
4941 /* XXX investigate this... */
4942 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4954 /* set/get interface media */
4955 BLOGD(sc, DBG_IOCTL,
4956 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4958 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4961 case SIOCGPRIVATE_0:
4962 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));
4966 case BXE_IOC_RD_NVRAM:
4967 case BXE_IOC_WR_NVRAM:
4968 nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
4969 BLOGD(sc, DBG_IOCTL,
4970 "Received Private NVRAM ioctl addr=0x%x size=%u\n",
4971 nvdata->offset, nvdata->len);
4972 error = bxe_ioctl_nvram(sc, priv_op, ifr);
4975 case BXE_IOC_STATS_SHOW_NUM:
4976 case BXE_IOC_STATS_SHOW_STR:
4977 case BXE_IOC_STATS_SHOW_CNT:
4978 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
4980 error = bxe_ioctl_stats_show(sc, priv_op, ifr);
4984 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
4992 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4994 error = ether_ioctl(ifp, command, data);
4998 if (reinit && (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4999 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
5000 "Re-initializing hardware from IOCTL change\n");
5001 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
5002 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
5008 static __noinline void
5009 bxe_dump_mbuf(struct bxe_softc *sc,
5015 if (!(sc->debug & DBG_MBUF)) {
5020 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
5026 "mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
5027 m, m->m_len, m->m_flags,
5028 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
5030 if (m->m_flags & M_PKTHDR) {
5032 "- m_pkthdr: len=%d flags=0x%b csum_flags=%b\n",
5033 m->m_pkthdr.len, m->m_flags,
5034 "\20\12M_BCAST\13M_MCAST\14M_FRAG"
5035 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
5036 "\22M_PROMISC\23M_NOFREE",
5037 (int)m->m_pkthdr.csum_flags,
5038 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
5039 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
5040 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
5041 "\14CSUM_PSEUDO_HDR");
5044 if (m->m_flags & M_EXT) {
5045 switch (m->m_ext.ext_type) {
5046 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
5047 case EXT_SFBUF: type = "EXT_SFBUF"; break;
5048 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
5049 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
5050 case EXT_PACKET: type = "EXT_PACKET"; break;
5051 case EXT_MBUF: type = "EXT_MBUF"; break;
5052 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
5053 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
5054 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
5055 case EXT_EXTREF: type = "EXT_EXTREF"; break;
5056 default: type = "UNKNOWN"; break;
5060 "- m_ext: %p ext_size=%d, type=%s\n",
5061 m->m_ext.ext_buf, m->m_ext.ext_size, type);
5065 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
5073 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
5074 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
5075 * The window: 3 bds are reserved, 1 for the headers BD plus 2 for the parse BD and last BD.
5076 * The headers come in a separate bd in FreeBSD so 13 - 3 = 10.
5077 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
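 * Worked example (a sketch): with nsegs = 13 and a 10-segment window, the
 * first pass sums segs[1..10] (segs[0] holds the headers); each later
 * iteration drops the oldest segment and adds the next, and the check
 * fails as soon as any window's byte count drops below the MSS.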
5080 bxe_chktso_window(struct bxe_softc *sc,
5082 bus_dma_segment_t *segs,
5085 uint32_t num_wnds, wnd_size, wnd_sum;
5086 int32_t frag_idx, wnd_idx;
5087 unsigned short lso_mss;
5093 num_wnds = nsegs - wnd_size;
5094 lso_mss = htole16(m->m_pkthdr.tso_segsz);
5097 * The total Eth+IP+TCP header length is in the first FreeBSD mbuf, so
5098 * calculate the first window's sum of data while skipping the first
5099 * segment on the assumption that it holds those headers.
5101 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
5102 wnd_sum += htole16(segs[frag_idx].ds_len);
5105 /* check the first 10 bd window size */
5106 if (wnd_sum < lso_mss) {
5110 /* run through the windows */
5111 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
5112 /* subtract the length of the segment sliding out of the window */
5113 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
5114 /* add the next mbuf len to the len of our new window */
5115 wnd_sum += htole16(segs[frag_idx].ds_len);
5116 if (wnd_sum < lso_mss) {
5125 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
5127 uint32_t *parsing_data)
5129 struct ether_vlan_header *eh = NULL;
5130 struct ip *ip4 = NULL;
5131 struct ip6_hdr *ip6 = NULL;
5133 struct tcphdr *th = NULL;
5134 int e_hlen, ip_hlen, l4_off;
5137 if (m->m_pkthdr.csum_flags == CSUM_IP) {
5138 /* no L4 checksum offload needed */
5142 /* get the Ethernet header */
5143 eh = mtod(m, struct ether_vlan_header *);
5145 /* handle VLAN encapsulation if present */
5146 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5147 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5148 proto = ntohs(eh->evl_proto);
5150 e_hlen = ETHER_HDR_LEN;
5151 proto = ntohs(eh->evl_encap_proto);
5156 /* get the IP header, if mbuf len < 20 then header in next mbuf */
5157 ip4 = (m->m_len < sizeof(struct ip)) ?
5158 (struct ip *)m->m_next->m_data :
5159 (struct ip *)(m->m_data + e_hlen);
5160 /* ip_hl is the number of 32-bit words; convert to bytes */
5161 ip_hlen = (ip4->ip_hl << 2);
5164 case ETHERTYPE_IPV6:
5165 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5166 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5167 (struct ip6_hdr *)m->m_next->m_data :
5168 (struct ip6_hdr *)(m->m_data + e_hlen);
5169 /* XXX cannot support offload with IPv6 extensions */
5170 ip_hlen = sizeof(struct ip6_hdr);
5174 /* We can't offload in this case... */
5175 /* XXX error stat ??? */
5179 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5180 l4_off = (e_hlen + ip_hlen);
5183 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
5184 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
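    /*
     * Note: l4_off is computed in bytes above, but the BD field is in
     * 16-bit words (the _W suffix), hence the (l4_off >> 1) before the
     * shift into place.
     */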
5186 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5189 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5190 th = (struct tcphdr *)(ip + ip_hlen);
5191 /* th_off is number of 32-bit words */
5192 *parsing_data |= ((th->th_off <<
5193 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
5194 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
5195 return (l4_off + (th->th_off << 2)); /* entire header length */
5196 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5198 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5199 return (l4_off + sizeof(struct udphdr)); /* entire header length */
5201 /* XXX error stat ??? */
5207 bxe_set_pbd_csum(struct bxe_fastpath *fp,
5209 struct eth_tx_parse_bd_e1x *pbd)
5211 struct ether_vlan_header *eh = NULL;
5212 struct ip *ip4 = NULL;
5213 struct ip6_hdr *ip6 = NULL;
5215 struct tcphdr *th = NULL;
5216 struct udphdr *uh = NULL;
5217 int e_hlen, ip_hlen;
5223 /* get the Ethernet header */
5224 eh = mtod(m, struct ether_vlan_header *);
5226 /* handle VLAN encapsulation if present */
5227 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5228 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5229 proto = ntohs(eh->evl_proto);
5231 e_hlen = ETHER_HDR_LEN;
5232 proto = ntohs(eh->evl_encap_proto);
5237 /* get the IP header, if mbuf len < 20 then header in next mbuf */
5238 ip4 = (m->m_len < sizeof(struct ip)) ?
5239 (struct ip *)m->m_next->m_data :
5240 (struct ip *)(m->m_data + e_hlen);
5241 /* ip_hl is the number of 32-bit words; convert to 16-bit words */
5242 ip_hlen = (ip4->ip_hl << 1);
5245 case ETHERTYPE_IPV6:
5246 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5247 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5248 (struct ip6_hdr *)m->m_next->m_data :
5249 (struct ip6_hdr *)(m->m_data + e_hlen);
5250 /* XXX cannot support offload with IPv6 extensions */
5251 ip_hlen = (sizeof(struct ip6_hdr) >> 1);
5255 /* We can't offload in this case... */
5256 /* XXX error stat ??? */
5260 hlen = (e_hlen >> 1);
5262 /* note that rest of global_data is indirectly zeroed here */
5263 if (m->m_flags & M_VLANTAG) {
5265 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5267 pbd->global_data = htole16(hlen);
5270 pbd->ip_hlen_w = ip_hlen;
5272 hlen += pbd->ip_hlen_w;
5274 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5276 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5279 th = (struct tcphdr *)(ip + (ip_hlen << 1));
5280 /* th_off is the number of 32-bit words; convert to 16-bit words */
5281 hlen += (uint16_t)(th->th_off << 1);
5282 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5284 uh = (struct udphdr *)(ip + (ip_hlen << 1));
5285 hlen += (sizeof(struct udphdr) / 2);
5287 /* valid case as only CSUM_IP was set */
5291 pbd->total_hlen_w = htole16(hlen);
5293 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5296 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5297 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5298 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5300 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5303 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5304 * checksums and does not know anything about the UDP header and where
5305 * the checksum field is located. It only knows about TCP. Therefore
5306 * we "lie" to the hardware for outgoing UDP packets w/ checksum
5307 * offload. Since the checksum field offset for TCP is 16 bytes and
5308 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5309 * bytes less than the start of the UDP header. This allows the
5310 * hardware to write the checksum in the correct spot. But the
5311 * hardware will compute a checksum which includes the last 10 bytes
5312 * of the IP header. To correct this we tweak the stack computed
5313 * pseudo checksum by folding in the calculation of the inverse
5314 * checksum for those final 10 bytes of the IP header. This allows
5315 * the correct checksum to be computed by the hardware.
5318 /* set pointer 10 bytes before UDP header */
5319 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5321 /* calculate a pseudo header checksum over the first 10 bytes */
5322 tmp_csum = in_pseudo(*tmp_uh,
5324 *(uint16_t *)(tmp_uh + 2));
5326 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
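        /*
         * Offset arithmetic behind the 10-byte trick (a sketch):
         * offsetof(struct tcphdr, th_sum) is 16 while
         * offsetof(struct udphdr, uh_sum) is 6, so with uh backed up by 10
         * bytes the hardware's "TCP checksum" write lands exactly on uh_sum:
         *
         *     (uint8_t *)uh - 10 + 16 == (uint8_t *)&uh->uh_sum
         */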
5329 return (hlen * 2); /* entire header length, number of bytes */
5333 bxe_set_pbd_lso_e2(struct mbuf *m,
5334 uint32_t *parsing_data)
5336 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5337 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5338 ETH_TX_PARSE_BD_E2_LSO_MSS);
5340 /* XXX test for IPv6 with extension header... */
5342 struct ip6_hdr *ip6;
5343 if (ip6 != NULL && ip6->ip6_nxt == IPPROTO_HOPOPTS) /* or any other IPv6 ext header (sketch) */
5344 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
5349 bxe_set_pbd_lso(struct mbuf *m,
5350 struct eth_tx_parse_bd_e1x *pbd)
5352 struct ether_vlan_header *eh = NULL;
5353 struct ip *ip = NULL;
5354 struct tcphdr *th = NULL;
5357 /* get the Ethernet header */
5358 eh = mtod(m, struct ether_vlan_header *);
5360 /* handle VLAN encapsulation if present */
5361 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5362 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5364 /* get the IP and TCP header, with LSO entire header in first mbuf */
5365 /* XXX assuming IPv4 */
5366 ip = (struct ip *)(m->m_data + e_hlen);
5367 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5369 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5370 pbd->tcp_send_seq = ntohl(th->th_seq);
5371 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5375 pbd->ip_id = ntohs(ip->ip_id);
5376 pbd->tcp_pseudo_csum =
5377 ntohs(in_pseudo(ip->ip_src.s_addr,
5379 htons(IPPROTO_TCP)));
5382 pbd->tcp_pseudo_csum =
5383 ntohs(in_pseudo(&ip6->ip6_src,
5385 htons(IPPROTO_TCP)));
5389 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5393 * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5394 * visible to the controller.
5396 * If an mbuf is submitted to this routine and cannot be given to the
5397 * controller (e.g. it has too many fragments) then the function may free
5398 * the mbuf and return to the caller.
5401 * 0 = Success, !0 = Failure
5402 * Note the side effect that an mbuf may be freed if it causes a problem.
5405 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5407 bus_dma_segment_t segs[32];
5409 struct bxe_sw_tx_bd *tx_buf;
5410 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5411 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5412 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5413 struct eth_tx_bd *tx_data_bd;
5414 struct eth_tx_bd *tx_total_pkt_size_bd;
5415 struct eth_tx_start_bd *tx_start_bd;
5416 uint16_t bd_prod, pkt_prod, total_pkt_size;
5418 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5419 struct bxe_softc *sc;
5420 uint16_t tx_bd_avail;
5421 struct ether_vlan_header *eh;
5422 uint32_t pbd_e2_parsing_data = 0;
5429 M_ASSERTPKTHDR(*m_head);
5432 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5435 tx_total_pkt_size_bd = NULL;
5437 /* get the H/W pointer for packets and BDs */
5438 pkt_prod = fp->tx_pkt_prod;
5439 bd_prod = fp->tx_bd_prod;
5441 mac_type = UNICAST_ADDRESS;
5443 /* map the mbuf into the next open DMAable memory */
5444 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5445 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5447 segs, &nsegs, BUS_DMA_NOWAIT);
5449 /* mapping errors */
5450 if (__predict_false(error != 0)) {
5451 fp->eth_q_stats.tx_dma_mapping_failure++;
5452 if (error == ENOMEM) {
5453 /* resource issue, try again later */
5455 } else if (error == EFBIG) {
5456 /* possibly recoverable with defragmentation */
5457 fp->eth_q_stats.mbuf_defrag_attempts++;
5458 m0 = m_defrag(*m_head, M_DONTWAIT);
5460 fp->eth_q_stats.mbuf_defrag_failures++;
5463 /* defrag successful, try mapping again */
5465 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5467 segs, &nsegs, BUS_DMA_NOWAIT);
5469 fp->eth_q_stats.tx_dma_mapping_failure++;
5474 /* unknown, unrecoverable mapping error */
5475 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5476 bxe_dump_mbuf(sc, m0, FALSE);
5480 goto bxe_tx_encap_continue;
5483 tx_bd_avail = bxe_tx_avail(sc, fp);
5485 /* make sure there is enough room in the send queue */
5486 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5487 /* Recoverable, try again later. */
5488 fp->eth_q_stats.tx_hw_queue_full++;
5489 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5491 goto bxe_tx_encap_continue;
5494 /* capture the current H/W TX chain high watermark */
5495 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5496 (TX_BD_USABLE - tx_bd_avail))) {
5497 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5500 /* make sure it fits in the packet window */
5501 if (__predict_false(nsegs > 12)) {
5503 * The mbuf may be too big for the controller to handle. If the frame
5504 * is a TSO frame we'll need to do an additional check.
5506 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5507 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5508 goto bxe_tx_encap_continue; /* OK to send */
5510 fp->eth_q_stats.tx_window_violation_tso++;
5513 fp->eth_q_stats.tx_window_violation_std++;
5516 /* XXX I don't like this, change to double copy packet */
5518 /* no sense trying to defrag again, just drop the frame */
5522 bxe_tx_encap_continue:
5524 /* Check for errors */
5527 /* recoverable try again later */
5529 fp->eth_q_stats.tx_soft_errors++;
5530 fp->eth_q_stats.mbuf_alloc_tx--;
5538 /* set flag according to packet type (UNICAST_ADDRESS is default) */
5539 if (m0->m_flags & M_BCAST) {
5540 mac_type = BROADCAST_ADDRESS;
5541 } else if (m0->m_flags & M_MCAST) {
5542 mac_type = MULTICAST_ADDRESS;
5545 /* store the mbuf into the mbuf ring */
5547 tx_buf->first_bd = fp->tx_bd_prod;
5550 /* prepare the first transmit (start) BD for the mbuf */
5551 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5554 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5555 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5557 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5558 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5559 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5560 total_pkt_size += tx_start_bd->nbytes;
5561 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5563 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5565 /* all frames have at least Start BD + Parsing BD */
5567 tx_start_bd->nbd = htole16(nbds);
5569 if (m0->m_flags & M_VLANTAG) {
5570 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5571 tx_start_bd->bd_flags.as_bitfield |=
5572 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5574 /* vf tx, start bd must hold the ethertype for fw to enforce it */
5576 /* map ethernet header to find type and header length */
5577 eh = mtod(m0, struct ether_vlan_header *);
5578 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5580 /* used by FW for packet accounting */
5581 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5584 * If NPAR-SD is active then FW should do the tagging regardless
5585 * of value of priority. Otherwise, if priority indicates this is
5586 * a control packet we need to indicate to FW to avoid tagging.
5588 if (!IS_MF_AFEX(sc) &&
     (EVL_PRIOFTAG(m0->m_pkthdr.ether_vtag) == 7)) { /* sketch: 802.1p priority 7 = network control */
5589 SET_FLAG(tx_start_bd->general_data,
5590 ETH_TX_START_BD_FORCE_VLAN_MODE, 1);
5597 * add a parsing BD from the chain. The parsing BD is always added
5598 * though it is only used for TSO and chksum
5600 bd_prod = TX_BD_NEXT(bd_prod);
5602 if (m0->m_pkthdr.csum_flags) {
5603 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5604 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5605 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5608 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5609 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5610 ETH_TX_BD_FLAGS_L4_CSUM);
5611 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5612 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5613 ETH_TX_BD_FLAGS_IS_UDP |
5614 ETH_TX_BD_FLAGS_L4_CSUM);
5615 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5616 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5617 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5618 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5619 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5620 ETH_TX_BD_FLAGS_IS_UDP);
5624 if (!CHIP_IS_E1x(sc)) {
5625 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5626 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5628 if (m0->m_pkthdr.csum_flags) {
5629 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5634 * Add the MACs to the parsing BD if the module param was
5635 * explicitly set, if this is a vf, or in switch independent
5638 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) {
5639 eh = mtod(m0, struct ether_vlan_header *);
5640 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
5641 &pbd_e2->data.mac_addr.src_mid,
5642 &pbd_e2->data.mac_addr.src_lo,
5644 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
5645 &pbd_e2->data.mac_addr.dst_mid,
5646 &pbd_e2->data.mac_addr.dst_lo,
5651 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5654 uint16_t global_data = 0;
5656 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5657 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5659 if (m0->m_pkthdr.csum_flags) {
5660 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5663 SET_FLAG(global_data,
5664 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5665 pbd_e1x->global_data |= htole16(global_data);
5668 /* setup the parsing BD with TSO specific info */
5669 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5670 fp->eth_q_stats.tx_ofld_frames_lso++;
5671 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5673 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5674 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5676 /* split the first BD into header/data making the fw job easy */
5678 tx_start_bd->nbd = htole16(nbds);
5680 bd_prod = TX_BD_NEXT(bd_prod);
5682 /* new transmit BD after the tx_parse_bd */
5683 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5684 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5685 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5686 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5687 if (tx_total_pkt_size_bd == NULL) {
5688 tx_total_pkt_size_bd = tx_data_bd;
5692 "TSO split header size is %d (%x:%x) nbds %d\n",
5693 le16toh(tx_start_bd->nbytes),
5694 le32toh(tx_start_bd->addr_hi),
5695 le32toh(tx_start_bd->addr_lo),
5699 if (!CHIP_IS_E1x(sc)) {
5700 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5702 bxe_set_pbd_lso(m0, pbd_e1x);
5706 if (pbd_e2_parsing_data) {
5707 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5710 /* prepare remaining BDs, start tx bd contains first seg/frag */
5711 for (i = 1; i < nsegs ; i++) {
5712 bd_prod = TX_BD_NEXT(bd_prod);
5713 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5714 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5715 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5716 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5717 if (tx_total_pkt_size_bd == NULL) {
5718 tx_total_pkt_size_bd = tx_data_bd;
5720 total_pkt_size += tx_data_bd->nbytes;
5723 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5725 if (tx_total_pkt_size_bd != NULL) {
5726 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5729 if (__predict_false(sc->debug & DBG_TX)) {
5730 tmp_bd = tx_buf->first_bd;
5731 for (i = 0; i < nbds; i++)
5735 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5736 "bd_flags=0x%x hdr_nbds=%d\n",
5739 le16toh(tx_start_bd->nbd),
5740 le16toh(tx_start_bd->vlan_or_ethertype),
5741 tx_start_bd->bd_flags.as_bitfield,
5742 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5743 } else if (i == 1) {
5746 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5747 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5748 "tcp_seq=%u total_hlen_w=%u\n",
5751 pbd_e1x->global_data,
5756 pbd_e1x->tcp_pseudo_csum,
5757 pbd_e1x->tcp_send_seq,
5758 le16toh(pbd_e1x->total_hlen_w));
5759 } else { /* if (pbd_e2) */
5761 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5762 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5765 pbd_e2->data.mac_addr.dst_hi,
5766 pbd_e2->data.mac_addr.dst_mid,
5767 pbd_e2->data.mac_addr.dst_lo,
5768 pbd_e2->data.mac_addr.src_hi,
5769 pbd_e2->data.mac_addr.src_mid,
5770 pbd_e2->data.mac_addr.src_lo,
5771 pbd_e2->parsing_data);
5775 if (i != 1) { /* skip parse bd as it doesn't hold data */
5776 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5778 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5781 le16toh(tx_data_bd->nbytes),
5782 le32toh(tx_data_bd->addr_hi),
5783 le32toh(tx_data_bd->addr_lo));
5786 tmp_bd = TX_BD_NEXT(tmp_bd);
5790 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5792 /* update TX BD producer index value for next TX */
5793 bd_prod = TX_BD_NEXT(bd_prod);
5796 * If the chain of tx_bd's describing this frame is adjacent to or spans
5797 * an eth_tx_next_bd element then we need to increment the nbds value.
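 * For example, a frame whose BDs start near the end of a BD page wraps
 * through that page's next-pointer element; the in-page index
 * TX_BD_IDX(bd_prod) then ends up smaller than nbds, which is exactly the
 * condition tested below.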
5799 if (TX_BD_IDX(bd_prod) < nbds) {
5803 /* don't allow reordering of writes for nbd and packets */
5806 fp->tx_db.data.prod += nbds;
5808 /* producer points to the next free tx_bd at this point */
5810 fp->tx_bd_prod = bd_prod;
5812 DOORBELL(sc, fp->index, fp->tx_db.raw);
5814 fp->eth_q_stats.tx_pkts++;
5816 /* Prevent speculative reads from getting ahead of the status block. */
5817 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5818 0, 0, BUS_SPACE_BARRIER_READ);
5820 /* Prevent speculative reads from getting ahead of the doorbell. */
5821 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5822 0, 0, BUS_SPACE_BARRIER_READ);
5828 bxe_tx_start_locked(struct bxe_softc *sc,
5830 struct bxe_fastpath *fp)
5832 struct mbuf *m = NULL;
5834 uint16_t tx_bd_avail;
5836 BXE_FP_TX_LOCK_ASSERT(fp);
5838 /* keep adding entries while there are frames to send */
5839 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5842 * check for any frames to send
5843 * dequeue can still be NULL even if queue is not empty
5845 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
5846 if (__predict_false(m == NULL)) {
5850 /* the mbuf now belongs to us */
5851 fp->eth_q_stats.mbuf_alloc_tx++;
5854 * Put the frame into the transmit ring. If we don't have room,
5855 * place the mbuf back at the head of the TX queue, set the
5856 * OACTIVE flag, and wait for the NIC to drain the chain.
5858 if (__predict_false(bxe_tx_encap(fp, &m))) {
5859 fp->eth_q_stats.tx_encap_failures++;
5861 /* mark the TX queue as full and return the frame */
5862 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5863 IFQ_DRV_PREPEND(&ifp->if_snd, m);
5864 fp->eth_q_stats.mbuf_alloc_tx--;
5865 fp->eth_q_stats.tx_queue_xoff++;
5868 /* stop looking for more work */
5872 /* the frame was enqueued successfully */
5875 /* send a copy of the frame to any BPF listeners. */
5878 tx_bd_avail = bxe_tx_avail(sc, fp);
5880 /* handle any completions if we're running low */
5881 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5882 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5884 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5890 /* all TX packets were dequeued and/or the tx ring is full */
5892 /* reset the TX watchdog timeout timer */
5893 fp->watchdog_timer = BXE_TX_TIMEOUT;
5897 /* Legacy (non-RSS) dispatch routine */
5899 bxe_tx_start(struct ifnet *ifp)
5901 struct bxe_softc *sc;
5902 struct bxe_fastpath *fp;
5906 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5907 BLOGW(sc, "Interface not running, ignoring transmit request\n");
5911 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5912 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
5916 if (!sc->link_vars.link_up) {
5917 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5924 bxe_tx_start_locked(sc, ifp, fp);
5925 BXE_FP_TX_UNLOCK(fp);
5928 #if __FreeBSD_version >= 800000
5931 bxe_tx_mq_start_locked(struct bxe_softc *sc,
5933 struct bxe_fastpath *fp,
5936 struct buf_ring *tx_br = fp->tx_br;
5938 int depth, rc, tx_count;
5939 uint16_t tx_bd_avail;
5944 BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5948 /* fetch the depth of the driver queue */
5949 depth = drbr_inuse(ifp, tx_br);
5950 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5951 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5954 BXE_FP_TX_LOCK_ASSERT(fp);
5957 /* no new work, check for pending frames */
5958 next = drbr_dequeue(ifp, tx_br);
5959 } else if (drbr_needs_enqueue(ifp, tx_br)) {
5960 /* have both new and pending work, maintain packet order */
5961 rc = drbr_enqueue(ifp, tx_br, m);
5963 fp->eth_q_stats.tx_soft_errors++;
5964 goto bxe_tx_mq_start_locked_exit;
5966 next = drbr_dequeue(ifp, tx_br);
5968 /* new work only and nothing pending */
5972 /* keep adding entries while there are frames to send */
5973 while (next != NULL) {
5975 /* the mbuf now belongs to us */
5976 fp->eth_q_stats.mbuf_alloc_tx++;
5979 * Put the frame into the transmit ring. If we don't have room,
5980 * place the mbuf back at the head of the TX queue, set the
5981 * OACTIVE flag, and wait for the NIC to drain the chain.
5983 rc = bxe_tx_encap(fp, &next);
5984 if (__predict_false(rc != 0)) {
5985 fp->eth_q_stats.tx_encap_failures++;
5987 /* mark the TX queue as full and save the frame */
5988 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5989 /* XXX this may reorder the frame */
5990 rc = drbr_enqueue(ifp, tx_br, next);
5991 fp->eth_q_stats.mbuf_alloc_tx--;
5992 fp->eth_q_stats.tx_frames_deferred++;
5995 /* stop looking for more work */
5999 /* the transmit frame was enqueued successfully */
6002 /* send a copy of the frame to any BPF listeners */
6003 BPF_MTAP(ifp, next);
6005 tx_bd_avail = bxe_tx_avail(sc, fp);
6007 /* handle any completions if we're running low */
6008 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
6009 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
6011 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
6016 next = drbr_dequeue(ifp, tx_br);
6019 /* all TX packets were dequeued and/or the tx ring is full */
6021 /* reset the TX watchdog timeout timer */
6022 fp->watchdog_timer = BXE_TX_TIMEOUT;
6025 bxe_tx_mq_start_locked_exit:
6030 /* Multiqueue (TSS) dispatch routine. */
6032 bxe_tx_mq_start(struct ifnet *ifp,
6035 struct bxe_softc *sc = ifp->if_softc;
6036 struct bxe_fastpath *fp;
6039 fp_index = 0; /* default is the first queue */
6041 /* change the queue if using flow ID */
6042 if ((m->m_flags & M_FLOWID) != 0) {
6043 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
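        /* e.g. flowid 7 with num_queues 4 maps to fp[3] */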
6046 fp = &sc->fp[fp_index];
6048 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
6049 BLOGW(sc, "Interface not running, ignoring transmit request\n");
6053 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
6054 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
6058 if (!sc->link_vars.link_up) {
6059 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
6063 /* XXX change to TRYLOCK here and if failed then schedule taskqueue */
6066 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
6067 BXE_FP_TX_UNLOCK(fp);
6073 bxe_mq_flush(struct ifnet *ifp)
6075 struct bxe_softc *sc = ifp->if_softc;
6076 struct bxe_fastpath *fp;
6080 for (i = 0; i < sc->num_queues; i++) {
6083 if (fp->state != BXE_FP_STATE_OPEN) {
6084 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
6085 fp->index, fp->state);
6089 if (fp->tx_br != NULL) {
6090 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
6092 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
6095 BXE_FP_TX_UNLOCK(fp);
6102 #endif /* FreeBSD_version >= 800000 */
6105 bxe_cid_ilt_lines(struct bxe_softc *sc)
6108 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
6110 return (L2_ILT_LINES(sc));
6114 bxe_ilt_set_info(struct bxe_softc *sc)
6116 struct ilt_client_info *ilt_client;
6117 struct ecore_ilt *ilt = sc->ilt;
6120 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
6121 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
6124 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6125 ilt_client->client_num = ILT_CLIENT_CDU;
6126 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6127 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6128 ilt_client->start = line;
6129 line += bxe_cid_ilt_lines(sc);
6131 if (CNIC_SUPPORT(sc)) {
6132 line += CNIC_ILT_LINES;
6135 ilt_client->end = (line - 1);
6138 "ilt client[CDU]: start %d, end %d, "
6139 "psz 0x%x, flags 0x%x, hw psz %d\n",
6140 ilt_client->start, ilt_client->end,
6141 ilt_client->page_size,
6143 ilog2(ilt_client->page_size >> 12));
6146 if (QM_INIT(sc->qm_cid_count)) {
6147 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6148 ilt_client->client_num = ILT_CLIENT_QM;
6149 ilt_client->page_size = QM_ILT_PAGE_SZ;
6150 ilt_client->flags = 0;
6151 ilt_client->start = line;
6153 /* 4 bytes for each cid */
6154 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6157 ilt_client->end = (line - 1);
6160 "ilt client[QM]: start %d, end %d, "
6161 "psz 0x%x, flags 0x%x, hw psz %d\n",
6162 ilt_client->start, ilt_client->end,
6163 ilt_client->page_size, ilt_client->flags,
6164 ilog2(ilt_client->page_size >> 12));
6167 if (CNIC_SUPPORT(sc)) {
6169 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6170 ilt_client->client_num = ILT_CLIENT_SRC;
6171 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6172 ilt_client->flags = 0;
6173 ilt_client->start = line;
6174 line += SRC_ILT_LINES;
6175 ilt_client->end = (line - 1);
6178 "ilt client[SRC]: start %d, end %d, "
6179 "psz 0x%x, flags 0x%x, hw psz %d\n",
6180 ilt_client->start, ilt_client->end,
6181 ilt_client->page_size, ilt_client->flags,
6182 ilog2(ilt_client->page_size >> 12));
6185 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6186 ilt_client->client_num = ILT_CLIENT_TM;
6187 ilt_client->page_size = TM_ILT_PAGE_SZ;
6188 ilt_client->flags = 0;
6189 ilt_client->start = line;
6190 line += TM_ILT_LINES;
6191 ilt_client->end = (line - 1);
6194 "ilt client[TM]: start %d, end %d, "
6195 "psz 0x%x, flags 0x%x, hw psz %d\n",
6196 ilt_client->start, ilt_client->end,
6197 ilt_client->page_size, ilt_client->flags,
6198 ilog2(ilt_client->page_size >> 12));
6201 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
6205 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
6209 BLOGD(sc, DBG_LOAD, "mtu = %d\n", sc->mtu);
6211 for (i = 0; i < sc->num_queues; i++) {
6212 /* get the Rx buffer size for RX frames */
6213 sc->fp[i].rx_buf_size =
6214 (IP_HEADER_ALIGNMENT_PADDING +
6218 BLOGD(sc, DBG_LOAD, "rx_buf_size for fp[%02d] = %d\n",
6219 i, sc->fp[i].rx_buf_size);
6221 /* get the mbuf allocation size for RX frames */
6222 if (sc->fp[i].rx_buf_size <= MCLBYTES) {
6223 sc->fp[i].mbuf_alloc_size = MCLBYTES;
6224 } else if (sc->fp[i].rx_buf_size <= BCM_PAGE_SIZE) {
6225 sc->fp[i].mbuf_alloc_size = PAGE_SIZE;
6227 sc->fp[i].mbuf_alloc_size = MJUM9BYTES;
6230 BLOGD(sc, DBG_LOAD, "mbuf_alloc_size for fp[%02d] = %d\n",
6231 i, sc->fp[i].mbuf_alloc_size);
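        /*
         * For example (a sketch, assuming standard cluster sizes): a 1500
         * byte MTU yields an rx_buf_size of roughly 1520 bytes, which fits
         * a 2KB cluster (MCLBYTES); a 9000 byte jumbo MTU exceeds both
         * MCLBYTES and BCM_PAGE_SIZE and falls through to MJUM9BYTES.
         */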
6236 bxe_alloc_ilt_mem(struct bxe_softc *sc)
6241 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
6243 (M_NOWAIT | M_ZERO))) == NULL) {
6251 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6255 if ((sc->ilt->lines =
6256 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6258 (M_NOWAIT | M_ZERO))) == NULL) {
6266 bxe_free_ilt_mem(struct bxe_softc *sc)
6268 if (sc->ilt != NULL) {
6269 free(sc->ilt, M_BXE_ILT);
6275 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6277 if (sc->ilt->lines != NULL) {
6278 free(sc->ilt->lines, M_BXE_ILT);
6279 sc->ilt->lines = NULL;
6284 bxe_free_mem(struct bxe_softc *sc)
6289 if (!CONFIGURE_NIC_MODE(sc)) {
6290 /* free searcher T2 table */
6291 bxe_dma_free(sc, &sc->t2);
6295 for (i = 0; i < L2_ILT_LINES(sc); i++) {
6296 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6297 sc->context[i].vcxt = NULL;
6298 sc->context[i].size = 0;
6301 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6303 bxe_free_ilt_lines_mem(sc);
6306 bxe_iov_free_mem(sc);
6311 bxe_alloc_mem(struct bxe_softc *sc)
6318 if (!CONFIGURE_NIC_MODE(sc)) {
6319 /* allocate searcher T2 table */
6320 if (bxe_dma_alloc(sc, SRC_T2_SZ,
6321 &sc->t2, "searcher t2 table") != 0) {
6328 * Allocate memory for CDU context:
6329 * This memory is allocated separately and not in the generic ILT
6330 * functions because CDU differs in a few aspects:
6331 * 1. There can be multiple entities allocating memory for context -
6332 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6333 * its own ILT lines.
6334 * 2. Since CDU page-size is not a single 4KB page (which is the case
6335 * for the other ILT clients), to be efficient we want to support
6336 * allocation of sub-page-size in the last entry.
6337 * 3. Context pointers are used by the driver to pass to FW / update
6338 * the context (for the other ILT clients the pointers are used just to
6339 * free the memory during unload).
6341 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6342 for (i = 0, allocated = 0; allocated < context_size; i++) {
6343 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6344 (context_size - allocated));
6346 if (bxe_dma_alloc(sc, sc->context[i].size,
6347 &sc->context[i].vcxt_dma,
6348 "cdu context") != 0) {
6353 sc->context[i].vcxt =
6354 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6356 allocated += sc->context[i].size;
6359 bxe_alloc_ilt_lines_mem(sc);
6361 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6362 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6364 for (i = 0; i < 4; i++) {
6366 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6368 sc->ilt->clients[i].page_size,
6369 sc->ilt->clients[i].start,
6370 sc->ilt->clients[i].end,
6371 sc->ilt->clients[i].client_num,
6372 sc->ilt->clients[i].flags);
6375 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6376 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6382 if (bxe_iov_alloc_mem(sc)) {
6383 BLOGE(sc, "Failed to allocate memory for SRIOV\n");
6393 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6395 struct bxe_softc *sc;
6400 if (fp->rx_mbuf_tag == NULL) {
6404 /* free all mbufs and unload all maps */
6405 for (i = 0; i < RX_BD_TOTAL; i++) {
6406 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6407 bus_dmamap_sync(fp->rx_mbuf_tag,
6408 fp->rx_mbuf_chain[i].m_map,
6409 BUS_DMASYNC_POSTREAD);
6410 bus_dmamap_unload(fp->rx_mbuf_tag,
6411 fp->rx_mbuf_chain[i].m_map);
6414 if (fp->rx_mbuf_chain[i].m != NULL) {
6415 m_freem(fp->rx_mbuf_chain[i].m);
6416 fp->rx_mbuf_chain[i].m = NULL;
6417 fp->eth_q_stats.mbuf_alloc_rx--;
6423 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6425 struct bxe_softc *sc;
6426 int i, max_agg_queues;
6430 if (fp->rx_mbuf_tag == NULL) {
6434 max_agg_queues = MAX_AGG_QS(sc);
6436 /* release all mbufs and unload all DMA maps in the TPA pool */
6437 for (i = 0; i < max_agg_queues; i++) {
6438 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6439 bus_dmamap_sync(fp->rx_mbuf_tag,
6440 fp->rx_tpa_info[i].bd.m_map,
6441 BUS_DMASYNC_POSTREAD);
6442 bus_dmamap_unload(fp->rx_mbuf_tag,
6443 fp->rx_tpa_info[i].bd.m_map);
6446 if (fp->rx_tpa_info[i].bd.m != NULL) {
6447 m_freem(fp->rx_tpa_info[i].bd.m);
6448 fp->rx_tpa_info[i].bd.m = NULL;
6449 fp->eth_q_stats.mbuf_alloc_tpa--;
6455 bxe_free_sge_chain(struct bxe_fastpath *fp)
6457 struct bxe_softc *sc;
6462 if (fp->rx_sge_mbuf_tag == NULL) {
6466 /* free all mbufs and unload all maps */
6467 for (i = 0; i < RX_SGE_TOTAL; i++) {
6468 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6469 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6470 fp->rx_sge_mbuf_chain[i].m_map,
6471 BUS_DMASYNC_POSTREAD);
6472 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6473 fp->rx_sge_mbuf_chain[i].m_map);
6476 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6477 m_freem(fp->rx_sge_mbuf_chain[i].m);
6478 fp->rx_sge_mbuf_chain[i].m = NULL;
6479 fp->eth_q_stats.mbuf_alloc_sge--;
6485 bxe_free_fp_buffers(struct bxe_softc *sc)
6487 struct bxe_fastpath *fp;
6490 for (i = 0; i < sc->num_queues; i++) {
6493 #if __FreeBSD_version >= 800000
6494 if (fp->tx_br != NULL) {
6496 /* just in case bxe_mq_flush() wasn't called */
6497 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
6500 buf_ring_free(fp->tx_br, M_DEVBUF);
6505 /* free all RX buffers */
6506 bxe_free_rx_bd_chain(fp);
6507 bxe_free_tpa_pool(fp);
6508 bxe_free_sge_chain(fp);
6510 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6511 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6512 fp->eth_q_stats.mbuf_alloc_rx);
6515 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6516 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6517 fp->eth_q_stats.mbuf_alloc_sge);
6520 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6521 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6522 fp->eth_q_stats.mbuf_alloc_tpa);
6525 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6526 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6527 fp->eth_q_stats.mbuf_alloc_tx);
6530 /* XXX verify all mbufs were reclaimed */
6532 if (mtx_initialized(&fp->tx_mtx)) {
6533 mtx_destroy(&fp->tx_mtx);
6536 if (mtx_initialized(&fp->rx_mtx)) {
6537 mtx_destroy(&fp->rx_mtx);
6543 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6544 uint16_t prev_index,
6547 struct bxe_sw_rx_bd *rx_buf;
6548 struct eth_rx_bd *rx_bd;
6549 bus_dma_segment_t segs[1];
6556 /* allocate the new RX BD mbuf */
6557 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6558 if (__predict_false(m == NULL)) {
6559 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6563 fp->eth_q_stats.mbuf_alloc_rx++;
6565 /* initialize the mbuf buffer length */
6566 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6568 /* map the mbuf into non-paged pool */
6569 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6570 fp->rx_mbuf_spare_map,
6571 m, segs, &nsegs, BUS_DMA_NOWAIT);
6572 if (__predict_false(rc != 0)) {
6573 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6575 fp->eth_q_stats.mbuf_alloc_rx--;
6579 /* all mbufs must map to a single segment */
6580 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6582 /* release any existing RX BD mbuf mappings */
6584 if (prev_index != index) {
6585 rx_buf = &fp->rx_mbuf_chain[prev_index];
6587 if (rx_buf->m_map != NULL) {
6588 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6589 BUS_DMASYNC_POSTREAD);
6590 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6594 * We only get here from bxe_rxeof() when the maximum number
6595 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6596 * holds the mbuf in the prev_index so it's OK to NULL it out
6597 * here without concern of a memory leak.
6599 fp->rx_mbuf_chain[prev_index].m = NULL;
6602 rx_buf = &fp->rx_mbuf_chain[index];
6604 if (rx_buf->m_map != NULL) {
6605 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6606 BUS_DMASYNC_POSTREAD);
6607 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6610 /* save the mbuf and mapping info for a future packet */
6611 map = (prev_index != index) ?
6612 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6613 rx_buf->m_map = fp->rx_mbuf_spare_map;
6614 fp->rx_mbuf_spare_map = map;
6615 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6616 BUS_DMASYNC_PREREAD);
6619 rx_bd = &fp->rx_chain[index];
6620 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6621 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
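    /*
     * Design note on the spare-map swap above: the new mbuf is loaded into
     * fp->rx_mbuf_spare_map before the ring slot's old mapping is torn
     * down, and only then are the two maps exchanged. A failed allocation
     * therefore leaves the slot's existing mbuf and mapping intact, so the
     * RX ring never holds an unmapped entry. The TPA and SGE allocators
     * below follow the same pattern.
     */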
6627 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6630 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6631 bus_dma_segment_t segs[1];
6637 /* allocate the new TPA mbuf */
6638 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6639 if (__predict_false(m == NULL)) {
6640 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6644 fp->eth_q_stats.mbuf_alloc_tpa++;
6646 /* initialize the mbuf buffer length */
6647 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6649 /* map the mbuf into non-paged pool */
6650 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6651 fp->rx_tpa_info_mbuf_spare_map,
6652 m, segs, &nsegs, BUS_DMA_NOWAIT);
6653 if (__predict_false(rc != 0)) {
6654 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6656 fp->eth_q_stats.mbuf_alloc_tpa--;
6660 /* all mbufs must map to a single segment */
6661 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6663 /* release any existing TPA mbuf mapping */
6664 if (tpa_info->bd.m_map != NULL) {
6665 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6666 BUS_DMASYNC_POSTREAD);
6667 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6670 /* save the mbuf and mapping info for the TPA mbuf */
6671 map = tpa_info->bd.m_map;
6672 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6673 fp->rx_tpa_info_mbuf_spare_map = map;
6674 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6675 BUS_DMASYNC_PREREAD);
6677 tpa_info->seg = segs[0];
6683 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6684 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6688 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6691 struct bxe_sw_rx_bd *sge_buf;
6692 struct eth_rx_sge *sge;
6693 bus_dma_segment_t segs[1];
6699 /* allocate a new SGE mbuf */
6700 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6701 if (__predict_false(m == NULL)) {
6702 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6706 fp->eth_q_stats.mbuf_alloc_sge++;
6708 /* initialize the mbuf buffer length */
6709 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6711 /* map the SGE mbuf into non-paged pool */
6712 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6713 fp->rx_sge_mbuf_spare_map,
6714 m, segs, &nsegs, BUS_DMA_NOWAIT);
6715 if (__predict_false(rc != 0)) {
6716 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6718 fp->eth_q_stats.mbuf_alloc_sge--;
6722 /* all mbufs must map to a single segment */
6723 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6725 sge_buf = &fp->rx_sge_mbuf_chain[index];
6727 /* release any existing SGE mbuf mapping */
6728 if (sge_buf->m_map != NULL) {
6729 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6730 BUS_DMASYNC_POSTREAD);
6731 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6734 /* save the mbuf and mapping info for a future packet */
6735 map = sge_buf->m_map;
6736 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6737 fp->rx_sge_mbuf_spare_map = map;
6738 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6739 BUS_DMASYNC_PREREAD);
6742 sge = &fp->rx_sge_chain[index];
6743 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6744 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6749 static __noinline int
6750 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6752 struct bxe_fastpath *fp;
6754 int ring_prod, cqe_ring_prod;
6757 for (i = 0; i < sc->num_queues; i++) {
6760 #if __FreeBSD_version >= 800000
6761 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
6762 M_DONTWAIT, &fp->tx_mtx);
6763 if (fp->tx_br == NULL) {
6764 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
6765 goto bxe_alloc_fp_buffers_error;
6769 ring_prod = cqe_ring_prod = 0;
6773 /* allocate buffers for the RX BDs in RX BD chain */
6774 for (j = 0; j < sc->max_rx_bufs; j++) {
6775 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6777 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6779 goto bxe_alloc_fp_buffers_error;
6782 ring_prod = RX_BD_NEXT(ring_prod);
6783 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6786 fp->rx_bd_prod = ring_prod;
6787 fp->rx_cq_prod = cqe_ring_prod;
6788 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6790 if (sc->ifnet->if_capenable & IFCAP_LRO) {
6791 max_agg_queues = MAX_AGG_QS(sc);
6793 fp->tpa_enable = TRUE;
6795 /* fill the TPA pool */
6796 for (j = 0; j < max_agg_queues; j++) {
6797 rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6799 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6801 fp->tpa_enable = FALSE;
6802 goto bxe_alloc_fp_buffers_error;
6805 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6808 if (fp->tpa_enable) {
6809 /* fill the RX SGE chain */
6811 for (j = 0; j < RX_SGE_USABLE; j++) {
6812 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6814 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6816 fp->tpa_enable = FALSE;
6818 goto bxe_alloc_fp_buffers_error;
6821 ring_prod = RX_SGE_NEXT(ring_prod);
6824 fp->rx_sge_prod = ring_prod;
6831 bxe_alloc_fp_buffers_error:
6833 /* unwind what was already allocated */
6834 bxe_free_rx_bd_chain(fp);
6835 bxe_free_tpa_pool(fp);
6836 bxe_free_sge_chain(fp);
6842 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6844 bxe_dma_free(sc, &sc->fw_stats_dma);
6846 sc->fw_stats_num = 0;
6848 sc->fw_stats_req_size = 0;
6849 sc->fw_stats_req = NULL;
6850 sc->fw_stats_req_mapping = 0;
6852 sc->fw_stats_data_size = 0;
6853 sc->fw_stats_data = NULL;
6854 sc->fw_stats_data_mapping = 0;
6858 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6860 uint8_t num_queue_stats;
6863 /* number of queues for statistics is number of eth queues */
6864 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6867 * Total number of FW statistics requests =
6868 * 1 for port stats + 1 for PF stats + num of queues
6870 sc->fw_stats_num = (2 + num_queue_stats);
6873 * Request is built from stats_query_header and an array of
6874 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6875 * rules. The real number of requests is configured in the
6876 * stats_query_header.
6879 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6880 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
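    /*
     * Worked example (a sketch, assuming STATS_QUERY_CMD_COUNT is 16):
     * with 4 ethernet queues, fw_stats_num = 2 + 4 = 6 and
     * num_groups = (6 / 16) + ((6 % 16) ? 1 : 0) = 0 + 1 = 1 command group.
     */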
6882 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6883 sc->fw_stats_num, num_groups);
6885 sc->fw_stats_req_size =
6886 (sizeof(struct stats_query_header) +
6887 (num_groups * sizeof(struct stats_query_cmd_group)));
6890 * Data for statistics requests + stats_counter.
6891 * stats_counter holds per-STORM counters that are incremented when
6892 * STORM has finished with the current request. Memory for FCoE
6893 * offloaded statistics is counted anyway, even if it will not be sent.
6894 * VF stats are not accounted for here as the data of VF stats is stored
6895 * in memory allocated by the VF, not here.
6897 sc->fw_stats_data_size =
6898 (sizeof(struct stats_counter) +
6899 sizeof(struct per_port_stats) +
6900 sizeof(struct per_pf_stats) +
6901 /* sizeof(struct fcoe_statistics_params) + */
6902 (sizeof(struct per_queue_stats) * num_queue_stats));
6904 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6905 &sc->fw_stats_dma, "fw stats") != 0) {
6906 bxe_free_fw_stats_mem(sc);
6910 /* set up the shortcuts */
6913 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6914 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6917 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6918 sc->fw_stats_req_size);
6919 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6920 sc->fw_stats_req_size);
6922 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6923 (uintmax_t)sc->fw_stats_req_mapping);
6925 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6926 (uintmax_t)sc->fw_stats_data_mapping);
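    /*
     * The single DMA block allocated above is thus laid out as:
     *
     *     fw_stats_dma.vaddr --> +----------------------------+
     *                            | stats_query_header         |  fw_stats_req
     *                            | stats_query_cmd_group[]    |  (req_size)
     *     fw_stats_data      --> +----------------------------+
     *                            | stats_counter              |
     *                            | per_port / per_pf          |  fw_stats_data
     *                            | per_queue[num_queue_stats] |  (data_size)
     *                            +----------------------------+
     */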
6933 * 0-7 - Engine0 load counter.
6934 * 8-15 - Engine1 load counter.
6935 * 16 - Engine0 RESET_IN_PROGRESS bit.
6936 * 17 - Engine1 RESET_IN_PROGRESS bit.
6937 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6938 * function on the engine
6939 * 19 - Engine1 ONE_IS_LOADED.
6940 * 20 - Chip reset flow bit. When set, a non-leader must wait for both
6941 * engines' leaders to complete (check for both RESET_IN_PROGRESS bits and not
6942 * for just the one belonging to its engine).
6944 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
6945 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
6946 #define BXE_PATH0_LOAD_CNT_SHIFT 0
6947 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
6948 #define BXE_PATH1_LOAD_CNT_SHIFT 8
6949 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6950 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6951 #define BXE_GLOBAL_RESET_BIT 0x00040000
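/*
 * Decoding example (a sketch using the masks above): a register value of
 * 0x00050003 means the engine0 load counter is 3, the engine1 load counter
 * is 0, BXE_PATH0_RST_IN_PROG_BIT is set, and BXE_GLOBAL_RESET_BIT is set.
 */
#if 0
	uint32_t val = 0x00050003;
	uint32_t eng0_cnt = ((val & BXE_PATH0_LOAD_CNT_MASK) >>
	                     BXE_PATH0_LOAD_CNT_SHIFT);           /* 3 */
	uint32_t eng1_cnt = ((val & BXE_PATH1_LOAD_CNT_MASK) >>
	                     BXE_PATH1_LOAD_CNT_SHIFT);           /* 0 */
	int eng0_rst = ((val & BXE_PATH0_RST_IN_PROG_BIT) != 0);  /* 1 */
	int glob_rst = ((val & BXE_GLOBAL_RESET_BIT) != 0);       /* 1 */
#endif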
6953 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6955 bxe_set_reset_global(struct bxe_softc *sc)
6958 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6959 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6960 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6961 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6964 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6966 bxe_clear_reset_global(struct bxe_softc *sc)
6969 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6970 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6971 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6972 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6975 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6977 bxe_reset_is_global(struct bxe_softc *sc)
6979 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6980 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6981 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6984 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6986 bxe_set_reset_done(struct bxe_softc *sc)
6989 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6990 BXE_PATH0_RST_IN_PROG_BIT;
6992 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6994 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6997 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6999 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7002 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
7004 bxe_set_reset_in_progress(struct bxe_softc *sc)
7007 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
7008 BXE_PATH0_RST_IN_PROG_BIT;
7010 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7012 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7015 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7017 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7020 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
7022 bxe_reset_is_done(struct bxe_softc *sc,
7025 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7026 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
7027 BXE_PATH0_RST_IN_PROG_BIT;
7029 /* return false if bit is set */
7030 return (val & bit) ? FALSE : TRUE;
7033 /* get the load status for an engine, should be run under rtnl lock */
7035 bxe_get_load_status(struct bxe_softc *sc,
7038 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
7039 BXE_PATH0_LOAD_CNT_MASK;
7040 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
7041 BXE_PATH0_LOAD_CNT_SHIFT;
7042 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7044 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
7046 val = ((val & mask) >> shift);
7048 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
7053 /* set pf load mark */
7054 /* XXX needs to be under rtnl lock */
7056 bxe_set_pf_load(struct bxe_softc *sc)
7060 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
7061 BXE_PATH0_LOAD_CNT_MASK;
7062 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
7063 BXE_PATH0_LOAD_CNT_SHIFT;
7065 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7067 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7068 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
7070 /* get the current counter value */
7071 val1 = ((val & mask) >> shift);
7073 /* set bit of this PF */
7074 val1 |= (1 << SC_ABS_FUNC(sc));
7076 /* clear the old value */
7079 /* set the new one */
7080 val |= ((val1 << shift) & mask);
7082 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7084 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7087 /* clear pf load mark */
7088 /* XXX needs to be under rtnl lock */
7090 bxe_clear_pf_load(struct bxe_softc *sc)
7093 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
7094 BXE_PATH0_LOAD_CNT_MASK;
7095 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
7096 BXE_PATH0_LOAD_CNT_SHIFT;
7098 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7099 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
7100 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
7102 /* get the current counter value */
7103 val1 = (val & mask) >> shift;
7105 /* clear bit of that PF */
7106 val1 &= ~(1 << SC_ABS_FUNC(sc));
7108 /* clear the old value */
7111 /* set the new one */
7112 val |= ((val1 << shift) & mask);
7114 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
7115 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
7119 /* send load request to the MCP and analyze the response */
7121 bxe_nic_load_request(struct bxe_softc *sc,
7122 uint32_t *load_code)
7126 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
7127 DRV_MSG_SEQ_NUMBER_MASK);
7129 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
7131 /* get the current FW pulse sequence */
7132 sc->fw_drv_pulse_wr_seq =
7133 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
7134 DRV_PULSE_SEQ_MASK);
7136 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
7137 sc->fw_drv_pulse_wr_seq);
7140 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
7141 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
7143 /* if the MCP fails to respond we must abort */
7144 if (!(*load_code)) {
7145 BLOGE(sc, "MCP response failure!\n");
7149 /* if MCP refused then must abort */
7150 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7151 BLOGE(sc, "MCP refused load request\n");
7159  * Check whether another PF has already loaded FW to the chip. In virtualized
7160  * environments a PF from another VM may have already initialized the device,
7161 * including loading FW.
7164 bxe_nic_load_analyze_req(struct bxe_softc *sc,
7167 uint32_t my_fw, loaded_fw;
7169 /* is another pf loaded on this engine? */
7170 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
7171 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
7172 /* build my FW version dword */
7173 my_fw = (BCM_5710_FW_MAJOR_VERSION +
7174 (BCM_5710_FW_MINOR_VERSION << 8 ) +
7175 (BCM_5710_FW_REVISION_VERSION << 16) +
7176 (BCM_5710_FW_ENGINEERING_VERSION << 24));
7178 /* read loaded FW from chip */
7179 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
7180 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
7183 /* abort nic load if version mismatch */
7184 if (my_fw != loaded_fw) {
7185 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
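/*
 * Standalone sketch (guarded out of the build) of the FW version dword
 * layout built above: byte 0 is the major version, byte 1 the minor,
 * byte 2 the revision and byte 3 the engineering version. For example,
 * 7.8.19.0 packs to 0x00130807. Names here are illustrative.
 */
#if 0
#include <stdint.h>

static uint32_t
pack_fw_version(uint8_t major, uint8_t minor, uint8_t rev, uint8_t eng)
{
    return ((uint32_t)major |
            ((uint32_t)minor << 8) |
            ((uint32_t)rev << 16) |
            ((uint32_t)eng << 24));
}

static void
unpack_fw_version(uint32_t fw, uint8_t *major, uint8_t *minor,
                  uint8_t *rev, uint8_t *eng)
{
    *major = (fw >> 0) & 0xff;
    *minor = (fw >> 8) & 0xff;
    *rev   = (fw >> 16) & 0xff;
    *eng   = (fw >> 24) & 0xff;
}
#endif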
7194 /* mark PMF if applicable */
7196 bxe_nic_load_pmf(struct bxe_softc *sc,
7199 uint32_t ncsi_oem_data_addr;
7201 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7202 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
7203 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
7205          * Barrier for ordering between the write to sc->port.pmf here
7206          * and the read of it from the periodic task.
7214 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
7217 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
7218 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
7219 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
7220 if (ncsi_oem_data_addr) {
7222 (ncsi_oem_data_addr +
7223 offsetof(struct glob_ncsi_oem_data, driver_version)),
7231 bxe_read_mf_cfg(struct bxe_softc *sc)
7233 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
7237 if (BXE_NOMCP(sc)) {
7238         return; /* what should be the default value in this case? */
7242 * The formula for computing the absolute function number is...
7243 * For 2 port configuration (4 functions per port):
7244 * abs_func = 2 * vn + SC_PORT + SC_PATH
7245 * For 4 port configuration (2 functions per port):
7246 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
7248 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
7249 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
7250 if (abs_func >= E1H_FUNC_MAX) {
7253 sc->devinfo.mf_info.mf_config[vn] =
7254 MFCFG_RD(sc, func_mf_config[abs_func].config);
7257 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
7258 FUNC_MF_CFG_FUNC_DISABLED) {
7259 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
7260 sc->flags |= BXE_MF_FUNC_DIS;
7262 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
7263 sc->flags &= ~BXE_MF_FUNC_DIS;
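/*
 * Worked example (standalone) of the abs_func formula used in
 * bxe_read_mf_cfg() above. In 4-port mode n = 2, so vn=1, port=1, path=0
 * gives abs_func = 2*(2*1 + 1) + 0 = 6; in 2-port mode n = 1 and the
 * same inputs give abs_func = (2*1 + 1) + 0 = 3.
 */
#if 0
#include <stdio.h>

static int
abs_func(int n, int vn, int port, int path)
{
    return (n * (2 * vn + port) + path);
}

int
main(void)
{
    /* prints: 4-port: 6, 2-port: 3 */
    printf("4-port: %d, 2-port: %d\n",
           abs_func(2, 1, 1, 0), abs_func(1, 1, 1, 0));
    return (0);
}
#endif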
7267 /* acquire split MCP access lock register */
7268 static int bxe_acquire_alr(struct bxe_softc *sc)
7272 for (j = 0; j < 1000; j++) {
7274 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
7275 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
7276 if (val & (1L << 31))
7282 if (!(val & (1L << 31))) {
7283 BLOGE(sc, "Cannot acquire MCP access lock register\n");
7290 /* release split MCP access lock register */
7291 static void bxe_release_alr(struct bxe_softc *sc)
7293 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
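/*
 * Standalone sketch of the acquire/release pattern above, using
 * hypothetical reg_write()/reg_read()/delay_usec() primitives: request
 * ownership by writing bit 31, then poll until the read-back confirms
 * it. Release is a plain write of zero, as in bxe_release_alr().
 */
#if 0
#include <stdint.h>

extern void     reg_write(uint32_t addr, uint32_t val);
extern uint32_t reg_read(uint32_t addr);
extern void     delay_usec(int usec);

static int
acquire_hw_semaphore(uint32_t addr)
{
    int j;

    for (j = 0; j < 1000; j++) {
        reg_write(addr, 1U << 31);    /* request the lock */
        if (reg_read(addr) & (1U << 31))
            return (0);               /* read-back confirms ownership */
        delay_usec(5);
    }

    return (-1); /* timed out */
}
#endif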
7297 bxe_fan_failure(struct bxe_softc *sc)
7299 int port = SC_PORT(sc);
7300 uint32_t ext_phy_config;
7302 /* mark the failure */
7304 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7306 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7307 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7308 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7311 /* log the failure */
7312 BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7313 "the card to prevent permanent damage. "
7314 "Please contact OEM Support for assistance\n");
7318 bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7321 * Schedule device reset (unload)
7322      * Some boards consume enough power while the driver is up to
7323      * overheat if the fan fails.
7325 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7326 schedule_delayed_work(&sc->sp_rtnl_task, 0);
7330 /* this function is called upon a link interrupt */
7332 bxe_link_attn(struct bxe_softc *sc)
7334 uint32_t pause_enabled = 0;
7335 struct host_port_stats *pstats;
7338 /* Make sure that we are synced with the current statistics */
7339 bxe_stats_handle(sc, STATS_EVENT_STOP);
7341 elink_link_update(&sc->link_params, &sc->link_vars);
7343 if (sc->link_vars.link_up) {
7345 /* dropless flow control */
7346 if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7349 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7354 (BAR_USTRORM_INTMEM +
7355 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7359 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7360 pstats = BXE_SP(sc, port_stats);
7361 /* reset old mac stats */
7362 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7365 if (sc->state == BXE_STATE_OPEN) {
7366 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7370 if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7371 cmng_fns = bxe_get_cmng_fns_mode(sc);
7373 if (cmng_fns != CMNG_FNS_NONE) {
7374 bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7375 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7377 /* rate shaping and fairness are disabled */
7378 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7382 bxe_link_report_locked(sc);
7385 ; // XXX bxe_link_sync_notify(sc);
7390 bxe_attn_int_asserted(struct bxe_softc *sc,
7393 int port = SC_PORT(sc);
7394 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7395 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7396 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7397 NIG_REG_MASK_INTERRUPT_PORT0;
7399 uint32_t nig_mask = 0;
7404 if (sc->attn_state & asserted) {
7405 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7408 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7410 aeu_mask = REG_RD(sc, aeu_addr);
7412 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7413 aeu_mask, asserted);
7415 aeu_mask &= ~(asserted & 0x3ff);
7417 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7419 REG_WR(sc, aeu_addr, aeu_mask);
7421 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7423 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7424 sc->attn_state |= asserted;
7425 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7427 if (asserted & ATTN_HARD_WIRED_MASK) {
7428 if (asserted & ATTN_NIG_FOR_FUNC) {
7432 /* save nig interrupt mask */
7433 nig_mask = REG_RD(sc, nig_int_mask_addr);
7435 /* If nig_mask is not set, no need to call the update function */
7437 REG_WR(sc, nig_int_mask_addr, 0);
7442 /* handle unicore attn? */
7445 if (asserted & ATTN_SW_TIMER_4_FUNC) {
7446 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7449 if (asserted & GPIO_2_FUNC) {
7450 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7453 if (asserted & GPIO_3_FUNC) {
7454 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7457 if (asserted & GPIO_4_FUNC) {
7458 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7462 if (asserted & ATTN_GENERAL_ATTN_1) {
7463 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7464 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7466 if (asserted & ATTN_GENERAL_ATTN_2) {
7467 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7468 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7470 if (asserted & ATTN_GENERAL_ATTN_3) {
7471 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7472 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7475 if (asserted & ATTN_GENERAL_ATTN_4) {
7476 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7477 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7479 if (asserted & ATTN_GENERAL_ATTN_5) {
7480 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7481 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7483 if (asserted & ATTN_GENERAL_ATTN_6) {
7484 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7485 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7490 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7491 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7493 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7496 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7498 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7499 REG_WR(sc, reg_addr, asserted);
7501 /* now set back the mask */
7502 if (asserted & ATTN_NIG_FOR_FUNC) {
7504 * Verify that IGU ack through BAR was written before restoring
7505 * NIG mask. This loop should exit after 2-3 iterations max.
7507 if (sc->devinfo.int_block != INT_BLOCK_HC) {
7511 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7512 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7513 (++cnt < MAX_IGU_ATTN_ACK_TO));
7516 BLOGE(sc, "Failed to verify IGU ack on time\n");
7522 REG_WR(sc, nig_int_mask_addr, nig_mask);
7529 bxe_print_next_block(struct bxe_softc *sc,
7533 BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7537 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7542 uint32_t cur_bit = 0;
7545 for (i = 0; sig; i++) {
7546 cur_bit = ((uint32_t)0x1 << i);
7547 if (sig & cur_bit) {
7549 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7551 bxe_print_next_block(sc, par_num++, "BRB");
7553 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7555 bxe_print_next_block(sc, par_num++, "PARSER");
7557 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7559 bxe_print_next_block(sc, par_num++, "TSDM");
7561 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7563 bxe_print_next_block(sc, par_num++, "SEARCHER");
7565 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7567 bxe_print_next_block(sc, par_num++, "TCM");
7569 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7571 bxe_print_next_block(sc, par_num++, "TSEMI");
7573 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7575 bxe_print_next_block(sc, par_num++, "XPB");
7588 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7595 uint32_t cur_bit = 0;
7596 for (i = 0; sig; i++) {
7597 cur_bit = ((uint32_t)0x1 << i);
7598 if (sig & cur_bit) {
7600 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7602 bxe_print_next_block(sc, par_num++, "PBF");
7604 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7606 bxe_print_next_block(sc, par_num++, "QM");
7608 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7610 bxe_print_next_block(sc, par_num++, "TM");
7612 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7614 bxe_print_next_block(sc, par_num++, "XSDM");
7616 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7618 bxe_print_next_block(sc, par_num++, "XCM");
7620 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7622 bxe_print_next_block(sc, par_num++, "XSEMI");
7624 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7626 bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7628 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7630 bxe_print_next_block(sc, par_num++, "NIG");
7632 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7634 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7637 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7639 bxe_print_next_block(sc, par_num++, "DEBUG");
7641 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7643 bxe_print_next_block(sc, par_num++, "USDM");
7645 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7647 bxe_print_next_block(sc, par_num++, "UCM");
7649 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7651 bxe_print_next_block(sc, par_num++, "USEMI");
7653 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7655 bxe_print_next_block(sc, par_num++, "UPB");
7657 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7659 bxe_print_next_block(sc, par_num++, "CSDM");
7661 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7663 bxe_print_next_block(sc, par_num++, "CCM");
7676 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7681 uint32_t cur_bit = 0;
7684 for (i = 0; sig; i++) {
7685 cur_bit = ((uint32_t)0x1 << i);
7686 if (sig & cur_bit) {
7688 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7690 bxe_print_next_block(sc, par_num++, "CSEMI");
7692 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7694 bxe_print_next_block(sc, par_num++, "PXP");
7696 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7698 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7700 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7702 bxe_print_next_block(sc, par_num++, "CFC");
7704 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7706 bxe_print_next_block(sc, par_num++, "CDU");
7708 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7710 bxe_print_next_block(sc, par_num++, "DMAE");
7712 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7714 bxe_print_next_block(sc, par_num++, "IGU");
7716 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7718 bxe_print_next_block(sc, par_num++, "MISC");
7731 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7737 uint32_t cur_bit = 0;
7740 for (i = 0; sig; i++) {
7741 cur_bit = ((uint32_t)0x1 << i);
7742 if (sig & cur_bit) {
7744 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7746 bxe_print_next_block(sc, par_num++, "MCP ROM");
7749 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7751 bxe_print_next_block(sc, par_num++,
7755 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7757 bxe_print_next_block(sc, par_num++,
7761 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7763 bxe_print_next_block(sc, par_num++,
7778 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7783 uint32_t cur_bit = 0;
7786 for (i = 0; sig; i++) {
7787 cur_bit = ((uint32_t)0x1 << i);
7788 if (sig & cur_bit) {
7790 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7792 bxe_print_next_block(sc, par_num++, "PGLUE_B");
7794 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7796 bxe_print_next_block(sc, par_num++, "ATC");
7809 bxe_parity_attn(struct bxe_softc *sc,
7816 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7817 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7818 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7819 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7820 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7821 BLOGE(sc, "Parity error: HW block parity attention:\n"
7822 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7823 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7824 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7825 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7826 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7827 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7830 BLOGI(sc, "Parity errors detected in blocks: ");
7833 bxe_check_blocks_with_parity0(sc, sig[0] &
7834 HW_PRTY_ASSERT_SET_0,
7837 bxe_check_blocks_with_parity1(sc, sig[1] &
7838 HW_PRTY_ASSERT_SET_1,
7839 par_num, global, print);
7841 bxe_check_blocks_with_parity2(sc, sig[2] &
7842 HW_PRTY_ASSERT_SET_2,
7845 bxe_check_blocks_with_parity3(sc, sig[3] &
7846 HW_PRTY_ASSERT_SET_3,
7847 par_num, global, print);
7849 bxe_check_blocks_with_parity4(sc, sig[4] &
7850 HW_PRTY_ASSERT_SET_4,
7863 bxe_chk_parity_attn(struct bxe_softc *sc,
7867 struct attn_route attn = { {0} };
7868 int port = SC_PORT(sc);
7870 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7871 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7872 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7873 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7875 if (!CHIP_IS_E1x(sc))
7876 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7878 return (bxe_parity_attn(sc, global, print, attn.sig));
7882 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7887 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7888 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7889 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7890 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7891 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7892 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7893 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7894 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7895 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7896 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7897 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7898 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7899 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7900 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7901 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7902 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7903 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7904 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7905 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7906 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7907 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7910 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7911 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7912 BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7913 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7914 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7915 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7916 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7917 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7918 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7919 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7920 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7921 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7922 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7923 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7924 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7927 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7928 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7929 BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7930 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7931 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7936 bxe_e1h_disable(struct bxe_softc *sc)
7938 int port = SC_PORT(sc);
7942 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7946 bxe_e1h_enable(struct bxe_softc *sc)
7948 int port = SC_PORT(sc);
7950 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7952 // XXX bxe_tx_enable(sc);
7956 * called due to MCP event (on pmf):
7957 * reread new bandwidth configuration
7959  * notify the other functions about the change
7962 bxe_config_mf_bw(struct bxe_softc *sc)
7964 if (sc->link_vars.link_up) {
7965 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7966 // XXX bxe_link_sync_notify(sc);
7969 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7973 bxe_set_mf_bw(struct bxe_softc *sc)
7975 bxe_config_mf_bw(sc);
7976 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7980 bxe_handle_eee_event(struct bxe_softc *sc)
7982 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7983 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7986 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7989 bxe_drv_info_ether_stat(struct bxe_softc *sc)
7991 struct eth_stats_info *ether_stat =
7992 &sc->sp->drv_info_to_mcp.ether_stat;
7994 strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7995 ETH_STAT_INFO_VERSION_LEN);
7997 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7998 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7999 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
8000 ether_stat->mac_local + MAC_PAD,
8003 ether_stat->mtu_size = sc->mtu;
8005 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
8006 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
8007 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
8010 // XXX ether_stat->feature_flags |= ???;
8012 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
8014 ether_stat->txq_size = sc->tx_ring_size;
8015 ether_stat->rxq_size = sc->rx_ring_size;
8019 bxe_handle_drv_info_req(struct bxe_softc *sc)
8021 enum drv_info_opcode op_code;
8022 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
8024 /* if drv_info version supported by MFW doesn't match - send NACK */
8025 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
8026 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
8030 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
8031 DRV_INFO_CONTROL_OP_CODE_SHIFT);
8033 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
8036 case ETH_STATS_OPCODE:
8037 bxe_drv_info_ether_stat(sc);
8039 case FCOE_STATS_OPCODE:
8040 case ISCSI_STATS_OPCODE:
8042 /* if op code isn't supported - send NACK */
8043 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
8048      * If we got drv_info attn from MFW then these fields are defined in
     * shmem2 for sure.
     */
8051 SHMEM2_WR(sc, drv_info_host_addr_lo,
8052 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
8053 SHMEM2_WR(sc, drv_info_host_addr_hi,
8054 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
8056 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
8060 bxe_dcc_event(struct bxe_softc *sc,
8063 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
8065 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
8067          * This is the only place besides the function initialization
8068          * where the sc->flags can change, so it is done without any locks.
8071 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
8072 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
8073 sc->flags |= BXE_MF_FUNC_DIS;
8074 bxe_e1h_disable(sc);
8076 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
8077 sc->flags &= ~BXE_MF_FUNC_DIS;
8080 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
8083 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
8084 bxe_config_mf_bw(sc);
8085 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
8088 /* Report results to MCP */
8090 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
8092 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
8096 bxe_pmf_update(struct bxe_softc *sc)
8098 int port = SC_PORT(sc);
8102 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
8105 * We need the mb() to ensure the ordering between the writing to
8106 * sc->port.pmf here and reading it from the bxe_periodic_task().
8110 /* queue a periodic task */
8111 // XXX schedule task...
8113 // XXX bxe_dcbx_pmf_update(sc);
8115 /* enable nig attention */
8116 val = (0xff0f | (1 << (SC_VN(sc) + 4)));
8117 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8118 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
8119 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
8120 } else if (!CHIP_IS_E1x(sc)) {
8121 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
8122 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
8125 bxe_stats_handle(sc, STATS_EVENT_PMF);
8129 bxe_mc_assert(struct bxe_softc *sc)
8133 uint32_t row0, row1, row2, row3;
8136 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
8138 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8140 /* print the asserts */
8141 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8143 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
8144 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
8145 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
8146 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
8148 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8149 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8150 i, row3, row2, row1, row0);
8158 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
8160 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8163 /* print the asserts */
8164 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8166 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
8167 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
8168 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
8169 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
8171 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8172 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8173 i, row3, row2, row1, row0);
8181 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
8183 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8186 /* print the asserts */
8187 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8189 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
8190 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
8191 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
8192 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
8194 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8195 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8196 i, row3, row2, row1, row0);
8204 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
8206 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
8209 /* print the asserts */
8210 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
8212 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
8213 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
8214 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
8215 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
8217 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
8218 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
8219 i, row3, row2, row1, row0);
8230 bxe_attn_int_deasserted3(struct bxe_softc *sc,
8233 int func = SC_FUNC(sc);
8236 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
8238 if (attn & BXE_PMF_LINK_ASSERT(sc)) {
8240 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8241 bxe_read_mf_cfg(sc);
8242 sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
8243 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
8244 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
8246 if (val & DRV_STATUS_DCC_EVENT_MASK)
8247 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
8249 if (val & DRV_STATUS_SET_MF_BW)
8252 if (val & DRV_STATUS_DRV_INFO_REQ)
8253 bxe_handle_drv_info_req(sc);
8256 if (val & DRV_STATUS_VF_DISABLED)
8257 bxe_vf_handle_flr_event(sc);
8260 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
8265 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
8266 (sc->dcbx_enabled > 0))
8267 /* start dcbx state machine */
8268 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED);
8272 if (val & DRV_STATUS_AFEX_EVENT_MASK)
8273 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK);
8276 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
8277 bxe_handle_eee_event(sc);
8279 if (sc->link_vars.periodic_flags &
8280 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
8281 /* sync with link */
8283 sc->link_vars.periodic_flags &=
8284 ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
8287 ; // XXX bxe_link_sync_notify(sc);
8288 bxe_link_report(sc);
8292              * Always call it here: bxe_link_report() will
8293              * prevent duplicate link indications.
8295 bxe_link_status_update(sc);
8297 } else if (attn & BXE_MC_ASSERT_BITS) {
8299 BLOGE(sc, "MC assert!\n");
8301 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8302 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8303 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8304 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8305 bxe_panic(sc, ("MC assert!\n"));
8307 } else if (attn & BXE_MCP_ASSERT) {
8309 BLOGE(sc, "MCP assert!\n");
8310 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8311 // XXX bxe_fw_dump(sc);
8314 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8318 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8319 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8320 if (attn & BXE_GRC_TIMEOUT) {
8321 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8322 BLOGE(sc, "GRC time-out 0x%08x\n", val);
8324 if (attn & BXE_GRC_RSV) {
8325 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8326 BLOGE(sc, "GRC reserved 0x%08x\n", val);
8328 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8333 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8336 int port = SC_PORT(sc);
8338 uint32_t val0, mask0, val1, mask1;
8341 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8342 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8343 BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8344 /* CFC error attention */
8346 BLOGE(sc, "FATAL error from CFC\n");
8350 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8351 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8352 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8353 /* RQ_USDMDP_FIFO_OVERFLOW */
8354 if (val & 0x18000) {
8355 BLOGE(sc, "FATAL error from PXP\n");
8358 if (!CHIP_IS_E1x(sc)) {
8359 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8360 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8364 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8365 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8367 if (attn & AEU_PXP2_HW_INT_BIT) {
8368 /* CQ47854 workaround do not panic on
8369 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8371 if (!CHIP_IS_E1x(sc)) {
8372 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8373 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8374 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8375 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8377              * If only the PXP2_EOP_ERROR_BIT is set in
8378              * STS0 and STS1, clear it.
8380              * We may lose additional attentions between
8381              * STS0 and STS_CLR0; in this case the user will
8382              * not be notified about them.
8384 if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8386 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8388 /* print the register, since no one can restore it */
8389 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8392 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8395 if (val0 & PXP2_EOP_ERROR_BIT) {
8396 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8399 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8400 * set then clear attention from PXP2 block without panic
8402 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8403 ((val1 & mask1) == 0))
8404 attn &= ~AEU_PXP2_HW_INT_BIT;
8409 if (attn & HW_INTERRUT_ASSERT_SET_2) {
8410 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8411 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8413 val = REG_RD(sc, reg_offset);
8414 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8415 REG_WR(sc, reg_offset, val);
8417 BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8418 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8419 bxe_panic(sc, ("HW block attention set2\n"));
8424 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8427 int port = SC_PORT(sc);
8431 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8432 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8433 BLOGE(sc, "DB hw attention 0x%08x\n", val);
8434 /* DORQ discard attention */
8436 BLOGE(sc, "FATAL error from DORQ\n");
8440 if (attn & HW_INTERRUT_ASSERT_SET_1) {
8441 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8442 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8444 val = REG_RD(sc, reg_offset);
8445 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8446 REG_WR(sc, reg_offset, val);
8448 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8449 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8450 bxe_panic(sc, ("HW block attention set1\n"));
8455 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8458 int port = SC_PORT(sc);
8462 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8463 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8465 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8466 val = REG_RD(sc, reg_offset);
8467 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8468 REG_WR(sc, reg_offset, val);
8470 BLOGW(sc, "SPIO5 hw attention\n");
8472 /* Fan failure attention */
8473 elink_hw_reset_phy(&sc->link_params);
8474 bxe_fan_failure(sc);
8477 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8479 elink_handle_module_detect_int(&sc->link_params);
8483 if (attn & HW_INTERRUT_ASSERT_SET_0) {
8484 val = REG_RD(sc, reg_offset);
8485 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8486 REG_WR(sc, reg_offset, val);
8488 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8489 (attn & HW_INTERRUT_ASSERT_SET_0)));
8494 bxe_attn_int_deasserted(struct bxe_softc *sc,
8495 uint32_t deasserted)
8497 struct attn_route attn;
8498 struct attn_route *group_mask;
8499 int port = SC_PORT(sc);
8504 uint8_t global = FALSE;
8507 * Need to take HW lock because MCP or other port might also
8508 * try to handle this event.
8510 bxe_acquire_alr(sc);
8512 if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8514          * In case of parity errors don't handle attentions so that the
8515          * other function can also "see" the parity errors.
8517 sc->recovery_state = BXE_RECOVERY_INIT;
8518 // XXX schedule a recovery task...
8519 /* disable HW interrupts */
8520 bxe_int_disable(sc);
8521 bxe_release_alr(sc);
8525 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8526 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8527 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8528 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8529 if (!CHIP_IS_E1x(sc)) {
8530 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8535 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8536 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8538 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8539 if (deasserted & (1 << index)) {
8540 group_mask = &sc->attn_group[index];
8543 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8544 group_mask->sig[0], group_mask->sig[1],
8545 group_mask->sig[2], group_mask->sig[3],
8546 group_mask->sig[4]);
8548 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8549 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8550 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8551 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8552 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8556 bxe_release_alr(sc);
8558 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8559 reg_addr = (HC_REG_COMMAND_REG + port*32 +
8560 COMMAND_REG_ATTN_BITS_CLR);
8562 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8567 "about to mask 0x%08x at %s addr 0x%08x\n", val,
8568 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8569 REG_WR(sc, reg_addr, val);
8571 if (~sc->attn_state & deasserted) {
8572 BLOGE(sc, "IGU error\n");
8575 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8576 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8578 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8580 aeu_mask = REG_RD(sc, reg_addr);
8582 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8583 aeu_mask, deasserted);
8584 aeu_mask |= (deasserted & 0x3ff);
8585 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8587 REG_WR(sc, reg_addr, aeu_mask);
8588 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8590 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8591 sc->attn_state &= ~deasserted;
8592 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8596 bxe_attn_int(struct bxe_softc *sc)
8598 /* read local copy of bits */
8599 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8600 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8601 uint32_t attn_state = sc->attn_state;
8603 /* look for changed bits */
8604 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
8605 uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
8608 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8609 attn_bits, attn_ack, asserted, deasserted);
8611 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8612 BLOGE(sc, "BAD attention state\n");
8615 /* handle bits that were raised */
8617 bxe_attn_int_asserted(sc, asserted);
8621 bxe_attn_int_deasserted(sc, deasserted);
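/*
 * Worked example (standalone) of the edge-detect logic above: a bit is
 * newly asserted when it is raised in attn_bits but not yet acked and
 * not in our saved state; it is newly deasserted when it has dropped
 * from attn_bits but is still acked and still in our saved state.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t attn_bits  = 0x5; /* bits 0 and 2 currently raised */
    uint32_t attn_ack   = 0x6; /* bits 1 and 2 previously acked */
    uint32_t attn_state = 0x6; /* bits 1 and 2 in the saved state */

    uint32_t asserted   = attn_bits & ~attn_ack & ~attn_state;
    uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

    /* prints: asserted=0x1 deasserted=0x2 (bit 0 new, bit 1 dropped) */
    printf("asserted=0x%x deasserted=0x%x\n", asserted, deasserted);
    return (0);
}
#endif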
8626 bxe_update_dsb_idx(struct bxe_softc *sc)
8628 struct host_sp_status_block *def_sb = sc->def_sb;
8631 mb(); /* status block is written to by the chip */
8633 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8634 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8635 rc |= BXE_DEF_SB_ATT_IDX;
8638 if (sc->def_idx != def_sb->sp_sb.running_index) {
8639 sc->def_idx = def_sb->sp_sb.running_index;
8640 rc |= BXE_DEF_SB_IDX;
8648 static inline struct ecore_queue_sp_obj *
8649 bxe_cid_to_q_obj(struct bxe_softc *sc,
8652 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8653 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8657 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8659 struct ecore_mcast_ramrod_params rparam;
8662 memset(&rparam, 0, sizeof(rparam));
8664 rparam.mcast_obj = &sc->mcast_obj;
8668 /* clear pending state for the last command */
8669 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8671 /* if there are pending mcast commands - send them */
8672 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8673 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8676 "ERROR: Failed to send pending mcast commands (%d)\n",
8681 BXE_MCAST_UNLOCK(sc);
8685 bxe_handle_classification_eqe(struct bxe_softc *sc,
8686 union event_ring_elem *elem)
8688 unsigned long ramrod_flags = 0;
8690 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8691 struct ecore_vlan_mac_obj *vlan_mac_obj;
8693 /* always push next commands out, don't wait here */
8694 bit_set(&ramrod_flags, RAMROD_CONT);
8696 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8697 case ECORE_FILTER_MAC_PENDING:
8698 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8699 vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8702 case ECORE_FILTER_MCAST_PENDING:
8703 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8705 * This is only relevant for 57710 where multicast MACs are
8706 * configured as unicast MACs using the same ramrod.
8708 bxe_handle_mcast_eqe(sc);
8712 BLOGE(sc, "Unsupported classification command: %d\n",
8713 elem->message.data.eth_event.echo);
8717 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8720 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8721 } else if (rc > 0) {
8722 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8727 bxe_handle_rx_mode_eqe(struct bxe_softc *sc,
8728 union event_ring_elem *elem)
8730 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8732     /* send the rx_mode command again if it was requested */
8733 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8735 bxe_set_storm_rx_mode(sc);
8738 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED,
8740 bxe_set_iscsi_eth_rx_mode(sc, TRUE);
8742 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
8744 bxe_set_iscsi_eth_rx_mode(sc, FALSE);
8750 bxe_update_eq_prod(struct bxe_softc *sc,
8753 storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8754 wmb(); /* keep prod updates ordered */
8758 bxe_eq_int(struct bxe_softc *sc)
8760 uint16_t hw_cons, sw_cons, sw_prod;
8761 union event_ring_elem *elem;
8766 struct ecore_queue_sp_obj *q_obj;
8767 struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8768 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8770 hw_cons = le16toh(*sc->eq_cons_sb);
8773      * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8774      * When we reach the next page we need to adjust so that the loop
8775      * condition below is met. The next element is the size of a regular
8776      * element, hence we increment by 1.
8778 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8783      * This function never runs in parallel with itself for a specific
8784      * sc, so there is no need for a read memory barrier here.
8786 sw_cons = sc->eq_cons;
8787 sw_prod = sc->eq_prod;
8789 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8790 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8794 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8796 elem = &sc->eq[EQ_DESC(sw_cons)];
8800 rc = bxe_iov_eq_sp_event(sc, elem);
8802 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc);
8807 /* elem CID originates from FW, actually LE */
8808 cid = SW_CID(elem->message.data.cfc_del_event.cid);
8809 opcode = elem->message.opcode;
8811 /* handle eq element */
8814 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
8815 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n");
8816 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event);
8820 case EVENT_RING_OPCODE_STAT_QUERY:
8821 BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8823 /* nothing to do with stats comp */
8826 case EVENT_RING_OPCODE_CFC_DEL:
8827 /* handle according to cid range */
8828 /* we may want to verify here that the sc state is HALTING */
8829 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8830 q_obj = bxe_cid_to_q_obj(sc, cid);
8831 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8836 case EVENT_RING_OPCODE_STOP_TRAFFIC:
8837 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8838 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8841 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8844 case EVENT_RING_OPCODE_START_TRAFFIC:
8845 BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8846 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8849 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8852 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8853 echo = elem->message.data.function_update_event.echo;
8854 if (echo == SWITCH_UPDATE) {
8855 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8856 if (f_obj->complete_cmd(sc, f_obj,
8857 ECORE_F_CMD_SWITCH_UPDATE)) {
8863 "AFEX: ramrod completed FUNCTION_UPDATE\n");
8865 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE);
8867                  * We will perform the queue updates from the sp_core_task since
8868                  * all queue SP operations should run under the CORE_LOCK.
8870 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state);
8871 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8877 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
8878 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS);
8879 bxe_after_afex_vif_lists(sc, elem);
8883 case EVENT_RING_OPCODE_FORWARD_SETUP:
8884 q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8885 if (q_obj->complete_cmd(sc, q_obj,
8886 ECORE_Q_CMD_SETUP_TX_ONLY)) {
8891 case EVENT_RING_OPCODE_FUNCTION_START:
8892 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8893 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8898 case EVENT_RING_OPCODE_FUNCTION_STOP:
8899 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8900 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8906 switch (opcode | sc->state) {
8907 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8908 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8909 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8910 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8911 rss_raw->clear_pending(rss_raw);
8914 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8915 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8916 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8917 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8918 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8919 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8920 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8921 bxe_handle_classification_eqe(sc, elem);
8924 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8925 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8926 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8927 BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8928 bxe_handle_mcast_eqe(sc);
8931 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8932 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8933 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8934 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8935 bxe_handle_rx_mode_eqe(sc, elem);
8939             /* unknown event: log an error and continue */
8940 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8941 elem->message.opcode, sc->state);
8949 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8951 sc->eq_cons = sw_cons;
8952 sc->eq_prod = sw_prod;
8954 /* make sure that above mem writes were issued towards the memory */
8957 /* update producer */
8958 bxe_update_eq_prod(sc, sc->eq_prod);
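/*
 * Standalone sketch of the page-skipping index advance that the hw_cons
 * adjustment above assumes: the last slot of each ring page holds a
 * next-page pointer rather than a real element, so advancing past the
 * last usable slot skips one extra index. The constants here are
 * illustrative, not the driver's.
 */
#if 0
#include <stdio.h>

#define DESC_PER_PAGE 256
#define DESC_MAX_PAGE (DESC_PER_PAGE - 1) /* 255 */

static unsigned
next_idx(unsigned x)
{
    /* slot 254 is the last usable one; slot 255 is the link, skip it */
    return (((x & DESC_MAX_PAGE) == (DESC_MAX_PAGE - 1)) ? (x + 2) : (x + 1));
}

int
main(void)
{
    /* prints: 254 256 257 -- index 255 (the link slot) never appears */
    printf("%u %u %u\n", next_idx(253), next_idx(254), next_idx(256));
    return (0);
}
#endif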
8962 bxe_handle_sp_tq(void *context,
8965 struct bxe_softc *sc = (struct bxe_softc *)context;
8968 BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8970 /* what work needs to be performed? */
8971 status = bxe_update_dsb_idx(sc);
8973 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8976 if (status & BXE_DEF_SB_ATT_IDX) {
8977 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8979 status &= ~BXE_DEF_SB_ATT_IDX;
8982 /* SP events: STAT_QUERY and others */
8983 if (status & BXE_DEF_SB_IDX) {
8984 /* handle EQ completions */
8985 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8987 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8988 le16toh(sc->def_idx), IGU_INT_NOP, 1);
8989 status &= ~BXE_DEF_SB_IDX;
8992 /* if status is non zero then something went wrong */
8993 if (__predict_false(status)) {
8994 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8997 /* ack status block only if something was actually handled */
8998 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8999 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
9002 * Must be called after the EQ processing (since eq leads to sriov
9003 * ramrod completion flows).
9004 * This flow may have been scheduled by the arrival of a ramrod
9005 * completion, or by the sriov code rescheduling itself.
9007 // XXX bxe_iov_sp_task(sc);
9010 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */
9011 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
9013 bxe_link_report(sc);
9014 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
9020 bxe_handle_fp_tq(void *context,
9023 struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
9024 struct bxe_softc *sc = fp->sc;
9025 uint8_t more_tx = FALSE;
9026 uint8_t more_rx = FALSE;
9028 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
9031      * The IFF_DRV_RUNNING state can't be checked here since we process
9032      * slowpath events on a client queue during setup. Instead we need
9033      * to add a "process/continue" flag that the driver can use to tell
9034      * this task not to do anything.
9037 if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
9042 /* update the fastpath index */
9043 bxe_update_fp_sb_idx(fp);
9045 /* XXX add loop here if ever support multiple tx CoS */
9046 /* fp->txdata[cos] */
9047 if (bxe_has_tx_work(fp)) {
9049 more_tx = bxe_txeof(sc, fp);
9050 BXE_FP_TX_UNLOCK(fp);
9053 if (bxe_has_rx_work(fp)) {
9054 more_rx = bxe_rxeof(sc, fp);
9057 if (more_rx /*|| more_tx*/) {
9058 /* still more work to do */
9059 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9063 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9064 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9068 bxe_task_fp(struct bxe_fastpath *fp)
9070 struct bxe_softc *sc = fp->sc;
9071 uint8_t more_tx = FALSE;
9072 uint8_t more_rx = FALSE;
9074 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
9076 /* update the fastpath index */
9077 bxe_update_fp_sb_idx(fp);
9079 /* XXX add loop here if ever support multiple tx CoS */
9080 /* fp->txdata[cos] */
9081 if (bxe_has_tx_work(fp)) {
9083 more_tx = bxe_txeof(sc, fp);
9084 BXE_FP_TX_UNLOCK(fp);
9087 if (bxe_has_rx_work(fp)) {
9088 more_rx = bxe_rxeof(sc, fp);
9091 if (more_rx /*|| more_tx*/) {
9092         /* still more work to do; bail out of this ISR and process later */
9093 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
9098      * Here we write the fastpath index taken before doing any tx or rx work.
9099      * It is entirely possible that other hw events occurred up to this point
9100      * and were processed accordingly above. Since we are going to write an
9101      * older fastpath index, another interrupt will arrive in which we might
9102      * not do any work.
9104 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
9105 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
9109 * Legacy interrupt entry point.
9111 * Verifies that the controller generated the interrupt and
9112 * then calls a separate routine to handle the various
9113 * interrupt causes: link, RX, and TX.
9116 bxe_intr_legacy(void *xsc)
9118 struct bxe_softc *sc = (struct bxe_softc *)xsc;
9119 struct bxe_fastpath *fp;
9120 uint16_t status, mask;
9123 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
9126 /* Don't handle any interrupts if we're not ready. */
9127 if (__predict_false(sc->intr_sem != 0)) {
9133 * 0 for ustorm, 1 for cstorm
9134 * the bits returned from ack_int() are 0-15
9135 * bit 0 = attention status block
9136 * bit 1 = fast path status block
9137 * a mask of 0x2 or more = tx/rx event
9138 * a mask of 1 = slow path event
9141 status = bxe_ack_int(sc);
9143 /* the interrupt is not for us */
9144 if (__predict_false(status == 0)) {
9145 BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
9149 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
9151 FOR_EACH_ETH_QUEUE(sc, i) {
9153 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
9154 if (status & mask) {
9155 /* acknowledge and disable further fastpath interrupts */
9156 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9163 if (CNIC_SUPPORT(sc)) {
9165 if (status & (mask | 0x1)) {
9172 if (__predict_false(status & 0x1)) {
9173 /* acknowledge and disable further slowpath interrupts */
9174 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9176 /* schedule slowpath handler */
9177 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9182 if (__predict_false(status)) {
9183 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
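/*
 * Worked example (standalone) of the status-bit mapping used in
 * bxe_intr_legacy() above: bit 0 is the slowpath event and fastpath
 * queue i maps to bit (1 + i + cnic_offset). With cnic_offset = 0 and
 * status = 0x0005, the slowpath and fastpath queue 1 are both pending.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint16_t status = 0x0005;
    int cnic_offset = 0; /* would be 1 with CNIC support */
    int i;

    if (status & 0x1)
        printf("slowpath event pending\n");

    for (i = 0; i < 4; i++) {
        uint16_t mask = 0x2 << (i + cnic_offset);
        if (status & mask)
            printf("fastpath queue %d pending (mask 0x%x)\n", i, mask);
    }
    return (0);
}
#endif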
9187 /* slowpath interrupt entry point */
9189 bxe_intr_sp(void *xsc)
9191 struct bxe_softc *sc = (struct bxe_softc *)xsc;
9193 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
9195 /* acknowledge and disable further slowpath interrupts */
9196 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9198 /* schedule slowpath handler */
9199 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
9202 /* fastpath interrupt entry point */
9204 bxe_intr_fp(void *xfp)
9206 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
9207 struct bxe_softc *sc = fp->sc;
9209 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
9212 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
9213 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
9216 /* Don't handle any interrupts if we're not ready. */
9217 if (__predict_false(sc->intr_sem != 0)) {
9222 /* acknowledge and disable further fastpath interrupts */
9223 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9228 /* Release all interrupts allocated by the driver. */
9230 bxe_interrupt_free(struct bxe_softc *sc)
9234 switch (sc->interrupt_mode) {
9235 case INTR_MODE_INTX:
9236 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
9237 if (sc->intr[0].resource != NULL) {
9238 bus_release_resource(sc->dev,
9241 sc->intr[0].resource);
9245 for (i = 0; i < sc->intr_count; i++) {
9246 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
9247 if (sc->intr[i].resource && sc->intr[i].rid) {
9248 bus_release_resource(sc->dev,
9251 sc->intr[i].resource);
9254 pci_release_msi(sc->dev);
9256 case INTR_MODE_MSIX:
9257 for (i = 0; i < sc->intr_count; i++) {
9258 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
9259 if (sc->intr[i].resource && sc->intr[i].rid) {
9260 bus_release_resource(sc->dev,
9263 sc->intr[i].resource);
9266 pci_release_msi(sc->dev);
9269 /* nothing to do as initial allocation failed */
9275 * This function determines and allocates the appropriate
9276  * interrupt based on system capabilities and user request.
9278  * The user may force a particular interrupt mode, specify
9279  * the number of receive queues, specify the method for
9280  * distributing received frames to receive queues, or use
9281  * the default settings which will automatically select the
9282  * best supported combination. In addition, the OS may or
9283  * may not support certain combinations of these settings.
9284  * This routine attempts to reconcile the settings requested
9285  * by the user with the capabilities available from the system
9286 * to select the optimal combination of features.
9289 * 0 = Success, !0 = Failure.
9292 bxe_interrupt_alloc(struct bxe_softc *sc)
9296 int num_requested = 0;
9297 int num_allocated = 0;
9301 /* get the number of available MSI/MSI-X interrupts from the OS */
9302 if (sc->interrupt_mode > 0) {
9303 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
9304 msix_count = pci_msix_count(sc->dev);
9307 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
9308 msi_count = pci_msi_count(sc->dev);
9311 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
9312 msi_count, msix_count);
9315 do { /* try allocating MSI-X interrupt resources (at least 2) */
9316 if (sc->interrupt_mode != INTR_MODE_MSIX) {
9320 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
9322 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9326 /* ask for the necessary number of MSI-X vectors */
9327 num_requested = min((sc->num_queues + 1), msix_count);
9329 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
9331 num_allocated = num_requested;
9332 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
9333 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
9334 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9338 if (num_allocated < 2) { /* possible? */
9339 BLOGE(sc, "MSI-X allocation less than 2!\n");
9340 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9341 pci_release_msi(sc->dev);
9345 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9346 num_requested, num_allocated);
9348 /* best effort so use the number of vectors allocated to us */
9349 sc->intr_count = num_allocated;
9350 sc->num_queues = num_allocated - 1;
9352 rid = 1; /* initial resource identifier */
9354 /* allocate the MSI-X vectors */
9355 for (i = 0; i < num_allocated; i++) {
9356 sc->intr[i].rid = (rid + i);
9358 if ((sc->intr[i].resource =
9359 bus_alloc_resource_any(sc->dev,
9362 RF_ACTIVE)) == NULL) {
9363 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9366 for (j = (i - 1); j >= 0; j--) {
9367 bus_release_resource(sc->dev,
9370 sc->intr[j].resource);
9375 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9376 pci_release_msi(sc->dev);
9380 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9384 do { /* try allocating MSI vector resources (at least 2) */
9385 if (sc->interrupt_mode != INTR_MODE_MSI) {
9389 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9391 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9395 /* ask for the necessary number of MSI vectors */
9396 num_requested = min((sc->num_queues + 1), msi_count);
9398 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9400 num_allocated = num_requested;
9401 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9402 BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9403 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9407 if (num_allocated < 2) { /* possible? */
9408 BLOGE(sc, "MSI allocation less than 2!\n");
9409 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9410 pci_release_msi(sc->dev);
9414 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9415 num_requested, num_allocated);
9417 /* best effort so use the number of vectors allocated to us */
9418 sc->intr_count = num_allocated;
9419 sc->num_queues = num_allocated - 1;
9421 rid = 1; /* initial resource identifier */
9423 /* allocate the MSI vectors */
9424 for (i = 0; i < num_allocated; i++) {
9425 sc->intr[i].rid = (rid + i);
9427 if ((sc->intr[i].resource =
9428 bus_alloc_resource_any(sc->dev,
9431 RF_ACTIVE)) == NULL) {
9432 BLOGE(sc, "Failed to map MSI[%d] (rid=%d)!\n",
9435 for (j = (i - 1); j >= 0; j--) {
9436 bus_release_resource(sc->dev,
9439 sc->intr[j].resource);
9444 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9445 pci_release_msi(sc->dev);
9449 BLOGD(sc, DBG_LOAD, "Mapped MSI[%d] (rid=%d)\n", i, (rid + i));
9453 do { /* try allocating INTx vector resources */
9454 if (sc->interrupt_mode != INTR_MODE_INTX) {
9458 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9460 /* only one vector for INTx */
9464 rid = 0; /* initial resource identifier */
9466 sc->intr[0].rid = rid;
9468 if ((sc->intr[0].resource =
9469 bus_alloc_resource_any(sc->dev,
9472 (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9473 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9476 sc->interrupt_mode = -1; /* Failed! */
9480 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9483 if (sc->interrupt_mode == -1) {
9484 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9488 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9489 sc->interrupt_mode, sc->num_queues);
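
/*
 * Illustrative sketch only (not part of the driver): the allocation logic
 * above is a cascade of do { ... } while (0) blocks, where each interrupt
 * mode either succeeds or downgrades sc->interrupt_mode and breaks out so
 * the next block can try. The same control-flow pattern in isolation, with
 * hypothetical try_msix()/try_msi()/try_intx() helpers:
 */
#if 0
static int
alloc_with_fallback(struct bxe_softc *sc)
{
    do { /* MSI-X first */
        if (sc->interrupt_mode != INTR_MODE_MSIX)
            break;
        if (try_msix(sc) == 0)
            return (0);
        sc->interrupt_mode = INTR_MODE_MSI;  /* downgrade and fall through */
    } while (0);

    do { /* then MSI */
        if (sc->interrupt_mode != INTR_MODE_MSI)
            break;
        if (try_msi(sc) == 0)
            return (0);
        sc->interrupt_mode = INTR_MODE_INTX; /* downgrade and fall through */
    } while (0);

    do { /* finally legacy INTx */
        if (sc->interrupt_mode != INTR_MODE_INTX)
            break;
        if (try_intx(sc) == 0)
            return (0);
        sc->interrupt_mode = -1;             /* nothing left to try */
    } while (0);

    return (1); /* mirrors the "0 = Success, !0 = Failure" convention */
}
#endif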
9497 bxe_interrupt_detach(struct bxe_softc *sc)
9499 struct bxe_fastpath *fp;
9502 /* release interrupt resources */
9503 for (i = 0; i < sc->intr_count; i++) {
9504 if (sc->intr[i].resource && sc->intr[i].tag) {
9505 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9506 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9510 for (i = 0; i < sc->num_queues; i++) {
9513 taskqueue_drain(fp->tq, &fp->tq_task);
9514 taskqueue_free(fp->tq);
9519 if (sc->rx_mode_tq) {
9520 taskqueue_drain(sc->rx_mode_tq, &sc->rx_mode_tq_task);
9521 taskqueue_free(sc->rx_mode_tq);
9522 sc->rx_mode_tq = NULL;
9526 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9527 taskqueue_free(sc->sp_tq);
9533 * Enables interrupts and attaches the ISRs.
9535 * When using multiple MSI/MSI-X vectors the first vector
9536 * is used for slowpath operations while all remaining
9537 * vectors are used for fastpath operations. If only a
9538 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9539 * ISR must look for both slowpath and fastpath completions.
9542 bxe_interrupt_attach(struct bxe_softc *sc)
9544 struct bxe_fastpath *fp;
9548 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9549 "bxe%d_sp_tq", sc->unit);
9550 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9551 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
9552 taskqueue_thread_enqueue,
9554 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9555 "%s", sc->sp_tq_name);
9557 snprintf(sc->rx_mode_tq_name, sizeof(sc->rx_mode_tq_name),
9558 "bxe%d_rx_mode_tq", sc->unit);
9559 TASK_INIT(&sc->rx_mode_tq_task, 0, bxe_handle_rx_mode_tq, sc);
9560 sc->rx_mode_tq = taskqueue_create_fast(sc->rx_mode_tq_name, M_NOWAIT,
9561 taskqueue_thread_enqueue,
9563 taskqueue_start_threads(&sc->rx_mode_tq, 1, PWAIT, /* lower priority */
9564 "%s", sc->rx_mode_tq_name);
9566 for (i = 0; i < sc->num_queues; i++) {
9568 snprintf(fp->tq_name, sizeof(fp->tq_name),
9569 "bxe%d_fp%d_tq", sc->unit, i);
9570 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9571 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
9572 taskqueue_thread_enqueue,
9574 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9578 /* setup interrupt handlers */
9579 if (sc->interrupt_mode == INTR_MODE_MSIX) {
9580 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9583 * Setup the interrupt handler. Note that we pass the driver instance
9584 * to the interrupt handler for the slowpath.
9586 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9587 (INTR_TYPE_NET | INTR_MPSAFE),
9588 NULL, bxe_intr_sp, sc,
9589 &sc->intr[0].tag)) != 0) {
9590 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9591 goto bxe_interrupt_attach_exit;
9594 bus_describe_intr(sc->dev, sc->intr[0].resource,
9595 sc->intr[0].tag, "sp");
9597 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9599 /* initialize the fastpath vectors (note the first was used for sp) */
9600 for (i = 0; i < sc->num_queues; i++) {
9602 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9605 * Setup the interrupt handler. Note that we pass the
9606 * fastpath context to the interrupt handler in this
9609 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9610 (INTR_TYPE_NET | INTR_MPSAFE),
9611 NULL, bxe_intr_fp, fp,
9612 &sc->intr[i + 1].tag)) != 0) {
9613 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9615 goto bxe_interrupt_attach_exit;
9618 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9619 sc->intr[i + 1].tag, "fp%02d", i);
9621 /* bind the fastpath instance to a cpu */
9622 if (sc->num_queues > 1) {
9623 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9626 fp->state = BXE_FP_STATE_IRQ;
9628 } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9629 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI[0] vector.\n");
9632 * Setup the interrupt handler. Note that we pass the driver instance
9633 * to the interrupt handler for the slowpath.
9635 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9636 (INTR_TYPE_NET | INTR_MPSAFE),
9637 NULL, bxe_intr_sp, sc,
9638 &sc->intr[0].tag)) != 0) {
9639 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9640 goto bxe_interrupt_attach_exit;
9643 bus_describe_intr(sc->dev, sc->intr[0].resource,
9644 sc->intr[0].tag, "sp");
9646 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9648 /* initialize the fastpath vectors (note the first was used for sp) */
9649 for (i = 0; i < sc->num_queues; i++) {
9651 BLOGD(sc, DBG_LOAD, "Enabling MSI[%d] vector\n", (i + 1));
9654 * Setup the interrupt handler. Note that we pass the
9655 * fastpath context to the interrupt handler in this
9658 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9659 (INTR_TYPE_NET | INTR_MPSAFE),
9660 NULL, bxe_intr_fp, fp,
9661 &sc->intr[i + 1].tag)) != 0) {
9662 BLOGE(sc, "Failed to allocate MSI[%d] vector (%d)\n",
9664 goto bxe_interrupt_attach_exit;
9667 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9668 sc->intr[i + 1].tag, "fp%02d", i);
9670 /* bind the fastpath instance to a cpu */
9671 if (sc->num_queues > 1) {
9672 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9675 fp->state = BXE_FP_STATE_IRQ;
9677 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9678 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9681 * Setup the interrupt handler. Note that we pass the
9682 * driver instance to the interrupt handler which
9683 * will handle both the slowpath and fastpath.
9685 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9686 (INTR_TYPE_NET | INTR_MPSAFE),
9687 NULL, bxe_intr_legacy, sc,
9688 &sc->intr[0].tag)) != 0) {
9689 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9690 goto bxe_interrupt_attach_exit;
9694 bxe_interrupt_attach_exit:
9699 static int bxe_init_hw_common_chip(struct bxe_softc *sc);
9700 static int bxe_init_hw_common(struct bxe_softc *sc);
9701 static int bxe_init_hw_port(struct bxe_softc *sc);
9702 static int bxe_init_hw_func(struct bxe_softc *sc);
9703 static void bxe_reset_common(struct bxe_softc *sc);
9704 static void bxe_reset_port(struct bxe_softc *sc);
9705 static void bxe_reset_func(struct bxe_softc *sc);
9706 static int bxe_gunzip_init(struct bxe_softc *sc);
9707 static void bxe_gunzip_end(struct bxe_softc *sc);
9708 static int bxe_init_firmware(struct bxe_softc *sc);
9709 static void bxe_release_firmware(struct bxe_softc *sc);
9712 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9713 .init_hw_cmn_chip = bxe_init_hw_common_chip,
9714 .init_hw_cmn = bxe_init_hw_common,
9715 .init_hw_port = bxe_init_hw_port,
9716 .init_hw_func = bxe_init_hw_func,
9718 .reset_hw_cmn = bxe_reset_common,
9719 .reset_hw_port = bxe_reset_port,
9720 .reset_hw_func = bxe_reset_func,
9722 .gunzip_init = bxe_gunzip_init,
9723 .gunzip_end = bxe_gunzip_end,
9725 .init_fw = bxe_init_firmware,
9726 .release_fw = bxe_release_firmware,
9730 bxe_init_func_obj(struct bxe_softc *sc)
9734 ecore_init_func_obj(sc,
9736 BXE_SP(sc, func_rdata),
9737 BXE_SP_MAPPING(sc, func_rdata),
9738 BXE_SP(sc, func_afex_rdata),
9739 BXE_SP_MAPPING(sc, func_afex_rdata),
9744 bxe_init_hw(struct bxe_softc *sc,
9747 struct ecore_func_state_params func_params = { NULL };
9750 /* prepare the parameters for function state transitions */
9751 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9753 func_params.f_obj = &sc->func_obj;
9754 func_params.cmd = ECORE_F_CMD_HW_INIT;
9756 func_params.params.hw_init.load_phase = load_code;
9759 * Via a plethora of function pointers, we will eventually reach
9760 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9762 rc = ecore_func_state_change(sc, &func_params);
9768 bxe_fill(struct bxe_softc *sc,
9775 if (!(len % 4) && !(addr % 4)) {
9776 for (i = 0; i < len; i += 4) {
9777 REG_WR(sc, (addr + i), fill);
9780 for (i = 0; i < len; i++) {
9781 REG_WR8(sc, (addr + i), fill);
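
/*
 * Worked example (illustrative): bxe_fill() above uses 32-bit REG_WR()
 * stores only when both the length and the address are dword aligned.
 * bxe_fill(sc, base, 0, 120) with an aligned base issues 120/4 = 30 dword
 * writes; bxe_fill(sc, base + 2, 0, 120) must instead issue 120 REG_WR8()
 * byte writes because the address is misaligned.
 */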
9786 /* writes FP SP data to FW - data_size in dwords */
9788 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9790 uint32_t *sb_data_p,
9795 for (index = 0; index < data_size; index++) {
9797 (BAR_CSTRORM_INTMEM +
9798 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9799 (sizeof(uint32_t) * index)),
9800 *(sb_data_p + index));
9805 bxe_zero_fp_sb(struct bxe_softc *sc,
9808 struct hc_status_block_data_e2 sb_data_e2;
9809 struct hc_status_block_data_e1x sb_data_e1x;
9810 uint32_t *sb_data_p;
9811 uint32_t data_size = 0;
9813 if (!CHIP_IS_E1x(sc)) {
9814 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9815 sb_data_e2.common.state = SB_DISABLED;
9816 sb_data_e2.common.p_func.vf_valid = FALSE;
9817 sb_data_p = (uint32_t *)&sb_data_e2;
9818 data_size = (sizeof(struct hc_status_block_data_e2) /
9821 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9822 sb_data_e1x.common.state = SB_DISABLED;
9823 sb_data_e1x.common.p_func.vf_valid = FALSE;
9824 sb_data_p = (uint32_t *)&sb_data_e1x;
9825 data_size = (sizeof(struct hc_status_block_data_e1x) /
9829 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9831 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9832 0, CSTORM_STATUS_BLOCK_SIZE);
9833 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9834 0, CSTORM_SYNC_BLOCK_SIZE);
9838 bxe_wr_sp_sb_data(struct bxe_softc *sc,
9839 struct hc_sp_status_block_data *sp_sb_data)
9844 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9847 (BAR_CSTRORM_INTMEM +
9848 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9849 (i * sizeof(uint32_t))),
9850 *((uint32_t *)sp_sb_data + i));
9855 bxe_zero_sp_sb(struct bxe_softc *sc)
9857 struct hc_sp_status_block_data sp_sb_data;
9859 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9861 sp_sb_data.state = SB_DISABLED;
9862 sp_sb_data.p_func.vf_valid = FALSE;
9864 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9867 (BAR_CSTRORM_INTMEM +
9868 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9869 0, CSTORM_SP_STATUS_BLOCK_SIZE);
9871 (BAR_CSTRORM_INTMEM +
9872 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9873 0, CSTORM_SP_SYNC_BLOCK_SIZE);
9877 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9881 hc_sm->igu_sb_id = igu_sb_id;
9882 hc_sm->igu_seg_id = igu_seg_id;
9883 hc_sm->timer_value = 0xFF;
9884 hc_sm->time_to_expire = 0xFFFFFFFF;
9888 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9890 /* zero out state machine indices */
9893 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9896 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9897 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9898 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9899 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9904 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9905 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9908 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9909 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9910 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9911 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9912 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9913 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9914 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9915 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9919 bxe_init_sb(struct bxe_softc *sc,
9926 struct hc_status_block_data_e2 sb_data_e2;
9927 struct hc_status_block_data_e1x sb_data_e1x;
9928 struct hc_status_block_sm *hc_sm_p;
9929 uint32_t *sb_data_p;
9933 if (CHIP_INT_MODE_IS_BC(sc)) {
9934 igu_seg_id = HC_SEG_ACCESS_NORM;
9936 igu_seg_id = IGU_SEG_ACCESS_NORM;
9939 bxe_zero_fp_sb(sc, fw_sb_id);
9941 if (!CHIP_IS_E1x(sc)) {
9942 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9943 sb_data_e2.common.state = SB_ENABLED;
9944 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9945 sb_data_e2.common.p_func.vf_id = vfid;
9946 sb_data_e2.common.p_func.vf_valid = vf_valid;
9947 sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9948 sb_data_e2.common.same_igu_sb_1b = TRUE;
9949 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9950 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9951 hc_sm_p = sb_data_e2.common.state_machine;
9952 sb_data_p = (uint32_t *)&sb_data_e2;
9953 data_size = (sizeof(struct hc_status_block_data_e2) /
9955 bxe_map_sb_state_machines(sb_data_e2.index_data);
9957 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9958 sb_data_e1x.common.state = SB_ENABLED;
9959 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9960 sb_data_e1x.common.p_func.vf_id = 0xff;
9961 sb_data_e1x.common.p_func.vf_valid = FALSE;
9962 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9963 sb_data_e1x.common.same_igu_sb_1b = TRUE;
9964 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9965 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9966 hc_sm_p = sb_data_e1x.common.state_machine;
9967 sb_data_p = (uint32_t *)&sb_data_e1x;
9968 data_size = (sizeof(struct hc_status_block_data_e1x) /
9970 bxe_map_sb_state_machines(sb_data_e1x.index_data);
9973 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9974 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9976 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9978 /* write indices to HW - PCI guarantees endianness of regpairs */
9979 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9982 static inline uint8_t
9983 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9985 if (CHIP_IS_E1x(fp->sc)) {
9986 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9992 static inline uint32_t
9993 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
9994 struct bxe_fastpath *fp)
9996 uint32_t offset = BAR_USTRORM_INTMEM;
10000 return (PXP_VF_ADDR_USDM_QUEUES_START +
10001 (sc->acquire_resp.resc.hw_qid[fp->index] *
10002 sizeof(struct ustorm_queue_zone_data)));
10005 if (!CHIP_IS_E1x(sc)) {
10006 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
10008 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
10015 bxe_init_eth_fp(struct bxe_softc *sc,
10018 struct bxe_fastpath *fp = &sc->fp[idx];
10019 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
10020 unsigned long q_type = 0;
10026 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
10027 "bxe%d_fp%d_tx_lock", sc->unit, idx);
10028 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
10030 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
10031 "bxe%d_fp%d_rx_lock", sc->unit, idx);
10032 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
10034 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
10035 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
10037 fp->cl_id = (CHIP_IS_E1x(sc)) ?
10038 (SC_L_ID(sc) + idx) :
10039 /* want client ID same as IGU SB ID for non-E1 */
10041 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
10043 /* setup sb indices */
10044 if (!CHIP_IS_E1x(sc)) {
10045 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
10046 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
10048 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
10049 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
10052 /* init shortcut */
10053 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
10055 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
10058 * XXX If multiple CoS are ever supported then each fastpath structure
10059 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
10061 for (cos = 0; cos < sc->max_cos; cos++) {
10064 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
10066 /* nothing more for a VF to do */
10071 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
10072 fp->fw_sb_id, fp->igu_sb_id);
10074 bxe_update_fp_sb_idx(fp);
10076 /* Configure Queue State object */
10077 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
10078 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
10080 ecore_init_queue_obj(sc,
10081 &sc->sp_objs[idx].q_obj,
10086 BXE_SP(sc, q_rdata),
10087 BXE_SP_MAPPING(sc, q_rdata),
10090 /* configure classification DBs */
10091 ecore_init_mac_obj(sc,
10092 &sc->sp_objs[idx].mac_obj,
10096 BXE_SP(sc, mac_rdata),
10097 BXE_SP_MAPPING(sc, mac_rdata),
10098 ECORE_FILTER_MAC_PENDING,
10100 ECORE_OBJ_TYPE_RX_TX,
10103 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
10104 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
10108 bxe_update_rx_prod(struct bxe_softc *sc,
10109 struct bxe_fastpath *fp,
10110 uint16_t rx_bd_prod,
10111 uint16_t rx_cq_prod,
10112 uint16_t rx_sge_prod)
10114 struct ustorm_eth_rx_producers rx_prods = { 0 };
10117 /* update producers */
10118 rx_prods.bd_prod = rx_bd_prod;
10119 rx_prods.cqe_prod = rx_cq_prod;
10120 rx_prods.sge_prod = rx_sge_prod;
10123 * Make sure that the BD and SGE data is updated before updating the
10124 * producers since the FW might read the BD/SGE right after the
10126 * producer update. This is only applicable for weak-ordered memory
10127 * model archs such as IA-64. The following barrier is also mandatory
10128 * since the FW assumes BDs must have buffers.
10132 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
10134 (fp->ustorm_rx_prods_offset + (i * 4)),
10135 ((uint32_t *)&rx_prods)[i]);
10138 wmb(); /* keep prod updates ordered */
10141 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
10142 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
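
/*
 * Ordering sketch (illustration only, not driver code): the barriers above
 * implement the classic "publish data, then publish the producer" rule for
 * a single-producer ring shared with the firmware:
 */
#if 0
static void
publish(volatile uint32_t *ring, volatile uint32_t *prod, uint32_t idx,
        uint32_t val)
{
    ring[idx] = val;   /* 1) write the descriptor data               */
    wmb();             /* 2) make the data visible before the index  */
    *prod = idx + 1;   /* 3) only then expose the new producer value */
}
#endif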
10146 bxe_init_rx_rings(struct bxe_softc *sc)
10148 struct bxe_fastpath *fp;
10151 for (i = 0; i < sc->num_queues; i++) {
10154 fp->rx_bd_cons = 0;
10157 * Activate the BD ring...
10158 * Warning, this will generate an interrupt (to the TSTORM)
10159 * so this can only be done after the chip is initialized
10161 bxe_update_rx_prod(sc, fp,
10170 if (CHIP_IS_E1(sc)) {
10172 (BAR_USTRORM_INTMEM +
10173 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
10174 U64_LO(fp->rcq_dma.paddr));
10176 (BAR_USTRORM_INTMEM +
10177 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
10178 U64_HI(fp->rcq_dma.paddr));
10184 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
10186 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
10187 fp->tx_db.data.zero_fill1 = 0;
10188 fp->tx_db.data.prod = 0;
10190 fp->tx_pkt_prod = 0;
10191 fp->tx_pkt_cons = 0;
10192 fp->tx_bd_prod = 0;
10193 fp->tx_bd_cons = 0;
10194 fp->eth_q_stats.tx_pkts = 0;
10198 bxe_init_tx_rings(struct bxe_softc *sc)
10202 for (i = 0; i < sc->num_queues; i++) {
10205 for (cos = 0; cos < sc->max_cos; cos++) {
10206 bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]);
10209 bxe_init_tx_ring_one(&sc->fp[i]);
10215 bxe_init_def_sb(struct bxe_softc *sc)
10217 struct host_sp_status_block *def_sb = sc->def_sb;
10218 bus_addr_t mapping = sc->def_sb_dma.paddr;
10219 int igu_sp_sb_index;
10221 int port = SC_PORT(sc);
10222 int func = SC_FUNC(sc);
10223 int reg_offset, reg_offset_en5;
10226 struct hc_sp_status_block_data sp_sb_data;
10228 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
10230 if (CHIP_INT_MODE_IS_BC(sc)) {
10231 igu_sp_sb_index = DEF_SB_IGU_ID;
10232 igu_seg_id = HC_SEG_ACCESS_DEF;
10234 igu_sp_sb_index = sc->igu_dsb_id;
10235 igu_seg_id = IGU_SEG_ACCESS_DEF;
10239 section = ((uint64_t)mapping +
10240 offsetof(struct host_sp_status_block, atten_status_block));
10241 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
10242 sc->attn_state = 0;
10244 reg_offset = (port) ?
10245 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
10246 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
10247 reg_offset_en5 = (port) ?
10248 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
10249 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
10251 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
10252 /* take care of sig[0]..sig[4] */
10253 for (sindex = 0; sindex < 4; sindex++) {
10254 sc->attn_group[index].sig[sindex] =
10255 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
10258 if (!CHIP_IS_E1x(sc)) {
10260 * enable5 is separate from the rest of the registers,
10261 * and the address skip is 4 and not 16 between the different groups.
10264 sc->attn_group[index].sig[4] =
10265 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
10267 sc->attn_group[index].sig[4] = 0;
10271 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10272 reg_offset = (port) ?
10273 HC_REG_ATTN_MSG1_ADDR_L :
10274 HC_REG_ATTN_MSG0_ADDR_L;
10275 REG_WR(sc, reg_offset, U64_LO(section));
10276 REG_WR(sc, (reg_offset + 4), U64_HI(section));
10277 } else if (!CHIP_IS_E1x(sc)) {
10278 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
10279 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
10282 section = ((uint64_t)mapping +
10283 offsetof(struct host_sp_status_block, sp_sb));
10285 bxe_zero_sp_sb(sc);
10287 /* PCI guarantees endianness of the regpair */
10288 sp_sb_data.state = SB_ENABLED;
10289 sp_sb_data.host_sb_addr.lo = U64_LO(section);
10290 sp_sb_data.host_sb_addr.hi = U64_HI(section);
10291 sp_sb_data.igu_sb_id = igu_sp_sb_index;
10292 sp_sb_data.igu_seg_id = igu_seg_id;
10293 sp_sb_data.p_func.pf_id = func;
10294 sp_sb_data.p_func.vnic_id = SC_VN(sc);
10295 sp_sb_data.p_func.vf_id = 0xff;
10297 bxe_wr_sp_sb_data(sc, &sp_sb_data);
10299 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
10303 bxe_init_sp_ring(struct bxe_softc *sc)
10305 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
10306 sc->spq_prod_idx = 0;
10307 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
10308 sc->spq_prod_bd = sc->spq;
10309 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
10313 bxe_init_eq_ring(struct bxe_softc *sc)
10315 union event_ring_elem *elem;
10318 for (i = 1; i <= NUM_EQ_PAGES; i++) {
10319 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
10321 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
10323 (i % NUM_EQ_PAGES)));
10324 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
10326 (i % NUM_EQ_PAGES)));
10330 sc->eq_prod = NUM_EQ_DESC;
10331 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
10333 atomic_store_rel_long(&sc->eq_spq_left,
10334 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
10335 NUM_EQ_DESC) - 1));
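
/*
 * Ring-chaining note (illustrative): the last descriptor of each EQ page is
 * used as a "next page" pointer, and the (i % NUM_EQ_PAGES) term wraps the
 * final page back to page 0, forming a circular ring. With NUM_EQ_PAGES = 2
 * the links would be page0.last -> page1 and page1.last -> page0.
 */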
10339 bxe_init_internal_common(struct bxe_softc *sc)
10343 if (IS_MF_SI(sc)) {
10345 * In switch independent mode, the TSTORM needs to accept
10346 * packets that failed classification, since approximate match
10347 * mac addresses aren't written to NIG LLH.
10350 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
10352 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
10354 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
10359 * Zero this manually as its initialization is currently missing
10362 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
10364 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
10368 if (!CHIP_IS_E1x(sc)) {
10369 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
10370 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
10375 bxe_init_internal(struct bxe_softc *sc,
10376 uint32_t load_code)
10378 switch (load_code) {
10379 case FW_MSG_CODE_DRV_LOAD_COMMON:
10380 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
10381 bxe_init_internal_common(sc);
10384 case FW_MSG_CODE_DRV_LOAD_PORT:
10385 /* nothing to do */
10388 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
10389 /* internal memory per function is initialized inside bxe_pf_init */
10393 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
10399 storm_memset_func_cfg(struct bxe_softc *sc,
10400 struct tstorm_eth_function_common_config *tcfg,
10406 addr = (BAR_TSTRORM_INTMEM +
10407 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
10408 size = sizeof(struct tstorm_eth_function_common_config);
10409 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
10413 bxe_func_init(struct bxe_softc *sc,
10414 struct bxe_func_init_params *p)
10416 struct tstorm_eth_function_common_config tcfg = { 0 };
10418 if (CHIP_IS_E1x(sc)) {
10419 storm_memset_func_cfg(sc, &tcfg, p->func_id);
10422 /* Enable the function in the FW */
10423 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10424 storm_memset_func_en(sc, p->func_id, 1);
10427 if (p->func_flgs & FUNC_FLG_SPQ) {
10428 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10430 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10436 * Calculates the sum of vn_min_rates.
10437 * It's needed for further normalizing of the min_rates.
10439 * sum of vn_min_rates.
10441 * 0 - if all the min_rates are 0.
10442 * In the latter case the fairness algorithm should be deactivated.
10443 * If all min rates are not zero then those that are zeroes will be set to 1.
10446 bxe_calc_vn_min(struct bxe_softc *sc,
10447 struct cmng_init_input *input)
10450 uint32_t vn_min_rate;
10454 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10455 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10456 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10457 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10459 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10460 /* skip hidden VNs */
10462 } else if (!vn_min_rate) {
10463 /* If min rate is zero - set it to 100 */
10464 vn_min_rate = DEF_MIN_RATE;
10469 input->vnic_min_rate[vn] = vn_min_rate;
10472 /* if ETS or all min rates are zeros - disable fairness */
10473 if (BXE_IS_ETS_ENABLED(sc)) {
10474 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10475 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10476 } else if (all_zero) {
10477 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10478 BLOGD(sc, DBG_LOAD,
10479 "Fariness disabled (all MIN values are zeroes)\n");
10481 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
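
/*
 * Worked example (assumed mf_config values, illustration only): min BW
 * fields of {25, 0, 75, 0} scale to vnic_min_rate = {2500, DEF_MIN_RATE,
 * 7500, DEF_MIN_RATE}; the zeroes are bumped to the default so that every
 * visible VN keeps a minimal share. Only when all fields are zero (or ETS
 * is enabled) is CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared and fairness
 * disabled.
 */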
10485 static inline uint16_t
10486 bxe_extract_max_cfg(struct bxe_softc *sc,
10489 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10490 FUNC_MF_CFG_MAX_BW_SHIFT);
10493 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10501 bxe_calc_vn_max(struct bxe_softc *sc,
10503 struct cmng_init_input *input)
10505 uint16_t vn_max_rate;
10506 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10509 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10512 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10514 if (IS_MF_SI(sc)) {
10515 /* max_cfg is in percent of the link speed */
10516 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10517 } else { /* SD modes */
10518 /* max_cfg is absolute in 100Mb units */
10519 vn_max_rate = (max_cfg * 100);
10523 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10525 input->vnic_max_rate[vn] = vn_max_rate;
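
/*
 * Worked example (illustrative): in switch-independent mode max_cfg is a
 * percentage of the link speed, while in SD modes it is absolute in 100Mb
 * units. With a 10Gbps link (line_speed = 10000) and max_cfg = 30, SI mode
 * yields (10000 * 30) / 100 = 3000 Mbps; SD mode yields 30 * 100 = 3000
 * Mbps too, but the SD figure would not change if the link renegotiated.
 */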
10529 bxe_cmng_fns_init(struct bxe_softc *sc,
10533 struct cmng_init_input input;
10536 memset(&input, 0, sizeof(struct cmng_init_input));
10538 input.port_rate = sc->link_vars.line_speed;
10540 if (cmng_type == CMNG_FNS_MINMAX) {
10541 /* read mf conf from shmem */
10543 bxe_read_mf_cfg(sc);
10546 /* get VN min rate and enable fairness if not 0 */
10547 bxe_calc_vn_min(sc, &input);
10549 /* get VN max rate */
10550 if (sc->port.pmf) {
10551 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10552 bxe_calc_vn_max(sc, vn, &input);
10556 /* always enable rate shaping and fairness */
10557 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10559 ecore_init_cmng(&input, &sc->cmng);
10563 /* rate shaping and fairness are disabled */
10564 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10568 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10570 if (CHIP_REV_IS_SLOW(sc)) {
10571 return (CMNG_FNS_NONE);
10575 return (CMNG_FNS_MINMAX);
10578 return (CMNG_FNS_NONE);
10582 storm_memset_cmng(struct bxe_softc *sc,
10583 struct cmng_init *cmng,
10591 addr = (BAR_XSTRORM_INTMEM +
10592 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10593 size = sizeof(struct cmng_struct_per_port);
10594 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10596 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10597 func = func_by_vn(sc, vn);
10599 addr = (BAR_XSTRORM_INTMEM +
10600 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10601 size = sizeof(struct rate_shaping_vars_per_vn);
10602 ecore_storm_memset_struct(sc, addr, size,
10603 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10605 addr = (BAR_XSTRORM_INTMEM +
10606 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10607 size = sizeof(struct fairness_vars_per_vn);
10608 ecore_storm_memset_struct(sc, addr, size,
10609 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10614 bxe_pf_init(struct bxe_softc *sc)
10616 struct bxe_func_init_params func_init = { 0 };
10617 struct event_ring_data eq_data = { { 0 } };
10620 if (!CHIP_IS_E1x(sc)) {
10621 /* reset IGU PF statistics: MSIX + ATTN */
10624 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10625 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10626 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10630 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10631 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10632 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10633 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10637 /* function setup flags */
10638 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10641 * This flag is relevant for E1x only.
10642 * E2 doesn't have a TPA configuration in a function level.
10644 flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10646 func_init.func_flgs = flags;
10647 func_init.pf_id = SC_FUNC(sc);
10648 func_init.func_id = SC_FUNC(sc);
10649 func_init.spq_map = sc->spq_dma.paddr;
10650 func_init.spq_prod = sc->spq_prod_idx;
10652 bxe_func_init(sc, &func_init);
10654 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10657 * Congestion management values depend on the link rate.
10658 * There is no active link so initial link rate is set to 10Gbps.
10659 * When the link comes up the congestion management values are
10660 * re-calculated according to the actual link rate.
10662 sc->link_vars.line_speed = SPEED_10000;
10663 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10665 /* Only the PMF sets the HW */
10666 if (sc->port.pmf) {
10667 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10670 /* init Event Queue - the PCI bus guarantees correct endianness */
10671 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10672 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10673 eq_data.producer = sc->eq_prod;
10674 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10675 eq_data.sb_id = DEF_SB_ID;
10676 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10680 bxe_hc_int_enable(struct bxe_softc *sc)
10682 int port = SC_PORT(sc);
10683 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10684 uint32_t val = REG_RD(sc, addr);
10685 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10686 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10687 (sc->intr_count == 1)) ? TRUE : FALSE;
10688 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10691 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10692 HC_CONFIG_0_REG_INT_LINE_EN_0);
10693 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10694 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10696 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10699 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10700 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10701 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10702 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10704 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10705 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10706 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10707 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10709 if (!CHIP_IS_E1(sc)) {
10710 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10713 REG_WR(sc, addr, val);
10715 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10719 if (CHIP_IS_E1(sc)) {
10720 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10723 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10724 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10726 REG_WR(sc, addr, val);
10728 /* ensure that HC_CONFIG is written before leading/trailing edge config */
10731 if (!CHIP_IS_E1(sc)) {
10732 /* init leading/trailing edge */
10734 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10735 if (sc->port.pmf) {
10736 /* enable nig and gpio3 attention */
10743 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10744 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10747 /* make sure that interrupts are indeed enabled from here on */
10752 bxe_igu_int_enable(struct bxe_softc *sc)
10755 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10756 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10757 (sc->intr_count == 1)) ? TRUE : FALSE;
10758 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10760 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10763 val &= ~(IGU_PF_CONF_INT_LINE_EN |
10764 IGU_PF_CONF_SINGLE_ISR_EN);
10765 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10766 IGU_PF_CONF_ATTN_BIT_EN);
10768 val |= IGU_PF_CONF_SINGLE_ISR_EN;
10771 val &= ~IGU_PF_CONF_INT_LINE_EN;
10772 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10773 IGU_PF_CONF_ATTN_BIT_EN |
10774 IGU_PF_CONF_SINGLE_ISR_EN);
10776 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10777 val |= (IGU_PF_CONF_INT_LINE_EN |
10778 IGU_PF_CONF_ATTN_BIT_EN |
10779 IGU_PF_CONF_SINGLE_ISR_EN);
10782 /* clean the previous status - the IGU must be configured prior to the ack */
10783 if ((!msix) || single_msix) {
10784 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10788 val |= IGU_PF_CONF_FUNC_EN;
10790 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10791 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10793 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10797 /* init leading/trailing edge */
10799 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10800 if (sc->port.pmf) {
10801 /* enable nig and gpio3 attention */
10808 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10809 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10811 /* make sure that interrupts are indeed enabled from here on */
10816 bxe_int_enable(struct bxe_softc *sc)
10818 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10819 bxe_hc_int_enable(sc);
10821 bxe_igu_int_enable(sc);
10826 bxe_hc_int_disable(struct bxe_softc *sc)
10828 int port = SC_PORT(sc);
10829 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10830 uint32_t val = REG_RD(sc, addr);
10833 * In E1 we must use only the PCI configuration space to disable the
10834 * MSI/MSI-X capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10837 if (CHIP_IS_E1(sc)) {
10839 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10840 * to prevent the HC from sending interrupts after we exit the function
10842 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10844 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10845 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10846 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10848 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10849 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10850 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10851 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10854 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10856 /* flush all outstanding writes */
10859 REG_WR(sc, addr, val);
10860 if (REG_RD(sc, addr) != val) {
10861 BLOGE(sc, "proper val not read from HC IGU!\n");
10866 bxe_igu_int_disable(struct bxe_softc *sc)
10868 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10870 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10871 IGU_PF_CONF_INT_LINE_EN |
10872 IGU_PF_CONF_ATTN_BIT_EN);
10874 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10876 /* flush all outstanding writes */
10879 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10880 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10881 BLOGE(sc, "proper val not read from IGU!\n");
10886 bxe_int_disable(struct bxe_softc *sc)
10888 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10889 bxe_hc_int_disable(sc);
10891 bxe_igu_int_disable(sc);
10896 bxe_nic_init(struct bxe_softc *sc,
10901 for (i = 0; i < sc->num_queues; i++) {
10902 bxe_init_eth_fp(sc, i);
10905 rmb(); /* ensure status block indices were read */
10907 bxe_init_rx_rings(sc);
10908 bxe_init_tx_rings(sc);
10914 /* initialize MOD_ABS interrupts */
10915 elink_init_mod_abs_int(sc, &sc->link_vars,
10916 sc->devinfo.chip_id,
10917 sc->devinfo.shmem_base,
10918 sc->devinfo.shmem2_base,
10921 bxe_init_def_sb(sc);
10922 bxe_update_dsb_idx(sc);
10923 bxe_init_sp_ring(sc);
10924 bxe_init_eq_ring(sc);
10925 bxe_init_internal(sc, load_code);
10927 bxe_stats_init(sc);
10929 /* flush all before enabling interrupts */
10932 bxe_int_enable(sc);
10934 /* check for SPIO5 */
10935 bxe_attn_int_deasserted0(sc,
10937 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10939 AEU_INPUTS_ATTN_BITS_SPIO5);
10943 bxe_init_objs(struct bxe_softc *sc)
10945 /* mcast rules must be added to tx if tx switching is enabled */
10946 ecore_obj_type o_type =
10947 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10950 /* RX_MODE controlling object */
10951 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10953 /* multicast configuration controlling object */
10954 ecore_init_mcast_obj(sc,
10960 BXE_SP(sc, mcast_rdata),
10961 BXE_SP_MAPPING(sc, mcast_rdata),
10962 ECORE_FILTER_MCAST_PENDING,
10966 /* Setup CAM credit pools */
10967 ecore_init_mac_credit_pool(sc,
10970 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10971 VNICS_PER_PATH(sc));
10973 ecore_init_vlan_credit_pool(sc,
10975 SC_ABS_FUNC(sc) >> 1,
10976 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10977 VNICS_PER_PATH(sc));
10979 /* RSS configuration object */
10980 ecore_init_rss_config_obj(sc,
10986 BXE_SP(sc, rss_rdata),
10987 BXE_SP_MAPPING(sc, rss_rdata),
10988 ECORE_FILTER_RSS_CONF_PENDING,
10989 &sc->sp_state, ECORE_OBJ_TYPE_RX);
10993 * Initialize the function. This must be called before sending CLIENT_SETUP
10994 * for the first client.
10997 bxe_func_start(struct bxe_softc *sc)
10999 struct ecore_func_state_params func_params = { NULL };
11000 struct ecore_func_start_params *start_params = &func_params.params.start;
11002 /* Prepare parameters for function state transitions */
11003 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
11005 func_params.f_obj = &sc->func_obj;
11006 func_params.cmd = ECORE_F_CMD_START;
11008 /* Function parameters */
11009 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
11010 start_params->sd_vlan_tag = OVLAN(sc);
11012 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
11013 start_params->network_cos_mode = STATIC_COS;
11014 } else { /* CHIP_IS_E1X */
11015 start_params->network_cos_mode = FW_WRR;
11018 start_params->gre_tunnel_mode = 0;
11019 start_params->gre_tunnel_rss = 0;
11021 return (ecore_func_state_change(sc, &func_params));
11025 bxe_set_power_state(struct bxe_softc *sc,
11030 /* If there is no power capability, silently succeed */
11031 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
11032 BLOGW(sc, "No power capability\n");
11036 pmcsr = pci_read_config(sc->dev,
11037 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11042 pci_write_config(sc->dev,
11043 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11044 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
11046 if (pmcsr & PCIM_PSTAT_DMASK) {
11047 /* delay required during transition out of D3hot */
11054 /* XXX if there are other clients above don't shut down the power */
11056 /* don't shut down the power for emulation and FPGA */
11057 if (CHIP_REV_IS_SLOW(sc)) {
11061 pmcsr &= ~PCIM_PSTAT_DMASK;
11062 pmcsr |= PCIM_PSTAT_D3;
11065 pmcsr |= PCIM_PSTAT_PMEENABLE;
11068 pci_write_config(sc->dev,
11069 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
11073 * No more memory access after this point until device is brought back
11079 BLOGE(sc, "Can't support PCI power state = %d\n", state);
11087 /* return true if succeeded to acquire the lock */
11089 bxe_trylock_hw_lock(struct bxe_softc *sc,
11092 uint32_t lock_status;
11093 uint32_t resource_bit = (1 << resource);
11094 int func = SC_FUNC(sc);
11095 uint32_t hw_lock_control_reg;
11097 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
11099 /* Validating that the resource is within range */
11100 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
11101 BLOGD(sc, DBG_LOAD,
11102 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
11103 resource, HW_LOCK_MAX_RESOURCE_VALUE);
11108 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
11110 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
11113 /* try to acquire the lock */
11114 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
11115 lock_status = REG_RD(sc, hw_lock_control_reg);
11116 if (lock_status & resource_bit) {
11120 BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);
11126 * Get the recovery leader resource id according to the engine this function
11127 * belongs to. Currently only 2 engines are supported.
11130 bxe_get_leader_lock_resource(struct bxe_softc *sc)
11133 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
11135 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
11139 /* try to acquire a leader lock for current engine */
11141 bxe_trylock_leader_lock(struct bxe_softc *sc)
11143 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11147 bxe_release_leader_lock(struct bxe_softc *sc)
11149 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
11152 /* close gates #2, #3 and #4 */
11154 bxe_set_234_gates(struct bxe_softc *sc,
11159 /* gates #2 and #4a are closed/opened for "not E1" only */
11160 if (!CHIP_IS_E1(sc)) {
11162 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
11164 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
11168 if (CHIP_IS_E1x(sc)) {
11169 /* prevent interrupts from HC on both ports */
11170 val = REG_RD(sc, HC_REG_CONFIG_1);
11171 REG_WR(sc, HC_REG_CONFIG_1,
11172 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
11173 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
11175 val = REG_RD(sc, HC_REG_CONFIG_0);
11176 REG_WR(sc, HC_REG_CONFIG_0,
11177 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
11178 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
11180 /* Prevent incoming interrupts in the IGU */
11181 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
11183 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
11185 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
11186 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
11189 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
11190 close ? "closing" : "opening");
11195 /* poll for the pending writes bit; it should be cleared in no more than 1s */
11197 bxe_er_poll_igu_vq(struct bxe_softc *sc)
11199 uint32_t cnt = 1000;
11200 uint32_t pend_bits = 0;
11203 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
11205 if (pend_bits == 0) {
11210 } while (--cnt > 0);
11213 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
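
/*
 * The loop above follows the driver's usual bounded-poll idiom. A generic
 * form of the same pattern (sketch only, poll_reg_clear() is hypothetical):
 */
#if 0
static int
poll_reg_clear(struct bxe_softc *sc, uint32_t reg, int tries, int delay_us)
{
    while (tries-- > 0) {
        if (REG_RD(sc, reg) == 0)
            return (0);       /* condition met within the budget */
        DELAY(delay_us);
    }
    return (ETIMEDOUT);       /* still pending after all retries */
}
#endif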
11220 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
11223 bxe_clp_reset_prep(struct bxe_softc *sc,
11224 uint32_t *magic_val)
11226 /* Do some magic... */
11227 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
11228 *magic_val = val & SHARED_MF_CLP_MAGIC;
11229 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
11232 /* restore the value of the 'magic' bit */
11234 bxe_clp_reset_done(struct bxe_softc *sc,
11235 uint32_t magic_val)
11237 /* Restore the 'magic' bit value... */
11238 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
11239 MFCFG_WR(sc, shared_mf_config.clp_mb,
11240 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
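
/*
 * Usage sketch (mirrors how these helpers are paired below in
 * bxe_reset_mcp_prep()/bxe_reset_mcp_comp(); illustration only):
 */
#if 0
    uint32_t magic;
    bxe_clp_reset_prep(sc, &magic); /* save the current bit, force it on */
    /* ... reset the MCP ... */
    bxe_clp_reset_done(sc, magic);  /* restore the saved bit value */
#endif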
11243 /* prepare for MCP reset, takes care of CLP configurations */
11245 bxe_reset_mcp_prep(struct bxe_softc *sc,
11246 uint32_t *magic_val)
11249 uint32_t validity_offset;
11251 /* set `magic' bit in order to save MF config */
11252 if (!CHIP_IS_E1(sc)) {
11253 bxe_clp_reset_prep(sc, magic_val);
11256 /* get shmem offset */
11257 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
11259 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
11261 /* Clear validity map flags */
11263 REG_WR(sc, shmem + validity_offset, 0);
11267 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
11268 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
11271 bxe_mcp_wait_one(struct bxe_softc *sc)
11273 /* special handling for emulation and FPGA (10 times longer) */
11274 if (CHIP_REV_IS_SLOW(sc)) {
11275 DELAY((MCP_ONE_TIMEOUT*10) * 1000);
11277 DELAY((MCP_ONE_TIMEOUT) * 1000);
11281 /* initialize shmem_base and wait for the validity signature to appear */
11283 bxe_init_shmem(struct bxe_softc *sc)
11289 sc->devinfo.shmem_base =
11290 sc->link_params.shmem_base =
11291 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
11293 if (sc->devinfo.shmem_base) {
11294 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
11295 if (val & SHR_MEM_VALIDITY_MB)
11299 bxe_mcp_wait_one(sc);
11301 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
11303 BLOGE(sc, "BAD MCP validity signature\n");
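
/*
 * Timing note (derived from the constants above): the loop makes up to
 * MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50 polls of 100ms each
 * (10x longer per poll on emulation/FPGA, see bxe_mcp_wait_one()), giving
 * the validity signature 5 seconds to appear on real hardware.
 */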
11309 bxe_reset_mcp_comp(struct bxe_softc *sc,
11310 uint32_t magic_val)
11312 int rc = bxe_init_shmem(sc);
11314 /* Restore the `magic' bit value */
11315 if (!CHIP_IS_E1(sc)) {
11316 bxe_clp_reset_done(sc, magic_val);
11323 bxe_pxp_prep(struct bxe_softc *sc)
11325 if (!CHIP_IS_E1(sc)) {
11326 REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
11327 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
11333 * Reset the whole chip except for:
11335 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
11337 * - MISC (including AEU)
11342 bxe_process_kill_chip_reset(struct bxe_softc *sc,
11345 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
11346 uint32_t global_bits2, stay_reset2;
11349 * Bits that have to be set in reset_mask2 if we want to reset 'global'
11350 * (per chip) blocks.
11353 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
11354 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
11357 * Don't reset the following blocks.
11358 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
11359 * reset, as in 4 port device they might still be owned
11360 * by the MCP (there is only one leader per path).
11363 MISC_REGISTERS_RESET_REG_1_RST_HC |
11364 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
11365 MISC_REGISTERS_RESET_REG_1_RST_PXP;
11368 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
11369 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
11370 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
11371 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
11372 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
11373 MISC_REGISTERS_RESET_REG_2_RST_GRC |
11374 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
11375 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
11376 MISC_REGISTERS_RESET_REG_2_RST_ATC |
11377 MISC_REGISTERS_RESET_REG_2_PGLC |
11378 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
11379 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
11380 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
11381 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
11382 MISC_REGISTERS_RESET_REG_2_UMAC0 |
11383 MISC_REGISTERS_RESET_REG_2_UMAC1;
11386 * Keep the following blocks in reset:
11387 * - all xxMACs are handled by the elink code.
11390 MISC_REGISTERS_RESET_REG_2_XMAC |
11391 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
11393 /* Full reset masks according to the chip */
11394 reset_mask1 = 0xffffffff;
11396 if (CHIP_IS_E1(sc))
11397 reset_mask2 = 0xffff;
11398 else if (CHIP_IS_E1H(sc))
11399 reset_mask2 = 0x1ffff;
11400 else if (CHIP_IS_E2(sc))
11401 reset_mask2 = 0xfffff;
11402 else /* CHIP_IS_E3 */
11403 reset_mask2 = 0x3ffffff;
11405 /* Don't reset global blocks unless we need to */
11407 reset_mask2 &= ~global_bits2;
11410 * In case of attention in the QM, we need to reset PXP
11411 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11412 * because otherwise QM reset would release 'close the gates' shortly
11413 * before resetting the PXP, then the PSWRQ would send a write
11414 * request to PGLUE. Then when PXP is reset, PGLUE would try to
11415 * read the payload data from PSWWR, but PSWWR would not
11416 * respond. The write queue in PGLUE would get stuck, and DMAE commands
11417 * would not return. Therefore it's important to reset the second
11418 * reset register (containing the
11419 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11420 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11423 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11424 reset_mask2 & (~not_reset_mask2));
11426 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
11427 reset_mask1 & (~not_reset_mask1));
11432 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11433 reset_mask2 & (~stay_reset2));
11438 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11443 bxe_process_kill(struct bxe_softc *sc,
11448 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11449 uint32_t tags_63_32 = 0;
11451 /* Empty the Tetris buffer, wait for 1s */
11453 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11454 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11455 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11456 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11457 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11458 if (CHIP_IS_E3(sc)) {
11459 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11462 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11463 ((port_is_idle_0 & 0x1) == 0x1) &&
11464 ((port_is_idle_1 & 0x1) == 0x1) &&
11465 (pgl_exp_rom2 == 0xffffffff) &&
11466 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11469 } while (cnt-- > 0);
11472 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11473 "are still outstanding read requests after 1s! "
11474 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11475 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11476 sr_cnt, blk_cnt, port_is_idle_0,
11477 port_is_idle_1, pgl_exp_rom2);
11483 /* Close gates #2, #3 and #4 */
11484 bxe_set_234_gates(sc, TRUE);
11486 /* Poll for IGU VQs for 57712 and newer chips */
11487 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11491 /* XXX indicate that "process kill" is in progress to MCP */
11493 /* clear "unprepared" bit */
11494 REG_WR(sc, MISC_REG_UNPREPARED, 0);
11497 /* Make sure all is written to the chip before the reset */
11501 * Wait for 1ms to empty GLUE and PCI-E core queues,
11502 * PSWHST, GRC and PSWRD Tetris buffer.
11506 /* Prepare to chip reset: */
11509 bxe_reset_mcp_prep(sc, &val);
11516 /* reset the chip */
11517 bxe_process_kill_chip_reset(sc, global);
11520 /* Recover after reset: */
11522 if (global && bxe_reset_mcp_comp(sc, val)) {
11526 /* XXX add resetting the NO_MCP mode DB here */
11528 /* Open the gates #2, #3 and #4 */
11529 bxe_set_234_gates(sc, FALSE);
11532 * IGU/AEU preparation bring back the AEU/IGU to a reset state
11533 * re-enable attentions
11540 bxe_leader_reset(struct bxe_softc *sc)
11543 uint8_t global = bxe_reset_is_global(sc);
11544 uint32_t load_code;
11547 * If not going to reset MCP, load "fake" driver to reset HW while
11548 * the driver is the owner of the HW.
11550 if (!global && !BXE_NOMCP(sc)) {
11551 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11552 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11554 BLOGE(sc, "MCP response failure, aborting\n");
11556 goto exit_leader_reset;
11559 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11560 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11561 BLOGE(sc, "MCP unexpected response, aborting\n");
11563 goto exit_leader_reset2;
11566 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11568 BLOGE(sc, "MCP response failure, aborting\n");
11570 goto exit_leader_reset2;
11574 /* try to recover after the failure */
11575 if (bxe_process_kill(sc, global)) {
11576 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11578 goto exit_leader_reset2;
11582 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11585 bxe_set_reset_done(sc);
11587 bxe_clear_reset_global(sc);
11590 exit_leader_reset2:
11592 /* unload "fake driver" if it was loaded */
11593 if (!global && !BXE_NOMCP(sc)) {
11594 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11595 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11601 bxe_release_leader_lock(sc);
11608 * prepare INIT transition, parameters configured:
11609 * - HC configuration
11610 * - Queue's CDU context
11613 bxe_pf_q_prep_init(struct bxe_softc *sc,
11614 struct bxe_fastpath *fp,
11615 struct ecore_queue_init_params *init_params)
11618 int cxt_index, cxt_offset;
11620 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11621 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11623 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11624 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11627 init_params->rx.hc_rate =
11628 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11629 init_params->tx.hc_rate =
11630 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11633 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11635 /* CQ index among the SB indices */
11636 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11637 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11639 /* set maximum number of COSs supported by this queue */
11640 init_params->max_cos = sc->max_cos;
11642 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11643 fp->index, init_params->max_cos);
11645 /* set the context pointers queue object */
11646 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11647 /* XXX change index/cid here if ever support multiple tx CoS */
11648 /* fp->txdata[cos]->cid */
11649 cxt_index = fp->index / ILT_PAGE_CIDS;
11650 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11651 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
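
/*
 * Coalescing math used above (illustrative): hc_rate is in events/second,
 * derived from the tick period in microseconds. For example, hc_rx_ticks =
 * 25 gives 1000000 / 25 = 40000 interrupts/sec as the ceiling, while a
 * value of 0 disables rate-based coalescing for that direction.
 */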
11655 /* set flags that are common to both the Tx-only and the normal connections */
11656 static unsigned long
11657 bxe_get_common_flags(struct bxe_softc *sc,
11658 struct bxe_fastpath *fp,
11659 uint8_t zero_stats)
11661 unsigned long flags = 0;
11663 /* PF driver will always initialize the Queue to an ACTIVE state */
11664 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11667 * tx only connections collect statistics (on the same index as the
11668 * parent connection). The statistics are zeroed when the parent
11669 * connection is initialized.
11672 bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11674 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11678 * tx only connections can support tx-switching, though their
11679 * CoS-ness doesn't survive the loopback
11681 if (sc->flags & BXE_TX_SWITCHING) {
11682 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11685 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11690 static unsigned long
11691 bxe_get_q_flags(struct bxe_softc *sc,
11692 struct bxe_fastpath *fp,
11695 unsigned long flags = 0;
11697 if (IS_MF_SD(sc)) {
11698 bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11701 if (sc->ifnet->if_capenable & IFCAP_LRO) {
11702 bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11703 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11705 if (fp->mode == TPA_MODE_GRO)
11706 __set_bit(ECORE_Q_FLG_TPA_GRO, &flags);
11711 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11712 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11715 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11718 /* configure silent vlan removal */
11719 if (IS_MF_AFEX(sc)) {
11720 bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags);
11724 /* merge with common flags */
11725 return (flags | bxe_get_common_flags(sc, fp, TRUE));
11729 bxe_pf_q_prep_general(struct bxe_softc *sc,
11730 struct bxe_fastpath *fp,
11731 struct ecore_general_setup_params *gen_init,
11734 gen_init->stat_id = bxe_stats_id(fp);
11735 gen_init->spcl_id = fp->cl_id;
11736 gen_init->mtu = sc->mtu;
11737 gen_init->cos = cos;
11741 bxe_pf_rx_q_prep(struct bxe_softc *sc,
11742 struct bxe_fastpath *fp,
11743 struct rxq_pause_params *pause,
11744 struct ecore_rxq_setup_params *rxq_init)
11746 uint8_t max_sge = 0;
11747 uint16_t sge_sz = 0;
11748 uint16_t tpa_agg_size = 0;
11750 if (sc->ifnet->if_capenable & IFCAP_LRO) {
11751 pause->sge_th_lo = SGE_TH_LO(sc);
11752 pause->sge_th_hi = SGE_TH_HI(sc);
11754 /* validate SGE ring has enough to cross high threshold */
11755 if (sc->dropless_fc &&
11756 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11757 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11758 BLOGW(sc, "sge ring threshold limit\n");
11761 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11762 tpa_agg_size = (2 * sc->mtu);
11763 if (tpa_agg_size < sc->max_aggregation_size) {
11764 tpa_agg_size = sc->max_aggregation_size;
11767 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11768 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11769 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
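        /*
         * The arithmetic above rounds the page count up to the next
         * PAGES_PER_SGE multiple before converting pages into SGE entries.
         */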
11770 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11773 /* pause - not for e1 */
11774 if (!CHIP_IS_E1(sc)) {
11775 pause->bd_th_lo = BD_TH_LO(sc);
11776 pause->bd_th_hi = BD_TH_HI(sc);
11778 pause->rcq_th_lo = RCQ_TH_LO(sc);
11779 pause->rcq_th_hi = RCQ_TH_HI(sc);
11781 /* validate rings have enough entries to cross high thresholds */
11782 if (sc->dropless_fc &&
11783 pause->bd_th_hi + FW_PREFETCH_CNT >
11784 sc->rx_ring_size) {
11785 BLOGW(sc, "rx bd ring threshold limit\n");
11788 if (sc->dropless_fc &&
11789 pause->rcq_th_hi + FW_PREFETCH_CNT >
11790 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11791 BLOGW(sc, "rcq ring threshold limit\n");
11794 pause->pri_map = 1;
11798 rxq_init->dscr_map = fp->rx_dma.paddr;
11799 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11800 rxq_init->rcq_map = fp->rcq_dma.paddr;
11801 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11804      * This is the maximum number of data bytes that may be
11805      * placed on a BD (not including padding).
11807 rxq_init->buf_sz = (fp->rx_buf_size -
11808 IP_HEADER_ALIGNMENT_PADDING);
11810 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11811 rxq_init->tpa_agg_sz = tpa_agg_size;
11812 rxq_init->sge_buf_sz = sge_sz;
11813 rxq_init->max_sges_pkt = max_sge;
11814 rxq_init->rss_engine_id = SC_FUNC(sc);
11815 rxq_init->mcast_engine_id = SC_FUNC(sc);
11818      * Maximum number of simultaneous TPA aggregations for this Queue.
11819 * For PF Clients it should be the maximum available number.
11820 * VF driver(s) may want to define it to a smaller value.
11822 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11824 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11825 rxq_init->fw_sb_id = fp->fw_sb_id;
11827 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11830 * configure silent vlan removal
11831 * if multi function mode is afex, then mask default vlan
11833 if (IS_MF_AFEX(sc)) {
11834 rxq_init->silent_removal_value =
11835 sc->devinfo.mf_info.afex_def_vlan_tag;
11836 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11841 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11842 struct bxe_fastpath *fp,
11843 struct ecore_txq_setup_params *txq_init,
11847 * XXX If multiple CoS is ever supported then each fastpath structure
11848 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11849 * fp->txdata[cos]->tx_dma.paddr;
11851 txq_init->dscr_map = fp->tx_dma.paddr;
11852 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11853 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11854 txq_init->fw_sb_id = fp->fw_sb_id;
11857      * set the TSS leading client id for TX classification to the
11858 * leading RSS client id
11860 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11864  * This function performs 2 steps in a queue state machine:
 *   1) RESET->INIT
 *   2) INIT->SETUP
11869 bxe_setup_queue(struct bxe_softc *sc,
11870 struct bxe_fastpath *fp,
11873 struct ecore_queue_state_params q_params = { NULL };
11874 struct ecore_queue_setup_params *setup_params =
11875 &q_params.params.setup;
11877 struct ecore_queue_setup_tx_only_params *tx_only_params =
11878 &q_params.params.tx_only;
11883 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11885 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11887 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11889 /* we want to wait for completion in this context */
11890 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11892 /* prepare the INIT parameters */
11893 bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11895 /* Set the command */
11896 q_params.cmd = ECORE_Q_CMD_INIT;
11898 /* Change the state to INIT */
11899 rc = ecore_queue_state_change(sc, &q_params);
11901 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index);
11905 BLOGD(sc, DBG_LOAD, "init complete\n");
11907 /* now move the Queue to the SETUP state */
11908 memset(setup_params, 0, sizeof(*setup_params));
11910 /* set Queue flags */
11911 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11913 /* set general SETUP parameters */
11914 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11915 FIRST_TX_COS_INDEX);
11917 bxe_pf_rx_q_prep(sc, fp,
11918 &setup_params->pause_params,
11919 &setup_params->rxq_params);
11921 bxe_pf_tx_q_prep(sc, fp,
11922 &setup_params->txq_params,
11923 FIRST_TX_COS_INDEX);
11925 /* Set the command */
11926 q_params.cmd = ECORE_Q_CMD_SETUP;
11928 /* change the state to SETUP */
11929 rc = ecore_queue_state_change(sc, &q_params);
11931 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index);
11936 /* loop through the relevant tx-only indices */
11937 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
11938 tx_index < sc->max_cos;
11940 /* prepare and send tx-only ramrod*/
11941 rc = bxe_setup_tx_only(sc, fp, &q_params,
11942 tx_only_params, tx_index, leading);
11944 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n",
11945 fp->index, tx_index);
11955 bxe_setup_leading(struct bxe_softc *sc)
11957 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11961 bxe_config_rss_pf(struct bxe_softc *sc,
11962 struct ecore_rss_config_obj *rss_obj,
11963 uint8_t config_hash)
11965 struct ecore_config_rss_params params = { NULL };
11969      * Although RSS is meaningless when there is a single HW queue, we
11970 * still need it enabled in order to have HW Rx hash generated.
11973 params.rss_obj = rss_obj;
11975     bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11977     bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11979     /* RSS configuration */
11980     bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11981     bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11982     bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11983     bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11984     if (rss_obj->udp_rss_v4) {
11985         bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11987     if (rss_obj->udp_rss_v6) {
11988         bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11992 params.rss_result_mask = MULTI_MASK;
11994 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
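    /* fill the RSS hash key with fresh random 32-bit words */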
11998 for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11999 params.rss_key[i] = arc4random();
12002     bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
12005     return (ecore_config_rss(sc, &params));
12009 bxe_config_rss_eth(struct bxe_softc *sc,
12010 uint8_t config_hash)
12012 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
12016 bxe_init_rss_pf(struct bxe_softc *sc)
12018 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
12022      * Prepare the initial contents of the indirection table if RSS is enabled.
12025 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
12026 sc->rss_conf_obj.ind_table[i] =
12027 (sc->fp->cl_id + (i % num_eth_queues));
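        /* e.g. with cl_id 1 and 4 ETH queues the entries cycle 1,2,3,4,1,2,... */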
12031 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
12035 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
12036 * per-port, so if explicit configuration is needed, do it only
12039 * For 57712 and newer it's a per-function configuration.
12041 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
12045 bxe_set_mac_one(struct bxe_softc *sc,
12047 struct ecore_vlan_mac_obj *obj,
12050 unsigned long *ramrod_flags)
12052 struct ecore_vlan_mac_ramrod_params ramrod_param;
12055 memset(&ramrod_param, 0, sizeof(ramrod_param));
12057 /* fill in general parameters */
12058 ramrod_param.vlan_mac_obj = obj;
12059 ramrod_param.ramrod_flags = *ramrod_flags;
12061 /* fill a user request section if needed */
12062 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
12063 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
12065 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
12067 /* Set the command: ADD or DEL */
12068 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
12069 ECORE_VLAN_MAC_DEL;
12072 rc = ecore_config_vlan_mac(sc, &ramrod_param);
12074 if (rc == ECORE_EXISTS) {
12075 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12076         /* do not treat adding the same MAC as an error */
12078 } else if (rc < 0) {
12079 BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
12086 bxe_set_eth_mac(struct bxe_softc *sc,
12089 unsigned long ramrod_flags = 0;
12091 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
12093 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12095 /* Eth MAC is set on RSS leading client (fp[0]) */
12096 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
12097 &sc->sp_objs->mac_obj,
12098 set, ECORE_ETH_MAC, &ramrod_flags));
12103 bxe_update_max_mf_config(struct bxe_softc *sc,
12106 /* load old values */
12107 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)];
12109 if (value != bxe_extract_max_cfg(sc, mf_cfg)) {
12110 /* leave all but MAX value */
12111 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
12113 /* set new MAX value */
12114 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) &
12115 FUNC_MF_CFG_MAX_BW_MASK);
12117 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
12123 bxe_get_cur_phy_idx(struct bxe_softc *sc)
12125 uint32_t sel_phy_idx = 0;
12127 if (sc->link_params.num_phys <= 1) {
12128 return (ELINK_INT_PHY);
12131 if (sc->link_vars.link_up) {
12132 sel_phy_idx = ELINK_EXT_PHY1;
12133 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
12134 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
12135 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
12136 ELINK_SUPPORTED_FIBRE))
12137 sel_phy_idx = ELINK_EXT_PHY2;
12139 switch (elink_phy_selection(&sc->link_params)) {
12140 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
12141 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
12142 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
12143 sel_phy_idx = ELINK_EXT_PHY1;
12145 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
12146 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
12147 sel_phy_idx = ELINK_EXT_PHY2;
12152 return (sel_phy_idx);
12156 bxe_get_link_cfg_idx(struct bxe_softc *sc)
12158 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
12161      * The selected/activated PHY is always indexed after swapping (in case
12162      * PHY swapping is enabled), so when swapping is enabled we need to
12163      * reverse the index to get its configuration.
12166 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
12167 if (sel_phy_idx == ELINK_EXT_PHY1)
12168 sel_phy_idx = ELINK_EXT_PHY2;
12169 else if (sel_phy_idx == ELINK_EXT_PHY2)
12170 sel_phy_idx = ELINK_EXT_PHY1;
12173 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
12177 bxe_set_requested_fc(struct bxe_softc *sc)
12180      * Initialize link parameters structure variables.
12181      * It is recommended to turn off RX FC for jumbo frames
12182      * for better performance.
12184 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
12185 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
12187 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
12192 bxe_calc_fc_adv(struct bxe_softc *sc)
12194 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
12195 switch (sc->link_vars.ieee_fc &
12196 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
12197 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
12199 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
12203 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
12204 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
12208 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
12209 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
12215 bxe_get_mf_speed(struct bxe_softc *sc)
12217 uint16_t line_speed = sc->link_vars.line_speed;
12220 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
12222 /* calculate the current MAX line speed limit for the MF devices */
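    /*
     * In SI mode maxCfg scales the physical line speed as a percentage
     * (e.g. maxCfg=50 on a 10000 Mbps link yields 5000 Mbps), while in
     * SD mode maxCfg is a hard cap expressed in 100 Mbps units
     * (e.g. maxCfg=50 caps the speed at 5000 Mbps).
     */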
12223 if (IS_MF_SI(sc)) {
12224 line_speed = (line_speed * maxCfg) / 100;
12225 } else { /* SD mode */
12226 uint16_t vn_max_rate = maxCfg * 100;
12228 if (vn_max_rate < line_speed) {
12229 line_speed = vn_max_rate;
12234 return (line_speed);
12238 bxe_fill_report_data(struct bxe_softc *sc,
12239 struct bxe_link_report_data *data)
12241 uint16_t line_speed = bxe_get_mf_speed(sc);
12243 memset(data, 0, sizeof(*data));
12245 /* fill the report data with the effective line speed */
12246 data->line_speed = line_speed;
12249 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
12250 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
12254 if (sc->link_vars.duplex == DUPLEX_FULL) {
12255 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
12258 /* Rx Flow Control is ON */
12259 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
12260 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
12263 /* Tx Flow Control is ON */
12264 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
12265 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
12269 /* report link status to OS, should be called under phy_lock */
12271 bxe_link_report_locked(struct bxe_softc *sc)
12273 struct bxe_link_report_data cur_data;
12275 /* reread mf_cfg */
12276 if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
12277 bxe_read_mf_cfg(sc);
12280 /* Read the current link report info */
12281 bxe_fill_report_data(sc, &cur_data);
12283 /* Don't report link down or exactly the same link status twice */
12284 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
12285 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12286 &sc->last_reported_link.link_report_flags) &&
12287 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12288 &cur_data.link_report_flags))) {
12294 /* report new link params and remember the state for the next time */
12295 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
12297 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
12298 &cur_data.link_report_flags)) {
12299 if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
12300 BLOGI(sc, "NIC Link is Down\n");
12302 const char *duplex;
12305 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
12306 &cur_data.link_report_flags)) {
12313      * Handle the FC at the end so that only these flags could possibly
12314      * be set. This way we can easily check if there is no FC enabled.
12317 if (cur_data.link_report_flags) {
12318 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12319 &cur_data.link_report_flags) &&
12320 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12321 &cur_data.link_report_flags)) {
12322 flow = "ON - receive & transmit";
12323 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12324 &cur_data.link_report_flags) &&
12325 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12326 &cur_data.link_report_flags)) {
12327 flow = "ON - receive";
12328 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
12329 &cur_data.link_report_flags) &&
12330 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
12331 &cur_data.link_report_flags)) {
12332 flow = "ON - transmit";
12334 flow = "none"; /* possible? */
12340 if_link_state_change(sc->ifnet, LINK_STATE_UP);
12341 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
12342 cur_data.line_speed, duplex, flow);
12347 bxe_link_report(struct bxe_softc *sc)
12350 bxe_link_report_locked(sc);
12351 BXE_PHY_UNLOCK(sc);
12355 bxe_link_status_update(struct bxe_softc *sc)
12357 if (sc->state != BXE_STATE_OPEN) {
12362 /* read updated dcb configuration */
12364 bxe_dcbx_pmf_update(sc);
12367 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
12368 elink_link_status_update(&sc->link_params, &sc->link_vars);
12370 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
12371 ELINK_SUPPORTED_10baseT_Full |
12372 ELINK_SUPPORTED_100baseT_Half |
12373 ELINK_SUPPORTED_100baseT_Full |
12374 ELINK_SUPPORTED_1000baseT_Full |
12375 ELINK_SUPPORTED_2500baseX_Full |
12376 ELINK_SUPPORTED_10000baseT_Full |
12377 ELINK_SUPPORTED_TP |
12378 ELINK_SUPPORTED_FIBRE |
12379 ELINK_SUPPORTED_Autoneg |
12380 ELINK_SUPPORTED_Pause |
12381 ELINK_SUPPORTED_Asym_Pause);
12382 sc->port.advertising[0] = sc->port.supported[0];
12384 sc->link_params.sc = sc;
12385 sc->link_params.port = SC_PORT(sc);
12386 sc->link_params.req_duplex[0] = DUPLEX_FULL;
12387 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
12388 sc->link_params.req_line_speed[0] = SPEED_10000;
12389 sc->link_params.speed_cap_mask[0] = 0x7f0000;
12390 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
12392 if (CHIP_REV_IS_FPGA(sc)) {
12393 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
12394 sc->link_vars.line_speed = ELINK_SPEED_1000;
12395 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
12396 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
12398 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
12399 sc->link_vars.line_speed = ELINK_SPEED_10000;
12400 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
12401 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
12404 sc->link_vars.link_up = 1;
12406 sc->link_vars.duplex = DUPLEX_FULL;
12407 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
12410 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
12411 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12412 bxe_link_report(sc);
12417 if (sc->link_vars.link_up) {
12418 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12420 bxe_stats_handle(sc, STATS_EVENT_STOP);
12422 bxe_link_report(sc);
12424 bxe_link_report(sc);
12425 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12430 bxe_initial_phy_init(struct bxe_softc *sc,
12433 int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
12434 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
12435 struct elink_params *lp = &sc->link_params;
12437 bxe_set_requested_fc(sc);
12439 if (CHIP_REV_IS_SLOW(sc)) {
12440 uint32_t bond = CHIP_BOND_ID(sc);
12443 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
12444 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12445 } else if (bond & 0x4) {
12446 if (CHIP_IS_E3(sc)) {
12447 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
12449 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
12451 } else if (bond & 0x8) {
12452 if (CHIP_IS_E3(sc)) {
12453 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
12455 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12459 /* disable EMAC for E3 and above */
12461 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12464 sc->link_params.feature_config_flags |= feat;
12469 if (load_mode == LOAD_DIAG) {
12470 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
12471 /* Prefer doing PHY loopback at 10G speed, if possible */
12472 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
12473 if (lp->speed_cap_mask[cfg_idx] &
12474 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
12475 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
12477 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
12482 if (load_mode == LOAD_LOOPBACK_EXT) {
12483 lp->loopback_mode = ELINK_LOOPBACK_EXT;
12486 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12488 BXE_PHY_UNLOCK(sc);
12490 bxe_calc_fc_adv(sc);
12492 if (sc->link_vars.link_up) {
12493 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12494 bxe_link_report(sc);
12497 if (!CHIP_REV_IS_SLOW(sc)) {
12498 bxe_periodic_start(sc);
12501 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12505 /* must be called under IF_ADDR_LOCK */
12507 bxe_init_mcast_macs_list(struct bxe_softc *sc,
12508 struct ecore_mcast_ramrod_params *p)
12510 struct ifnet *ifp = sc->ifnet;
12512 struct ifmultiaddr *ifma;
12513 struct ecore_mcast_list_elem *mc_mac;
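    /* first pass: count the link-layer multicast addresses */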
12515 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12516 if (ifma->ifma_addr->sa_family != AF_LINK) {
12523 ECORE_LIST_INIT(&p->mcast_list);
12524 p->mcast_list_len = 0;
12530 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12531 (M_NOWAIT | M_ZERO));
12533 BLOGE(sc, "Failed to allocate temp mcast list\n");
12537 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12538 if (ifma->ifma_addr->sa_family != AF_LINK) {
12542 mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
12543 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
12545 BLOGD(sc, DBG_LOAD,
12546 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12547 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12548 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12553 p->mcast_list_len = mc_count;
12559 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12561 struct ecore_mcast_list_elem *mc_mac =
12562 ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12563 struct ecore_mcast_list_elem,
12567 /* only a single free as all mc_macs are in the same heap array */
12568 free(mc_mac, M_DEVBUF);
12573 bxe_set_mc_list(struct bxe_softc *sc)
12575 struct ecore_mcast_ramrod_params rparam = { NULL };
12578 rparam.mcast_obj = &sc->mcast_obj;
12580 BXE_MCAST_LOCK(sc);
12582 /* first, clear all configured multicast MACs */
12583 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12585 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12589 /* configure a new MACs list */
12590 rc = bxe_init_mcast_macs_list(sc, &rparam);
12592 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12593 BXE_MCAST_UNLOCK(sc);
12597 /* Now add the new MACs */
12598 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12600 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12603 bxe_free_mcast_macs_list(&rparam);
12605 BXE_MCAST_UNLOCK(sc);
12611 bxe_set_uc_list(struct bxe_softc *sc)
12613 struct ifnet *ifp = sc->ifnet;
12614 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12615 struct ifaddr *ifa;
12616 unsigned long ramrod_flags = 0;
12619 #if __FreeBSD_version < 800000
12622 if_addr_rlock(ifp);
12625 /* first schedule a cleanup up of old configuration */
12626 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12628 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12629 #if __FreeBSD_version < 800000
12630 IF_ADDR_UNLOCK(ifp);
12632 if_addr_runlock(ifp);
12637 ifa = ifp->if_addr;
12639 if (ifa->ifa_addr->sa_family != AF_LINK) {
12640 ifa = TAILQ_NEXT(ifa, ifa_link);
12644 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12645 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12646 if (rc == -EEXIST) {
12647 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12648 /* do not treat adding same MAC as an error */
12650 } else if (rc < 0) {
12651 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12652 #if __FreeBSD_version < 800000
12653 IF_ADDR_UNLOCK(ifp);
12655 if_addr_runlock(ifp);
12660 ifa = TAILQ_NEXT(ifa, ifa_link);
12663 #if __FreeBSD_version < 800000
12664 IF_ADDR_UNLOCK(ifp);
12666 if_addr_runlock(ifp);
12669 /* Execute the pending commands */
12670 bit_set(&ramrod_flags, RAMROD_CONT);
12671 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12672 ECORE_UC_LIST_MAC, &ramrod_flags));
12676 bxe_handle_rx_mode_tq(void *context,
12679 struct bxe_softc *sc = (struct bxe_softc *)context;
12680 struct ifnet *ifp = sc->ifnet;
12681 uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12685 if (sc->state != BXE_STATE_OPEN) {
12686 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12687 BXE_CORE_UNLOCK(sc);
12691 BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags);
12693 if (ifp->if_flags & IFF_PROMISC) {
12694 rx_mode = BXE_RX_MODE_PROMISC;
12695 } else if ((ifp->if_flags & IFF_ALLMULTI) ||
12696 ((ifp->if_amcount > BXE_MAX_MULTICAST) &&
12698 rx_mode = BXE_RX_MODE_ALLMULTI;
12701 /* some multicasts */
12702 if (bxe_set_mc_list(sc) < 0) {
12703 rx_mode = BXE_RX_MODE_ALLMULTI;
12705 if (bxe_set_uc_list(sc) < 0) {
12706 rx_mode = BXE_RX_MODE_PROMISC;
12712 * Configuring mcast to a VF involves sleeping (when we
12713 * wait for the PF's response). Since this function is
12714          * called from a non-sleepable context, we must schedule
12715 * a work item for this purpose
12717 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state);
12718 schedule_delayed_work(&sc->sp_rtnl_task, 0);
12723 sc->rx_mode = rx_mode;
12725 /* schedule the rx_mode command */
12726 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12727 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12728 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12729 BXE_CORE_UNLOCK(sc);
12734 bxe_set_storm_rx_mode(sc);
12739 * Configuring mcast to a VF involves sleeping (when we
12740 * wait for the PF's response). Since this function is
12741          * called from a non-sleepable context, we must schedule
12742 * a work item for this purpose
12744 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state);
12745 schedule_delayed_work(&sc->sp_rtnl_task, 0);
12749 BXE_CORE_UNLOCK(sc);
12753 bxe_set_rx_mode(struct bxe_softc *sc)
12755 taskqueue_enqueue(sc->rx_mode_tq, &sc->rx_mode_tq_task);
12758 /* update flags in shmem */
12760 bxe_update_drv_flags(struct bxe_softc *sc,
12764 uint32_t drv_flags;
12766 if (SHMEM2_HAS(sc, drv_flags)) {
12767 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12768 drv_flags = SHMEM2_RD(sc, drv_flags);
12771 SET_FLAGS(drv_flags, flags);
12773 RESET_FLAGS(drv_flags, flags);
12776 SHMEM2_WR(sc, drv_flags, drv_flags);
12777 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12779 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12783 /* periodic timer callout routine, only runs when the interface is up */
12786 bxe_periodic_callout_func(void *xsc)
12788 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12791 if (!BXE_CORE_TRYLOCK(sc)) {
12792 /* just bail and try again next time */
12794 if ((sc->state == BXE_STATE_OPEN) &&
12795 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12796 /* schedule the next periodic callout */
12797 callout_reset(&sc->periodic_callout, hz,
12798 bxe_periodic_callout_func, sc);
12804 if ((sc->state != BXE_STATE_OPEN) ||
12805 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12806 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12807 BXE_CORE_UNLOCK(sc);
12811 /* Check for TX timeouts on any fastpath. */
12812 FOR_EACH_QUEUE(sc, i) {
12813 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12814 /* Ruh-Roh, chip was reset! */
12819 if (!CHIP_REV_IS_SLOW(sc)) {
12821          * This barrier is needed to ensure the ordering between the write
12822          * to sc->port.pmf in bxe_nic_load() or bxe_pmf_update() and
12823          * the read here.
12826 if (sc->port.pmf) {
12828 elink_period_func(&sc->link_params, &sc->link_vars);
12829 BXE_PHY_UNLOCK(sc);
12833 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12834 int mb_idx = SC_FW_MB_IDX(sc);
12835 uint32_t drv_pulse;
12836 uint32_t mcp_pulse;
12838 ++sc->fw_drv_pulse_wr_seq;
12839 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12841 drv_pulse = sc->fw_drv_pulse_wr_seq;
12844 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12845 MCP_PULSE_SEQ_MASK);
12848 * The delta between driver pulse and mcp response should
12849 * be 1 (before mcp response) or 0 (after mcp response).
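         * The masked (mcp_pulse + 1) comparison below also covers the
         * wraparound of the sequence counter.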
12851 if ((drv_pulse != mcp_pulse) &&
12852 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12853 /* someone lost a heartbeat... */
12854 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12855 drv_pulse, mcp_pulse);
12859 /* state is BXE_STATE_OPEN */
12860 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12863 /* sample VF bulletin board for new posts from PF */
12865 bxe_sample_bulletin(sc);
12869 BXE_CORE_UNLOCK(sc);
12871 if ((sc->state == BXE_STATE_OPEN) &&
12872 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12873 /* schedule the next periodic callout */
12874 callout_reset(&sc->periodic_callout, hz,
12875 bxe_periodic_callout_func, sc);
12880 bxe_periodic_start(struct bxe_softc *sc)
12882 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12883 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12887 bxe_periodic_stop(struct bxe_softc *sc)
12889 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12890 callout_drain(&sc->periodic_callout);
12893 /* start the controller */
12894 static __noinline int
12895 bxe_nic_load(struct bxe_softc *sc,
12902 BXE_CORE_LOCK_ASSERT(sc);
12904 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12906 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12909 /* must be called before memory allocation and HW init */
12910 bxe_ilt_set_info(sc);
12913 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12915 bxe_set_fp_rx_buf_size(sc);
12917 if (bxe_alloc_fp_buffers(sc) != 0) {
12918 BLOGE(sc, "Failed to allocate fastpath memory\n");
12919 sc->state = BXE_STATE_CLOSED;
12921 goto bxe_nic_load_error0;
12924 if (bxe_alloc_mem(sc) != 0) {
12925 sc->state = BXE_STATE_CLOSED;
12927 goto bxe_nic_load_error0;
12930 if (bxe_alloc_fw_stats_mem(sc) != 0) {
12931 sc->state = BXE_STATE_CLOSED;
12933 goto bxe_nic_load_error0;
12937 /* set pf load just before approaching the MCP */
12938 bxe_set_pf_load(sc);
12940 /* if MCP exists send load request and analyze response */
12941 if (!BXE_NOMCP(sc)) {
12942 /* attempt to load pf */
12943 if (bxe_nic_load_request(sc, &load_code) != 0) {
12944 sc->state = BXE_STATE_CLOSED;
12946 goto bxe_nic_load_error1;
12949 /* what did the MCP say? */
12950 if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12951 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12952 sc->state = BXE_STATE_CLOSED;
12954 goto bxe_nic_load_error2;
12957 BLOGI(sc, "Device has no MCP!\n");
12958 load_code = bxe_nic_load_no_mcp(sc);
12961 /* mark PMF if applicable */
12962 bxe_nic_load_pmf(sc, load_code);
12964 /* Init Function state controlling object */
12965 bxe_init_func_obj(sc);
12967 /* Initialize HW */
12968 if (bxe_init_hw(sc, load_code) != 0) {
12969 BLOGE(sc, "HW init failed\n");
12970 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12971 sc->state = BXE_STATE_CLOSED;
12973 goto bxe_nic_load_error2;
12977 /* attach interrupts */
12978 if (bxe_interrupt_attach(sc) != 0) {
12979 sc->state = BXE_STATE_CLOSED;
12981 goto bxe_nic_load_error2;
12984 bxe_nic_init(sc, load_code);
12986 /* Init per-function objects */
12989 // XXX bxe_iov_nic_init(sc);
12991 /* set AFEX default VLAN tag to an invalid value */
12992 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12993 // XXX bxe_nic_load_afex_dcc(sc, load_code);
12995 sc->state = BXE_STATE_OPENING_WAITING_PORT;
12996 rc = bxe_func_start(sc);
12998 BLOGE(sc, "Function start failed!\n");
12999 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
13000 sc->state = BXE_STATE_ERROR;
13001 goto bxe_nic_load_error3;
13004 /* send LOAD_DONE command to MCP */
13005 if (!BXE_NOMCP(sc)) {
13006 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
13008 BLOGE(sc, "MCP response failure, aborting\n");
13009 sc->state = BXE_STATE_ERROR;
13011 goto bxe_nic_load_error3;
13015 rc = bxe_setup_leading(sc);
13017 BLOGE(sc, "Setup leading failed!\n");
13018 sc->state = BXE_STATE_ERROR;
13019 goto bxe_nic_load_error3;
13022 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
13023 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
13025 BLOGE(sc, "Queue(%d) setup failed\n", i);
13026 sc->state = BXE_STATE_ERROR;
13027 goto bxe_nic_load_error3;
13031 rc = bxe_init_rss_pf(sc);
13033 BLOGE(sc, "PF RSS init failed\n");
13034 sc->state = BXE_STATE_ERROR;
13035 goto bxe_nic_load_error3;
13041 FOR_EACH_ETH_QUEUE(sc, i) {
13042 rc = bxe_vfpf_setup_q(sc, i);
13044 BLOGE(sc, "Queue(%d) setup failed\n", i);
13045 sc->state = BXE_STATE_ERROR;
13046 goto bxe_nic_load_error3;
13052 /* now when Clients are configured we are ready to work */
13053 sc->state = BXE_STATE_OPEN;
13055 /* Configure a ucast MAC */
13057 rc = bxe_set_eth_mac(sc, TRUE);
13060 else { /* IS_VF(sc) */
13061 rc = bxe_vfpf_set_mac(sc);
13065 BLOGE(sc, "Setting Ethernet MAC failed\n");
13066 sc->state = BXE_STATE_ERROR;
13067 goto bxe_nic_load_error3;
13071 if (IS_PF(sc) && sc->pending_max) {
13073 bxe_update_max_mf_config(sc, sc->pending_max);
13074 sc->pending_max = 0;
13078 if (sc->port.pmf) {
13079 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
13081 sc->state = BXE_STATE_ERROR;
13082 goto bxe_nic_load_error3;
13086 sc->link_params.feature_config_flags &=
13087 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
13089 /* start fast path */
13091 /* Initialize Rx filter */
13092 bxe_set_rx_mode(sc);
13095 switch (/* XXX load_mode */LOAD_OPEN) {
13101 case LOAD_LOOPBACK_EXT:
13102 sc->state = BXE_STATE_DIAG;
13109 if (sc->port.pmf) {
13110 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
13112 bxe_link_status_update(sc);
13115 /* start the periodic timer callout */
13116 bxe_periodic_start(sc);
13118 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
13119 /* mark driver is loaded in shmem2 */
13120 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
13121 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
13123 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
13124 DRV_FLAGS_CAPABILITIES_LOADED_L2));
13127 /* wait for all pending SP commands to complete */
13128 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
13129 BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
13130 bxe_periodic_stop(sc);
13131 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
13136 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
13137 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) {
13138 bxe_dcbx_init(sc, FALSE);
13142 /* Tell the stack the driver is running! */
13143 sc->ifnet->if_drv_flags = IFF_DRV_RUNNING;
13145 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
13149 bxe_nic_load_error3:
13152 bxe_int_disable_sync(sc, 1);
13154 /* clean out queued objects */
13155 bxe_squeeze_objects(sc);
13158 bxe_interrupt_detach(sc);
13160 bxe_nic_load_error2:
13162 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
13163 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
13164 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
13169 bxe_nic_load_error1:
13171 /* clear pf_load status, as it was already set */
13173 bxe_clear_pf_load(sc);
13176 bxe_nic_load_error0:
13178 bxe_free_fw_stats_mem(sc);
13179 bxe_free_fp_buffers(sc);
13186 bxe_init_locked(struct bxe_softc *sc)
13188 int other_engine = SC_PATH(sc) ? 0 : 1;
13189 uint8_t other_load_status, load_status;
13190 uint8_t global = FALSE;
13193 BXE_CORE_LOCK_ASSERT(sc);
13195 /* check if the driver is already running */
13196 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
13197 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
13201 bxe_set_power_state(sc, PCI_PM_D0);
13204 * If parity occurred during the unload, then attentions and/or
13205      * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function
13206 * loaded on the current engine to complete the recovery. Parity recovery
13207 * is only relevant for PF driver.
13210 other_load_status = bxe_get_load_status(sc, other_engine);
13211 load_status = bxe_get_load_status(sc, SC_PATH(sc));
13213 if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
13214 bxe_chk_parity_attn(sc, &global, TRUE)) {
13217 * If there are attentions and they are in global blocks, set
13218          * the GLOBAL_RESET bit regardless of whether it will be this
13219 * function that will complete the recovery or not.
13222 bxe_set_reset_global(sc);
13226 * Only the first function on the current engine should try
13227 * to recover in open. In case of attentions in global blocks
13228 * only the first in the chip should try to recover.
13230 if ((!load_status && (!global || !other_load_status)) &&
13231 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
13232 BLOGI(sc, "Recovered during init\n");
13236 /* recovery has failed... */
13237 bxe_set_power_state(sc, PCI_PM_D3hot);
13238 sc->recovery_state = BXE_RECOVERY_FAILED;
13240 BLOGE(sc, "Recovery flow hasn't properly "
13241 "completed yet, try again later. "
13242 "If you still see this message after a "
13243 "few retries then power cycle is required.\n");
13246 goto bxe_init_locked_done;
13251 sc->recovery_state = BXE_RECOVERY_DONE;
13253 rc = bxe_nic_load(sc, LOAD_OPEN);
13255 bxe_init_locked_done:
13258 /* Tell the stack the driver is NOT running! */
13259 BLOGE(sc, "Initialization failed, "
13260 "stack notified driver is NOT running!\n");
13261 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
13268 bxe_stop_locked(struct bxe_softc *sc)
13270 BXE_CORE_LOCK_ASSERT(sc);
13271 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
13275 * Handles controller initialization when called from an unlocked routine.
13276 * ifconfig calls this function.
13282 bxe_init(void *xsc)
13284 struct bxe_softc *sc = (struct bxe_softc *)xsc;
13287 bxe_init_locked(sc);
13288 BXE_CORE_UNLOCK(sc);
13292 bxe_init_ifnet(struct bxe_softc *sc)
13296 /* ifconfig entrypoint for media type/status reporting */
13297 ifmedia_init(&sc->ifmedia, IFM_IMASK,
13298 bxe_ifmedia_update,
13299 bxe_ifmedia_status);
13301 /* set the default interface values */
13302 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
13303 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
13304 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
13306 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
13308 /* allocate the ifnet structure */
13309 if ((ifp = if_alloc(IFT_ETHER)) == NULL) {
13310 BLOGE(sc, "Interface allocation failed!\n");
13314 ifp->if_softc = sc;
13315 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13316 ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
13317 ifp->if_ioctl = bxe_ioctl;
13318 ifp->if_start = bxe_tx_start;
13319 #if __FreeBSD_version >= 800000
13320 ifp->if_transmit = bxe_tx_mq_start;
13321 ifp->if_qflush = bxe_mq_flush;
13326 ifp->if_init = bxe_init;
13327 ifp->if_mtu = sc->mtu;
13328 ifp->if_hwassist = (CSUM_IP |
13334 ifp->if_capabilities =
13335 #if __FreeBSD_version < 700000
13337 IFCAP_VLAN_HWTAGGING |
13343 IFCAP_VLAN_HWTAGGING |
13345 IFCAP_VLAN_HWFILTER |
13346 IFCAP_VLAN_HWCSUM |
13354 ifp->if_capenable = ifp->if_capabilities;
13355 ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... */
13356 #if __FreeBSD_version < 1000025
13357 ifp->if_baudrate = 1000000000;
13359 if_initbaudrate(ifp, IF_Gbps(10));
13361 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
13363 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
13364 IFQ_SET_READY(&ifp->if_snd);
13368 /* attach to the Ethernet interface list */
13369 ether_ifattach(ifp, sc->link_params.mac_addr);
13375 bxe_deallocate_bars(struct bxe_softc *sc)
13379 for (i = 0; i < MAX_BARS; i++) {
13380 if (sc->bar[i].resource != NULL) {
13381 bus_release_resource(sc->dev,
13384 sc->bar[i].resource);
13385 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13392 bxe_allocate_bars(struct bxe_softc *sc)
13397 memset(sc->bar, 0, sizeof(sc->bar));
13399 for (i = 0; i < MAX_BARS; i++) {
13401 /* memory resources reside at BARs 0, 2, 4 */
13402 /* Run `pciconf -lb` to see mappings */
13403 if ((i != 0) && (i != 2) && (i != 4)) {
13407 sc->bar[i].rid = PCIR_BAR(i);
13411 flags |= RF_SHAREABLE;
13414 if ((sc->bar[i].resource =
13415 bus_alloc_resource_any(sc->dev,
13420 /* BAR4 doesn't exist for E1 */
13421 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n",
13427 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
13428 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13429 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13431 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n",
13433 (void *)rman_get_start(sc->bar[i].resource),
13434 (void *)rman_get_end(sc->bar[i].resource),
13435 rman_get_size(sc->bar[i].resource),
13436 (void *)sc->bar[i].kva);
13443 bxe_get_function_num(struct bxe_softc *sc)
13448 * Read the ME register to get the function number. The ME register
13449 * holds the relative-function number and absolute-function number. The
13450 * absolute-function number appears only in E2 and above. Before that
13451 * these bits always contained zero, therefore we cannot blindly use them.
13454 val = REG_RD(sc, BAR_ME_REGISTER);
13457 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13459 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13461 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
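        /* e.g. relative function 1 on path 1 maps to absolute function 3 */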
13462 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13464 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13467 BLOGD(sc, DBG_LOAD,
13468 "Relative function %d, Absolute function %d, Path %d\n",
13469 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13473 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13475 uint32_t shmem2_size;
13477 uint32_t mf_cfg_offset_value;
13480 offset = (SHMEM_RD(sc, func_mb) +
13481 (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13484 if (sc->devinfo.shmem2_base != 0) {
13485 shmem2_size = SHMEM2_RD(sc, size);
13486 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13487 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13488 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13489 offset = mf_cfg_offset_value;
13498 bxe_pcie_capability_read(struct bxe_softc *sc,
13504 /* ensure PCIe capability is enabled */
13505 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13506 if (pcie_reg != 0) {
13507 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13508 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13512 BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13518 bxe_is_pcie_pending(struct bxe_softc *sc)
13520 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
13521 PCIM_EXP_STA_TRANSACTION_PND);
13525  * Walk the PCI capabilities list for the device to find what features are
13526  * supported. These capabilities may be enabled/disabled by firmware so it's
13527 * best to walk the list rather than make assumptions.
13530 bxe_probe_pci_caps(struct bxe_softc *sc)
13532 uint16_t link_status;
13535 /* check if PCI Power Management is enabled */
13536     if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13538 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13540 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13541 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13545 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
13547 /* handle PCIe 2.0 workarounds for 57710 */
13548 if (CHIP_IS_E1(sc)) {
13549 /* workaround for 57710 errata E4_57710_27462 */
13550 sc->devinfo.pcie_link_speed =
13551 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
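        /* a set bit 24 is treated as a Gen2 link (speed 2), else Gen1 (speed 1) */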
13553 /* workaround for 57710 errata E4_57710_27488 */
13554 sc->devinfo.pcie_link_width =
13555 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13556 if (sc->devinfo.pcie_link_speed > 1) {
13557 sc->devinfo.pcie_link_width =
13558 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
13561 sc->devinfo.pcie_link_speed =
13562 (link_status & PCIM_LINK_STA_SPEED);
13563 sc->devinfo.pcie_link_width =
13564 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13567 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13568 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13570 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13571 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13573 /* check if MSI capability is enabled */
13574     if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13576 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13578 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13579 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13583 /* check if MSI-X capability is enabled */
13584     if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13586 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13588 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13589 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13595 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13597 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13600 /* get the outer vlan if we're in switch-dependent mode */
13602 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13603 mf_info->ext_id = (uint16_t)val;
13605 mf_info->multi_vnics_mode = 1;
13607 if (!VALID_OVLAN(mf_info->ext_id)) {
13608 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13612 /* get the capabilities */
13613 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13614 FUNC_MF_CFG_PROTOCOL_ISCSI) {
13615 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13616 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13617 FUNC_MF_CFG_PROTOCOL_FCOE) {
13618 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13620 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13623 mf_info->vnics_per_port =
13624 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13630 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13632 uint32_t retval = 0;
13635 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13637 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13638 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13639 retval |= MF_PROTO_SUPPORT_ETHERNET;
13641 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13642 retval |= MF_PROTO_SUPPORT_ISCSI;
13644 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13645 retval |= MF_PROTO_SUPPORT_FCOE;
13653 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13655 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13659 * There is no outer vlan if we're in switch-independent mode.
13660 * If the mac is valid then assume multi-function.
13663 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13665 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13667 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13669 mf_info->vnics_per_port =
13670 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13676 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13678 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13679 uint32_t e1hov_tag;
13680 uint32_t func_config;
13681 uint32_t niv_config;
13683 mf_info->multi_vnics_mode = 1;
13685 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13686 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13687 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13690 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13691 FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13693 mf_info->default_vlan =
13694 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13695 FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13697 mf_info->niv_allowed_priorities =
13698 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13699 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13701 mf_info->niv_default_cos =
13702 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13703 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13705 mf_info->afex_vlan_mode =
13706 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13707 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13709 mf_info->niv_mba_enabled =
13710 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13711 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13713 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13715 mf_info->vnics_per_port =
13716 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13722 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13724 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13731 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13733 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13734 mf_info->mf_config[SC_VN(sc)]);
13735 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13736 mf_info->multi_vnics_mode);
13737 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13738 mf_info->vnics_per_port);
13739 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13741 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13742 mf_info->min_bw[0], mf_info->min_bw[1],
13743 mf_info->min_bw[2], mf_info->min_bw[3]);
13744 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13745 mf_info->max_bw[0], mf_info->max_bw[1],
13746 mf_info->max_bw[2], mf_info->max_bw[3]);
13747 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13750 /* various MF mode sanity checks... */
13752 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13753 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13758 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13759 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13760 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13764 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13765 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13766 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13767 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13768 SC_VN(sc), OVLAN(sc));
13772 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13773 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13774 mf_info->multi_vnics_mode, OVLAN(sc));
13779      * Verify all functions are either in MF or SF mode. If MF, make sure
13780      * that all non-hidden functions have a valid ovlan. If SF,
13781 * make sure that all non-hidden functions have an invalid ovlan.
13783 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13784 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13785 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13786 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13787 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13788 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13789 BLOGE(sc, "mf_mode=SD function %d MF config "
13790 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13791 i, mf_info->multi_vnics_mode, ovlan1);
13796 /* Verify all funcs on the same port each have a different ovlan. */
13797 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13798 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13799 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13800 /* iterate from the next function on the port to the max func */
13801 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13802 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13803 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13804 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13805 VALID_OVLAN(ovlan1) &&
13806 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13807 VALID_OVLAN(ovlan2) &&
13808 (ovlan1 == ovlan2)) {
13809 BLOGE(sc, "mf_mode=SD functions %d and %d "
13810 "have the same ovlan (%d)\n",
13816 } /* MULTI_FUNCTION_SD */
13822 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13824 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13825 uint32_t val, mac_upper;
13828 /* initialize mf_info defaults */
13829 mf_info->vnics_per_port = 1;
13830 mf_info->multi_vnics_mode = FALSE;
13831 mf_info->path_has_ovlan = FALSE;
13832 mf_info->mf_mode = SINGLE_FUNCTION;
13834 if (!CHIP_IS_MF_CAP(sc)) {
13838 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13839 BLOGE(sc, "Invalid mf_cfg_base!\n");
13843 /* get the MF mode (switch dependent / independent / single-function) */
13845 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13847 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13849 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13851 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13853 /* check for legal upper mac bytes */
13854 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13855 mf_info->mf_mode = MULTI_FUNCTION_SI;
13857 BLOGE(sc, "Invalid config for Switch Independent mode\n");
13862 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13863 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13865 /* get outer vlan configuration */
13866 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13868 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13869 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13870 mf_info->mf_mode = MULTI_FUNCTION_SD;
13872 BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13877 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13879 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13882 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13885 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13886 * and the MAC address is valid.
13888 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13890 if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13891 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13892 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13894 BLOGE(sc, "Invalid config for AFEX mode\n");
13901 BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13902 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13907     /* set path mf_mode (which could be different from the function mf_mode) */
13908 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13909 mf_info->path_has_ovlan = TRUE;
13910 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13912          * Decide on the path multi vnics mode. If we're not in MF mode and in
13913          * 4-port mode, it is good enough to check vnic-0 of the other port
         * on the same path.
13916 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13917 uint8_t other_port = !(PORT_ID(sc) & 1);
13918 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
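            /* e.g. path 0, port 0 checks absolute function 2 (vnic 0 of port 1) */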
13920 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13922 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13926 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13927 /* invalid MF config */
13928 if (SC_VN(sc) >= 1) {
13929 BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13936 /* get the MF configuration */
13937 mf_info->mf_config[SC_VN(sc)] =
13938 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13940 switch(mf_info->mf_mode)
13942 case MULTI_FUNCTION_SD:
13944 bxe_get_shmem_mf_cfg_info_sd(sc);
13947 case MULTI_FUNCTION_SI:
13949 bxe_get_shmem_mf_cfg_info_si(sc);
13952 case MULTI_FUNCTION_AFEX:
13954 bxe_get_shmem_mf_cfg_info_niv(sc);
13959 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13964 /* get the congestion management parameters */
13967 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13968 /* get min/max bw */
13969 val = MFCFG_RD(sc, func_mf_config[i].config);
13970 mf_info->min_bw[vnic] =
13971 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13972 mf_info->max_bw[vnic] =
13973 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13977 return (bxe_check_valid_mf_cfg(sc));
13981 bxe_get_shmem_info(struct bxe_softc *sc)
13984 uint32_t mac_hi, mac_lo, val;
13986 port = SC_PORT(sc);
13987 mac_hi = mac_lo = 0;
13989 sc->link_params.sc = sc;
13990 sc->link_params.port = port;
13992 /* get the hardware config info */
13993 sc->devinfo.hw_config =
13994 SHMEM_RD(sc, dev_info.shared_hw_config.config);
13995 sc->devinfo.hw_config2 =
13996 SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13998 sc->link_params.hw_led_mode =
13999 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
14000 SHARED_HW_CFG_LED_MODE_SHIFT);
14002 /* get the port feature config */
14004 SHMEM_RD(sc, dev_info.port_feature_config[port].config),
14006 /* get the link params */
14007 sc->link_params.speed_cap_mask[0] =
14008 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
14009 sc->link_params.speed_cap_mask[1] =
14010 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
14012 /* get the lane config */
14013 sc->link_params.lane_config =
14014 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
14016 /* get the link config */
14017 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
14018 sc->port.link_config[ELINK_INT_PHY] = val;
14019 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
14020 sc->port.link_config[ELINK_EXT_PHY1] =
14021 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
14023 /* get the override preemphasis flag and enable it or turn it off */
14024 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
14025 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
14026 sc->link_params.feature_config_flags |=
14027 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
14029 sc->link_params.feature_config_flags &=
14030 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
14033 /* get the initial value of the link params */
14034 sc->link_params.multi_phy_config =
14035 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
14037 /* get external phy info */
14038 sc->port.ext_phy_config =
14039 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
14041 /* get the multifunction configuration */
14042 bxe_get_mf_cfg_info(sc);
14044 /* get the mac address */
14046 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
14047 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
14049 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
14050 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
14053 if ((mac_lo == 0) && (mac_hi == 0)) {
14054 *sc->mac_addr_str = 0;
14055 BLOGE(sc, "No Ethernet address programmed!\n");
14057 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
14058 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
14059 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
14060 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
14061 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
14062 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
14063 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
14064 "%02x:%02x:%02x:%02x:%02x:%02x",
14065 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
14066 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
14067 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
14068 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
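    /*
     * Worked example (hypothetical values): mac_hi = 0x0000aabb and
     * mac_lo = 0xccddeeff unpack to the station address aa:bb:cc:dd:ee:ff;
     * the two high-order bytes come from mac_hi and the remaining four
     * from mac_lo, most significant byte first.
     */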
14073 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
14074 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) {
14075 sc->flags |= BXE_NO_ISCSI;
14078 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
14079 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) {
14080 sc->flags |= BXE_NO_FCOE_FLAG;
14088 bxe_get_tunable_params(struct bxe_softc *sc)
14090 /* sanity checks */
14092 if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
14093 (bxe_interrupt_mode != INTR_MODE_MSI) &&
14094 (bxe_interrupt_mode != INTR_MODE_MSIX)) {
14095 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
14096 bxe_interrupt_mode = INTR_MODE_MSIX;
14099 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
14100 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
14101 bxe_queue_count = 0;
14104 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
14105 if (bxe_max_rx_bufs == 0) {
14106 bxe_max_rx_bufs = RX_BD_USABLE;
14108 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
14109 bxe_max_rx_bufs = 2048;
14113 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
14114 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
14115 bxe_hc_rx_ticks = 25;
14118 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
14119 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
14120 bxe_hc_tx_ticks = 50;
14123 if (bxe_max_aggregation_size == 0) {
14124 bxe_max_aggregation_size = TPA_AGG_SIZE;
14127 if (bxe_max_aggregation_size > 0xffff) {
14128 BLOGW(sc, "invalid max_aggregation_size (%d)\n",
14129 bxe_max_aggregation_size);
14130 bxe_max_aggregation_size = TPA_AGG_SIZE;
14133 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
14134 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
14138 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
14139 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
14140 bxe_autogreeen = 0;
14143 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
14144 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
14148 /* pull in user settings */
14150 sc->interrupt_mode = bxe_interrupt_mode;
14151 sc->max_rx_bufs = bxe_max_rx_bufs;
14152 sc->hc_rx_ticks = bxe_hc_rx_ticks;
14153 sc->hc_tx_ticks = bxe_hc_tx_ticks;
14154 sc->max_aggregation_size = bxe_max_aggregation_size;
14155 sc->mrrs = bxe_mrrs;
14156 sc->autogreeen = bxe_autogreeen;
14157 sc->udp_rss = bxe_udp_rss;
14159 if (bxe_interrupt_mode == INTR_MODE_INTX) {
14160 sc->num_queues = 1;
14161 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
14163 min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
14165 if (sc->num_queues > mp_ncpus) {
14166 sc->num_queues = mp_ncpus;
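    /*
     * Example (hypothetical system): with bxe_queue_count = 0 (auto) and
     * mp_ncpus = 8, the MSI-X path above starts from min(8, <chain limit>)
     * and then clamps to mp_ncpus, ending up with 8 queues.
     */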
14170 BLOGD(sc, DBG_LOAD,
14173 "interrupt_mode=%d "
14178 "max_aggregation_size=%d "
14183 sc->interrupt_mode,
14188 sc->max_aggregation_size,
14195 bxe_media_detect(struct bxe_softc *sc)
14197 uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
14198 switch (sc->link_params.phy[phy_idx].media_type) {
14199 case ELINK_ETH_PHY_SFPP_10G_FIBER:
14200 case ELINK_ETH_PHY_XFP_FIBER:
14201 BLOGI(sc, "Found 10Gb Fiber media.\n");
14202 sc->media = IFM_10G_SR;
14204 case ELINK_ETH_PHY_SFP_1G_FIBER:
14205 BLOGI(sc, "Found 1Gb Fiber media.\n");
14206 sc->media = IFM_1000_SX;
14208 case ELINK_ETH_PHY_KR:
14209 case ELINK_ETH_PHY_CX4:
14210 BLOGI(sc, "Found 10GBase-CX4 media.\n");
14211 sc->media = IFM_10G_CX4;
14213 case ELINK_ETH_PHY_DA_TWINAX:
14214 BLOGI(sc, "Found 10Gb Twinax media.\n");
14215 sc->media = IFM_10G_TWINAX;
14217 case ELINK_ETH_PHY_BASE_T:
14218 if (sc->link_params.speed_cap_mask[0] &
14219 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
14220 BLOGI(sc, "Found 10GBase-T media.\n");
14221 sc->media = IFM_10G_T;
14223 BLOGI(sc, "Found 1000Base-T media.\n");
14224 sc->media = IFM_1000_T;
14227 case ELINK_ETH_PHY_NOT_PRESENT:
14228 BLOGI(sc, "Media not present.\n");
14231 case ELINK_ETH_PHY_UNSPECIFIED:
14233 BLOGI(sc, "Unknown media!\n");
14239 #define GET_FIELD(value, fname) \
14240 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
14241 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
14242 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
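/*
 * Example of the GET_FIELD() idiom with assumed field definitions: if
 * IGU_REG_MAPPING_MEMORY_FID_MASK were 0x7f80 with a shift of 7, then
 * GET_FIELD(0x0400, IGU_REG_MAPPING_MEMORY_FID) would evaluate to
 * (0x0400 & 0x7f80) >> 7 = 0x08.
 */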
14245 bxe_get_igu_cam_info(struct bxe_softc *sc)
14247 int pfid = SC_FUNC(sc);
14250 uint8_t fid, igu_sb_cnt = 0;
14252 sc->igu_base_sb = 0xff;
14254 if (CHIP_INT_MODE_IS_BC(sc)) {
14255 int vn = SC_VN(sc);
14256 igu_sb_cnt = sc->igu_sb_cnt;
14257 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
14259 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
14260 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
14264 /* IGU in normal mode - read CAM */
14265 for (igu_sb_id = 0;
14266 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
14268 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
14269 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
14272 fid = IGU_FID(val);
14273 if ((fid & IGU_FID_ENCODE_IS_PF)) {
14274 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
14277 if (IGU_VEC(val) == 0) {
14278 /* default status block */
14279 sc->igu_dsb_id = igu_sb_id;
14281 if (sc->igu_base_sb == 0xff) {
14282 sc->igu_base_sb = igu_sb_id;
14290 * Due to new PF resource allocation by MFW T7.4 and above, the number
14291 * of CAM entries may not equal the value advertised in PCI. The driver
14292 * should use the minimum of the two as the actual status block count.
14295 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
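    /*
     * Example (hypothetical counts): if PCI config space advertised 16
     * MSI-X vectors but only 8 valid CAM entries were found for this PF,
     * the driver proceeds with 8 status blocks.
     */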
14297 if (igu_sb_cnt == 0) {
14298 BLOGE(sc, "CAM configuration error\n");
14306 * Gather various information from the device config space, the device itself,
14307 * shmem, and the user input.
14310 bxe_get_device_info(struct bxe_softc *sc)
14315 /* Get the data for the device */
14316 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
14317 sc->devinfo.device_id = pci_get_device(sc->dev);
14318 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
14319 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
14321 /* get the chip revision (chip metal comes from pci config space) */
14322 sc->devinfo.chip_id =
14323 sc->link_params.chip_id =
14324 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
14325 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
14326 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
14327 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
14329 /* force 57811 according to MISC register */
14330 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14331 if (CHIP_IS_57810(sc)) {
14332 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14333 (sc->devinfo.chip_id & 0x0000ffff));
14334 } else if (CHIP_IS_57810_MF(sc)) {
14335 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14336 (sc->devinfo.chip_id & 0x0000ffff));
14338 sc->devinfo.chip_id |= 0x1;
14341 BLOGD(sc, DBG_LOAD,
14342 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14343 sc->devinfo.chip_id,
14344 ((sc->devinfo.chip_id >> 16) & 0xffff),
14345 ((sc->devinfo.chip_id >> 12) & 0xf),
14346 ((sc->devinfo.chip_id >> 4) & 0xff),
14347 ((sc->devinfo.chip_id >> 0) & 0xf));
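    /*
     * Worked example of the decode above: a hypothetical chip_id of
     * 0x168e5014 prints as num=0x168e, rev=0x5, metal=0x01, bond=0x4.
     */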
14349 val = (REG_RD(sc, 0x2874) & 0x55);
14350 if ((sc->devinfo.chip_id & 0x1) ||
14351 (CHIP_IS_E1(sc) && val) ||
14352 (CHIP_IS_E1H(sc) && (val == 0x55))) {
14353 sc->flags |= BXE_ONE_PORT_FLAG;
14354 BLOGD(sc, DBG_LOAD, "single port device\n");
14357 /* set the doorbell size */
14358 sc->doorbell_size = (1 << BXE_DB_SHIFT);
14360 /* determine whether the device is in 2 port or 4 port mode */
14361 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14362 if (CHIP_IS_E2E3(sc)) {
14364 * Read port4mode_en_ovwr[0]:
14365 * If 1, four port mode is in port4mode_en_ovwr[1].
14366 * If 0, four port mode is in port4mode_en[0].
14368 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14370 val = ((val >> 1) & 1);
14372 val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14375 sc->devinfo.chip_port_mode =
14376 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14378 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
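        /*
         * Example of the override logic: port4mode_en_ovwr = 0x3 has bit 0
         * set, so four-port mode comes from bit 1 (set, so 4_PORT_MODE);
         * a value of 0x0 falls back to port4mode_en[0] instead.
         */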
14381 /* get the function and path info for the device */
14382 bxe_get_function_num(sc);
14384 /* get the shared memory base address */
14385 sc->devinfo.shmem_base =
14386 sc->link_params.shmem_base =
14387 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14388 sc->devinfo.shmem2_base =
14389 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14390 MISC_REG_GENERIC_CR_0));
14392 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14393 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14395 if (!sc->devinfo.shmem_base) {
14396 /* this should ONLY prevent upcoming shmem reads */
14397 BLOGI(sc, "MCP not active\n");
14398 sc->flags |= BXE_NO_MCP_FLAG;
14402 /* make sure the shared memory contents are valid */
14403 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14404 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14405 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14406 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14409 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14411 /* get the bootcode version */
14412 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14413 snprintf(sc->devinfo.bc_ver_str,
14414 sizeof(sc->devinfo.bc_ver_str),
14416 ((sc->devinfo.bc_ver >> 24) & 0xff),
14417 ((sc->devinfo.bc_ver >> 16) & 0xff),
14418 ((sc->devinfo.bc_ver >> 8) & 0xff));
14419 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
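    /*
     * Worked example: a hypothetical bc_ver of 0x070d2b00 decodes to
     * "7.13.43", since major, minor and revision live in bits 31:24,
     * 23:16 and 15:8 respectively.
     */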
14421 /* get the bootcode shmem address */
14422 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14423 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14425 /* clean indirect addresses as they're not used */
14426 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14428 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14429 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14430 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14431 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14432 if (CHIP_IS_E1x(sc)) {
14433 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14434 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14435 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14436 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14440 * Enable internal target-read (in case we are probed after PF
14441 * FLR). Must be done prior to any BAR read access. Only for 57712 and up.
14444 if (!CHIP_IS_E1x(sc)) {
14445 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14449 /* get the nvram size */
14450 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14451 sc->devinfo.flash_size =
14452 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14453 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
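    /*
     * Example (assuming the size field occupies the low bits of CFG4):
     * a field value of 2 gives a flash size of NVRAM_1MB_SIZE << 2,
     * i.e. four times the base unit.
     */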
14455 /* get PCI capabilities */
14456 bxe_probe_pci_caps(sc);
14458 bxe_set_power_state(sc, PCI_PM_D0);
14460 /* get various configuration parameters from shmem */
14461 bxe_get_shmem_info(sc);
14463 if (sc->devinfo.pcie_msix_cap_reg != 0) {
14464 val = pci_read_config(sc->dev,
14465 (sc->devinfo.pcie_msix_cap_reg +
14468 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14470 sc->igu_sb_cnt = 1;
14473 sc->igu_base_addr = BAR_IGU_INTMEM;
14475 /* initialize IGU parameters */
14476 if (CHIP_IS_E1x(sc)) {
14477 sc->devinfo.int_block = INT_BLOCK_HC;
14478 sc->igu_dsb_id = DEF_SB_IGU_ID;
14479 sc->igu_base_sb = 0;
14481 sc->devinfo.int_block = INT_BLOCK_IGU;
14483 /* do not allow device reset during IGU info processing */
14484 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14486 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14488 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14491 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14493 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14494 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14495 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14497 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14502 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14503 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14504 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14509 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14510 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14511 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14513 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14516 rc = bxe_get_igu_cam_info(sc);
14518 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14526 * Get base FW non-default (fast path) status block ID. This value is
14527 * used to initialize the fw_sb_id saved on the fp/queue structure to
14528 * determine the id used by the FW.
14530 if (CHIP_IS_E1x(sc)) {
14531 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14534 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14535 * the same queue are indicated on the same IGU SB). So we prefer
14536 * FW and IGU SBs to be the same value.
14538 sc->base_fw_ndsb = sc->igu_base_sb;
14541 BLOGD(sc, DBG_LOAD,
14542 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14543 sc->igu_dsb_id, sc->igu_base_sb,
14544 sc->igu_sb_cnt, sc->base_fw_ndsb);
14546 elink_phy_probe(&sc->link_params);
14552 bxe_link_settings_supported(struct bxe_softc *sc,
14553 uint32_t switch_cfg)
14555 uint32_t cfg_size = 0;
14557 uint8_t port = SC_PORT(sc);
14559 /* aggregation of supported attributes of all external phys */
14560 sc->port.supported[0] = 0;
14561 sc->port.supported[1] = 0;
14563 switch (sc->link_params.num_phys) {
14565 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14569 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14573 if (sc->link_params.multi_phy_config &
14574 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14575 sc->port.supported[1] =
14576 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14577 sc->port.supported[0] =
14578 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14580 sc->port.supported[0] =
14581 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14582 sc->port.supported[1] =
14583 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14589 if (!(sc->port.supported[0] || sc->port.supported[1])) {
14590 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14592 dev_info.port_hw_config[port].external_phy_config),
14594 dev_info.port_hw_config[port].external_phy_config2));
14598 if (CHIP_IS_E3(sc))
14599 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14601 switch (switch_cfg) {
14602 case ELINK_SWITCH_CFG_1G:
14603 sc->port.phy_addr =
14604 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14606 case ELINK_SWITCH_CFG_10G:
14607 sc->port.phy_addr =
14608 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14611 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14612 sc->port.link_config[0]);
14617 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14619 /* mask what we support according to speed_cap_mask per configuration */
14620 for (idx = 0; idx < cfg_size; idx++) {
14621 if (!(sc->link_params.speed_cap_mask[idx] &
14622 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14623 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14626 if (!(sc->link_params.speed_cap_mask[idx] &
14627 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14628 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14631 if (!(sc->link_params.speed_cap_mask[idx] &
14632 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14633 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14636 if (!(sc->link_params.speed_cap_mask[idx] &
14637 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14638 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14641 if (!(sc->link_params.speed_cap_mask[idx] &
14642 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14643 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14646 if (!(sc->link_params.speed_cap_mask[idx] &
14647 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14648 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14651 if (!(sc->link_params.speed_cap_mask[idx] &
14652 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14653 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14656 if (!(sc->link_params.speed_cap_mask[idx] &
14657 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14658 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14662 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14663 sc->port.supported[0], sc->port.supported[1]);
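    /*
     * Example: a speed_cap_mask advertising only the D0_1G and D0_10G
     * capabilities would cause every other ELINK_SUPPORTED_* speed bit
     * to be cleared above, leaving only 1000baseT_Full and
     * 10000baseT_Full set in sc->port.supported[idx].
     */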
14667 bxe_link_settings_requested(struct bxe_softc *sc)
14669 uint32_t link_config;
14671 uint32_t cfg_size = 0;
14673 sc->port.advertising[0] = 0;
14674 sc->port.advertising[1] = 0;
14676 switch (sc->link_params.num_phys) {
14686 for (idx = 0; idx < cfg_size; idx++) {
14687 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14688 link_config = sc->port.link_config[idx];
14690 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14691 case PORT_FEATURE_LINK_SPEED_AUTO:
14692 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14693 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14694 sc->port.advertising[idx] |= sc->port.supported[idx];
14695 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14696 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14697 sc->port.advertising[idx] |=
14698 (ELINK_SUPPORTED_100baseT_Half |
14699 ELINK_SUPPORTED_100baseT_Full);
14701 /* force 10G, no AN */
14702 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14703 sc->port.advertising[idx] |=
14704 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14709 case PORT_FEATURE_LINK_SPEED_10M_FULL:
14710 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14711 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14712 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14715 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14716 "speed_cap_mask=0x%08x\n",
14717 link_config, sc->link_params.speed_cap_mask[idx]);
14722 case PORT_FEATURE_LINK_SPEED_10M_HALF:
14723 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14724 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14725 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14726 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14729 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14730 "speed_cap_mask=0x%08x\n",
14731 link_config, sc->link_params.speed_cap_mask[idx]);
14736 case PORT_FEATURE_LINK_SPEED_100M_FULL:
14737 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14738 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14739 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14742 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14743 "speed_cap_mask=0x%08x\n",
14744 link_config, sc->link_params.speed_cap_mask[idx]);
14749 case PORT_FEATURE_LINK_SPEED_100M_HALF:
14750 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14751 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14752 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14753 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14756 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14757 "speed_cap_mask=0x%08x\n",
14758 link_config, sc->link_params.speed_cap_mask[idx]);
14763 case PORT_FEATURE_LINK_SPEED_1G:
14764 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14765 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14766 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14769 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14770 "speed_cap_mask=0x%08x\n",
14771 link_config, sc->link_params.speed_cap_mask[idx]);
14776 case PORT_FEATURE_LINK_SPEED_2_5G:
14777 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14778 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14779 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14782 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14783 "speed_cap_mask=0x%08x\n",
14784 link_config, sc->link_params.speed_cap_mask[idx]);
14789 case PORT_FEATURE_LINK_SPEED_10G_CX4:
14790 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14791 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14792 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14795 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14796 "speed_cap_mask=0x%08x\n",
14797 link_config, sc->link_params.speed_cap_mask[idx]);
14802 case PORT_FEATURE_LINK_SPEED_20G:
14803 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14807 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14808 "speed_cap_mask=0x%08x\n",
14809 link_config, sc->link_params.speed_cap_mask[idx]);
14810 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14811 sc->port.advertising[idx] = sc->port.supported[idx];
14815 sc->link_params.req_flow_ctrl[idx] =
14816 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14818 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14819 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14820 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14822 bxe_set_requested_fc(sc);
14826 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14827 "req_flow_ctrl=0x%x advertising=0x%x\n",
14828 sc->link_params.req_line_speed[idx],
14829 sc->link_params.req_duplex[idx],
14830 sc->link_params.req_flow_ctrl[idx],
14831 sc->port.advertising[idx]);
14836 bxe_get_phy_info(struct bxe_softc *sc)
14838 uint8_t port = SC_PORT(sc);
14839 uint32_t config = sc->port.config;
14842 /* shmem data already read in bxe_get_shmem_info() */
14844 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14845 "link_config0=0x%08x\n",
14846 sc->link_params.lane_config,
14847 sc->link_params.speed_cap_mask[0],
14848 sc->port.link_config[0]);
14850 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14851 bxe_link_settings_requested(sc);
14853 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14854 sc->link_params.feature_config_flags |=
14855 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14856 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14857 sc->link_params.feature_config_flags &=
14858 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14859 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14860 sc->link_params.feature_config_flags |=
14861 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14864 /* configure link feature according to nvram value */
14866 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14867 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14868 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14869 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14870 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14871 ELINK_EEE_MODE_ENABLE_LPI |
14872 ELINK_EEE_MODE_OUTPUT_TIME);
14874 sc->link_params.eee_mode = 0;
14877 /* get the media type */
14878 bxe_media_detect(sc);
14882 bxe_get_params(struct bxe_softc *sc)
14884 /* get user tunable params */
14885 bxe_get_tunable_params(sc);
14887 /* select the RX and TX ring sizes */
14888 sc->tx_ring_size = TX_BD_USABLE;
14889 sc->rx_ring_size = RX_BD_USABLE;
14891 /* XXX disable WoL */
14896 bxe_set_modes_bitmap(struct bxe_softc *sc)
14898 uint32_t flags = 0;
14900 if (CHIP_REV_IS_FPGA(sc)) {
14901 SET_FLAGS(flags, MODE_FPGA);
14902 } else if (CHIP_REV_IS_EMUL(sc)) {
14903 SET_FLAGS(flags, MODE_EMUL);
14905 SET_FLAGS(flags, MODE_ASIC);
14908 if (CHIP_IS_MODE_4_PORT(sc)) {
14909 SET_FLAGS(flags, MODE_PORT4);
14911 SET_FLAGS(flags, MODE_PORT2);
14914 if (CHIP_IS_E2(sc)) {
14915 SET_FLAGS(flags, MODE_E2);
14916 } else if (CHIP_IS_E3(sc)) {
14917 SET_FLAGS(flags, MODE_E3);
14918 if (CHIP_REV(sc) == CHIP_REV_Ax) {
14919 SET_FLAGS(flags, MODE_E3_A0);
14920 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14921 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14926 SET_FLAGS(flags, MODE_MF);
14927 switch (sc->devinfo.mf_info.mf_mode) {
14928 case MULTI_FUNCTION_SD:
14929 SET_FLAGS(flags, MODE_MF_SD);
14931 case MULTI_FUNCTION_SI:
14932 SET_FLAGS(flags, MODE_MF_SI);
14934 case MULTI_FUNCTION_AFEX:
14935 SET_FLAGS(flags, MODE_MF_AFEX);
14939 SET_FLAGS(flags, MODE_SF);
14942 #if defined(__LITTLE_ENDIAN)
14943 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14944 #else /* __BIG_ENDIAN */
14945 SET_FLAGS(flags, MODE_BIG_ENDIAN);
14948 INIT_MODE_FLAGS(sc) = flags;
14952 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14954 struct bxe_fastpath *fp;
14955 bus_addr_t busaddr;
14956 int max_agg_queues;
14958 bus_size_t max_size;
14959 bus_size_t max_seg_size;
14964 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14966 /* allocate the parent bus DMA tag */
14967 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14969 0, /* boundary limit */
14970 BUS_SPACE_MAXADDR, /* restricted low */
14971 BUS_SPACE_MAXADDR, /* restricted hi */
14972 NULL, /* addr filter() */
14973 NULL, /* addr filter() arg */
14974 BUS_SPACE_MAXSIZE_32BIT, /* max map size */
14975 BUS_SPACE_UNRESTRICTED, /* num discontinuous */
14976 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
14979 NULL, /* lock() arg */
14980 &sc->parent_dma_tag); /* returned dma tag */
14982 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14986 /************************/
14987 /* DEFAULT STATUS BLOCK */
14988 /************************/
14990 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14991 &sc->def_sb_dma, "default status block") != 0) {
14993 bus_dma_tag_destroy(sc->parent_dma_tag);
14997 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
15003 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
15004 &sc->eq_dma, "event queue") != 0) {
15006 bxe_dma_free(sc, &sc->def_sb_dma);
15008 bus_dma_tag_destroy(sc->parent_dma_tag);
15012 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
15018 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
15019 &sc->sp_dma, "slow path") != 0) {
15021 bxe_dma_free(sc, &sc->eq_dma);
15023 bxe_dma_free(sc, &sc->def_sb_dma);
15025 bus_dma_tag_destroy(sc->parent_dma_tag);
15029 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
15031 /*******************/
15032 /* SLOW PATH QUEUE */
15033 /*******************/
15035 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
15036 &sc->spq_dma, "slow path queue") != 0) {
15038 bxe_dma_free(sc, &sc->sp_dma);
15040 bxe_dma_free(sc, &sc->eq_dma);
15042 bxe_dma_free(sc, &sc->def_sb_dma);
15044 bus_dma_tag_destroy(sc->parent_dma_tag);
15048 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
15050 /***************************/
15051 /* FW DECOMPRESSION BUFFER */
15052 /***************************/
15054 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
15055 "fw decompression buffer") != 0) {
15057 bxe_dma_free(sc, &sc->spq_dma);
15059 bxe_dma_free(sc, &sc->sp_dma);
15061 bxe_dma_free(sc, &sc->eq_dma);
15063 bxe_dma_free(sc, &sc->def_sb_dma);
15065 bus_dma_tag_destroy(sc->parent_dma_tag);
15069 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
15072 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
15074 bxe_dma_free(sc, &sc->gz_buf_dma);
15076 bxe_dma_free(sc, &sc->spq_dma);
15078 bxe_dma_free(sc, &sc->sp_dma);
15080 bxe_dma_free(sc, &sc->eq_dma);
15082 bxe_dma_free(sc, &sc->def_sb_dma);
15084 bus_dma_tag_destroy(sc->parent_dma_tag);
15092 /* allocate DMA memory for each fastpath structure */
15093 for (i = 0; i < sc->num_queues; i++) {
15098 /*******************/
15099 /* FP STATUS BLOCK */
15100 /*******************/
15102 snprintf(buf, sizeof(buf), "fp %d status block", i);
15103 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
15104 &fp->sb_dma, buf) != 0) {
15105 /* XXX unwind and free previous fastpath allocations */
15106 BLOGE(sc, "Failed to alloc %s\n", buf);
15109 if (CHIP_IS_E2E3(sc)) {
15110 fp->status_block.e2_sb =
15111 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
15113 fp->status_block.e1x_sb =
15114 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
15118 /******************/
15119 /* FP TX BD CHAIN */
15120 /******************/
15122 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
15123 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
15124 &fp->tx_dma, buf) != 0) {
15125 /* XXX unwind and free previous fastpath allocations */
15126 BLOGE(sc, "Failed to alloc %s\n", buf);
15129 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
15132 /* link together the tx bd chain pages */
15133 for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
15134 /* index into the tx bd chain array to last entry per page */
15135 struct eth_tx_next_bd *tx_next_bd =
15136 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
15137 /* point to the next page and wrap from last page */
15138 busaddr = (fp->tx_dma.paddr +
15139 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
15140 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
15141 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
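        /*
         * Example of the wrap above: with a hypothetical TX_BD_NUM_PAGES
         * of 4, j=1..3 link page j to page j+1, and j=4 links the last
         * page back to the first because (4 % TX_BD_NUM_PAGES) == 0,
         * forming a circular chain.
         */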
15144 /******************/
15145 /* FP RX BD CHAIN */
15146 /******************/
15148 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
15149 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
15150 &fp->rx_dma, buf) != 0) {
15151 /* XXX unwind and free previous fastpath allocations */
15152 BLOGE(sc, "Failed to alloc %s\n", buf);
15155 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
15158 /* link together the rx bd chain pages */
15159 for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
15160 /* index into the rx bd chain array to the second-to-last entry per page */
15161 struct eth_rx_bd *rx_bd =
15162 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
15163 /* point to the next page and wrap from last page */
15164 busaddr = (fp->rx_dma.paddr +
15165 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
15166 rx_bd->addr_hi = htole32(U64_HI(busaddr));
15167 rx_bd->addr_lo = htole32(U64_LO(busaddr));
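        /*
         * Note: unlike the tx chain, the rx bd chain reserves the last two
         * descriptor slots of each page, and the next-page address is
         * written at the first of them, hence the "- 2" index above.
         */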
15170 /*******************/
15171 /* FP RX RCQ CHAIN */
15172 /*******************/
15174 snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
15175 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
15176 &fp->rcq_dma, buf) != 0) {
15177 /* XXX unwind and free previous fastpath allocations */
15178 BLOGE(sc, "Failed to alloc %s\n", buf);
15181 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
15184 /* link together the rcq chain pages */
15185 for (j = 1; j <= RCQ_NUM_PAGES; j++) {
15186 /* index into the rcq chain array to last entry per page */
15187 struct eth_rx_cqe_next_page *rx_cqe_next =
15188 (struct eth_rx_cqe_next_page *)
15189 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
15190 /* point to the next page and wrap from last page */
15191 busaddr = (fp->rcq_dma.paddr +
15192 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
15193 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
15194 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
15197 /*******************/
15198 /* FP RX SGE CHAIN */
15199 /*******************/
15201 snprintf(buf, sizeof(buf), "fp %d sge chain", i);
15202 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
15203 &fp->rx_sge_dma, buf) != 0) {
15204 /* XXX unwind and free previous fastpath allocations */
15205 BLOGE(sc, "Failed to alloc %s\n", buf);
15208 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
15211 /* link together the sge chain pages */
15212 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
15213 /* index into the sge chain array to the second-to-last entry per page */
15214 struct eth_rx_sge *rx_sge =
15215 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
15216 /* point to the next page and wrap from last page */
15217 busaddr = (fp->rx_sge_dma.paddr +
15218 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
15219 rx_sge->addr_hi = htole32(U64_HI(busaddr));
15220 rx_sge->addr_lo = htole32(U64_LO(busaddr));
15223 /***********************/
15224 /* FP TX MBUF DMA MAPS */
15225 /***********************/
15227 /* set required sizes before mapping to conserve resources */
15228 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
15229 max_size = BXE_TSO_MAX_SIZE;
15230 max_segments = BXE_TSO_MAX_SEGMENTS;
15231 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
15233 max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
15234 max_segments = BXE_MAX_SEGMENTS;
15235 max_seg_size = MCLBYTES;
15238 /* create a dma tag for the tx mbufs */
15239 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15241 0, /* boundary limit */
15242 BUS_SPACE_MAXADDR, /* restricted low */
15243 BUS_SPACE_MAXADDR, /* restricted hi */
15244 NULL, /* addr filter() */
15245 NULL, /* addr filter() arg */
15246 max_size, /* max map size */
15247 max_segments, /* num discontinuous */
15248 max_seg_size, /* max seg size */
15251 NULL, /* lock() arg */
15252 &fp->tx_mbuf_tag); /* returned dma tag */
15254 /* XXX unwind and free previous fastpath allocations */
15255 BLOGE(sc, "Failed to create dma tag for "
15256 "'fp %d tx mbufs' (%d)\n",
15261 /* create dma maps for each of the tx mbuf clusters */
15262 for (j = 0; j < TX_BD_TOTAL; j++) {
15263 if (bus_dmamap_create(fp->tx_mbuf_tag,
15265 &fp->tx_mbuf_chain[j].m_map)) {
15266 /* XXX unwind and free previous fastpath allocations */
15267 BLOGE(sc, "Failed to create dma map for "
15268 "'fp %d tx mbuf %d' (%d)\n",
15274 /***********************/
15275 /* FP RX MBUF DMA MAPS */
15276 /***********************/
15278 /* create a dma tag for the rx mbufs */
15279 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15281 0, /* boundary limit */
15282 BUS_SPACE_MAXADDR, /* restricted low */
15283 BUS_SPACE_MAXADDR, /* restricted hi */
15284 NULL, /* addr filter() */
15285 NULL, /* addr filter() arg */
15286 MJUM9BYTES, /* max map size */
15287 1, /* num discontinuous */
15288 MJUM9BYTES, /* max seg size */
15291 NULL, /* lock() arg */
15292 &fp->rx_mbuf_tag); /* returned dma tag */
15294 /* XXX unwind and free previous fastpath allocations */
15295 BLOGE(sc, "Failed to create dma tag for "
15296 "'fp %d rx mbufs' (%d)\n",
15301 /* create dma maps for each of the rx mbuf clusters */
15302 for (j = 0; j < RX_BD_TOTAL; j++) {
15303 if (bus_dmamap_create(fp->rx_mbuf_tag,
15305 &fp->rx_mbuf_chain[j].m_map)) {
15306 /* XXX unwind and free previous fastpath allocations */
15307 BLOGE(sc, "Failed to create dma map for "
15308 "'fp %d rx mbuf %d' (%d)\n",
15314 /* create dma map for the spare rx mbuf cluster */
15315 if (bus_dmamap_create(fp->rx_mbuf_tag,
15317 &fp->rx_mbuf_spare_map)) {
15318 /* XXX unwind and free previous fastpath allocations */
15319 BLOGE(sc, "Failed to create dma map for "
15320 "'fp %d spare rx mbuf' (%d)\n",
15325 /***************************/
15326 /* FP RX SGE MBUF DMA MAPS */
15327 /***************************/
15329 /* create a dma tag for the rx sge mbufs */
15330 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15332 0, /* boundary limit */
15333 BUS_SPACE_MAXADDR, /* restricted low */
15334 BUS_SPACE_MAXADDR, /* restricted hi */
15335 NULL, /* addr filter() */
15336 NULL, /* addr filter() arg */
15337 BCM_PAGE_SIZE, /* max map size */
15338 1, /* num discontinuous */
15339 BCM_PAGE_SIZE, /* max seg size */
15342 NULL, /* lock() arg */
15343 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15345 /* XXX unwind and free previous fastpath allocations */
15346 BLOGE(sc, "Failed to create dma tag for "
15347 "'fp %d rx sge mbufs' (%d)\n",
15352 /* create dma maps for the rx sge mbuf clusters */
15353 for (j = 0; j < RX_SGE_TOTAL; j++) {
15354 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15356 &fp->rx_sge_mbuf_chain[j].m_map)) {
15357 /* XXX unwind and free previous fastpath allocations */
15358 BLOGE(sc, "Failed to create dma map for "
15359 "'fp %d rx sge mbuf %d' (%d)\n",
15365 /* create dma map for the spare rx sge mbuf cluster */
15366 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15368 &fp->rx_sge_mbuf_spare_map)) {
15369 /* XXX unwind and free previous fastpath allocations */
15370 BLOGE(sc, "Failed to create dma map for "
15371 "'fp %d spare rx sge mbuf' (%d)\n",
15376 /***************************/
15377 /* FP RX TPA MBUF DMA MAPS */
15378 /***************************/
15380 /* create dma maps for the rx tpa mbuf clusters */
15381 max_agg_queues = MAX_AGG_QS(sc);
15383 for (j = 0; j < max_agg_queues; j++) {
15384 if (bus_dmamap_create(fp->rx_mbuf_tag,
15386 &fp->rx_tpa_info[j].bd.m_map)) {
15387 /* XXX unwind and free previous fastpath allocations */
15388 BLOGE(sc, "Failed to create dma map for "
15389 "'fp %d rx tpa mbuf %d' (%d)\n",
15395 /* create dma map for the spare rx tpa mbuf cluster */
15396 if (bus_dmamap_create(fp->rx_mbuf_tag,
15398 &fp->rx_tpa_info_mbuf_spare_map)) {
15399 /* XXX unwind and free previous fastpath allocations */
15400 BLOGE(sc, "Failed to create dma map for "
15401 "'fp %d spare rx tpa mbuf' (%d)\n",
15406 bxe_init_sge_ring_bit_mask(fp);
15413 bxe_free_hsi_mem(struct bxe_softc *sc)
15415 struct bxe_fastpath *fp;
15416 int max_agg_queues;
15419 if (sc->parent_dma_tag == NULL) {
15420 return; /* assume nothing was allocated */
15423 for (i = 0; i < sc->num_queues; i++) {
15426 /*******************/
15427 /* FP STATUS BLOCK */
15428 /*******************/
15430 bxe_dma_free(sc, &fp->sb_dma);
15431 memset(&fp->status_block, 0, sizeof(fp->status_block));
15433 /******************/
15434 /* FP TX BD CHAIN */
15435 /******************/
15437 bxe_dma_free(sc, &fp->tx_dma);
15438 fp->tx_chain = NULL;
15440 /******************/
15441 /* FP RX BD CHAIN */
15442 /******************/
15444 bxe_dma_free(sc, &fp->rx_dma);
15445 fp->rx_chain = NULL;
15447 /*******************/
15448 /* FP RX RCQ CHAIN */
15449 /*******************/
15451 bxe_dma_free(sc, &fp->rcq_dma);
15452 fp->rcq_chain = NULL;
15454 /*******************/
15455 /* FP RX SGE CHAIN */
15456 /*******************/
15458 bxe_dma_free(sc, &fp->rx_sge_dma);
15459 fp->rx_sge_chain = NULL;
15461 /***********************/
15462 /* FP TX MBUF DMA MAPS */
15463 /***********************/
15465 if (fp->tx_mbuf_tag != NULL) {
15466 for (j = 0; j < TX_BD_TOTAL; j++) {
15467 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15468 bus_dmamap_unload(fp->tx_mbuf_tag,
15469 fp->tx_mbuf_chain[j].m_map);
15470 bus_dmamap_destroy(fp->tx_mbuf_tag,
15471 fp->tx_mbuf_chain[j].m_map);
15475 bus_dma_tag_destroy(fp->tx_mbuf_tag);
15476 fp->tx_mbuf_tag = NULL;
15479 /***********************/
15480 /* FP RX MBUF DMA MAPS */
15481 /***********************/
15483 if (fp->rx_mbuf_tag != NULL) {
15484 for (j = 0; j < RX_BD_TOTAL; j++) {
15485 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15486 bus_dmamap_unload(fp->rx_mbuf_tag,
15487 fp->rx_mbuf_chain[j].m_map);
15488 bus_dmamap_destroy(fp->rx_mbuf_tag,
15489 fp->rx_mbuf_chain[j].m_map);
15493 if (fp->rx_mbuf_spare_map != NULL) {
15494 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15495 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15498 /***************************/
15499 /* FP RX TPA MBUF DMA MAPS */
15500 /***************************/
15502 max_agg_queues = MAX_AGG_QS(sc);
15504 for (j = 0; j < max_agg_queues; j++) {
15505 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15506 bus_dmamap_unload(fp->rx_mbuf_tag,
15507 fp->rx_tpa_info[j].bd.m_map);
15508 bus_dmamap_destroy(fp->rx_mbuf_tag,
15509 fp->rx_tpa_info[j].bd.m_map);
15513 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15514 bus_dmamap_unload(fp->rx_mbuf_tag,
15515 fp->rx_tpa_info_mbuf_spare_map);
15516 bus_dmamap_destroy(fp->rx_mbuf_tag,
15517 fp->rx_tpa_info_mbuf_spare_map);
15520 bus_dma_tag_destroy(fp->rx_mbuf_tag);
15521 fp->rx_mbuf_tag = NULL;
15524 /***************************/
15525 /* FP RX SGE MBUF DMA MAPS */
15526 /***************************/
15528 if (fp->rx_sge_mbuf_tag != NULL) {
15529 for (j = 0; j < RX_SGE_TOTAL; j++) {
15530 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15531 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15532 fp->rx_sge_mbuf_chain[j].m_map);
15533 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15534 fp->rx_sge_mbuf_chain[j].m_map);
15538 if (fp->rx_sge_mbuf_spare_map != NULL) {
15539 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15540 fp->rx_sge_mbuf_spare_map);
15541 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15542 fp->rx_sge_mbuf_spare_map);
15545 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15546 fp->rx_sge_mbuf_tag = NULL;
15550 /***************************/
15551 /* FW DECOMPRESSION BUFFER */
15552 /***************************/
15554 bxe_dma_free(sc, &sc->gz_buf_dma);
15556 free(sc->gz_strm, M_DEVBUF);
15557 sc->gz_strm = NULL;
15559 /*******************/
15560 /* SLOW PATH QUEUE */
15561 /*******************/
15563 bxe_dma_free(sc, &sc->spq_dma);
15570 bxe_dma_free(sc, &sc->sp_dma);
15577 bxe_dma_free(sc, &sc->eq_dma);
15580 /************************/
15581 /* DEFAULT STATUS BLOCK */
15582 /************************/
15584 bxe_dma_free(sc, &sc->def_sb_dma);
15587 bus_dma_tag_destroy(sc->parent_dma_tag);
15588 sc->parent_dma_tag = NULL;
15592 * A previous driver DMAE transaction may have occurred when the pre-boot
15593 * stage ended and boot began. This would invalidate the transaction's
15594 * addresses, resulting in the was-error bit being set in the PCI block and
15595 * causing all hw-to-host PCIe transactions to time out. If this happened,
15596 * we want to clear the interrupt which detected this from the pglueb and the was-done bit
15599 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15603 if (!CHIP_IS_E1x(sc)) {
15604 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15605 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15606 BLOGD(sc, DBG_LOAD,
15607 "Clearing 'was-error' bit that was set in pglueb");
15608 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15614 bxe_prev_mcp_done(struct bxe_softc *sc)
15616 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15617 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15619 BLOGE(sc, "MCP response failure, aborting\n");
15626 static struct bxe_prev_list_node *
15627 bxe_prev_path_get_entry(struct bxe_softc *sc)
15629 struct bxe_prev_list_node *tmp;
15631 LIST_FOREACH(tmp, &bxe_prev_list, node) {
15632 if ((sc->pcie_bus == tmp->bus) &&
15633 (sc->pcie_device == tmp->slot) &&
15634 (SC_PATH(sc) == tmp->path)) {
15643 bxe_prev_is_path_marked(struct bxe_softc *sc)
15645 struct bxe_prev_list_node *tmp;
15648 mtx_lock(&bxe_prev_mtx);
15650 tmp = bxe_prev_path_get_entry(sc);
15653 BLOGD(sc, DBG_LOAD,
15654 "Path %d/%d/%d was marked by AER\n",
15655 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15658 BLOGD(sc, DBG_LOAD,
15659 "Path %d/%d/%d was already cleaned from previous drivers\n",
15660 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15664 mtx_unlock(&bxe_prev_mtx);
15670 bxe_prev_mark_path(struct bxe_softc *sc,
15671 uint8_t after_undi)
15673 struct bxe_prev_list_node *tmp;
15675 mtx_lock(&bxe_prev_mtx);
15677 /* Check whether the entry for this path already exists */
15678 tmp = bxe_prev_path_get_entry(sc);
15681 BLOGD(sc, DBG_LOAD,
15682 "Re-marking AER in path %d/%d/%d\n",
15683 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15685 BLOGD(sc, DBG_LOAD,
15686 "Removing AER indication from path %d/%d/%d\n",
15687 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15691 mtx_unlock(&bxe_prev_mtx);
15695 mtx_unlock(&bxe_prev_mtx);
15697 /* Create an entry for this path and add it */
15698 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15699 (M_NOWAIT | M_ZERO));
15701 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15705 tmp->bus = sc->pcie_bus;
15706 tmp->slot = sc->pcie_device;
15707 tmp->path = SC_PATH(sc);
15709 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15711 mtx_lock(&bxe_prev_mtx);
15713 BLOGD(sc, DBG_LOAD,
15714 "Marked path %d/%d/%d - finished previous unload\n",
15715 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15716 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15718 mtx_unlock(&bxe_prev_mtx);
15724 bxe_do_flr(struct bxe_softc *sc)
15728 /* only E2 and onwards support FLR */
15729 if (CHIP_IS_E1x(sc)) {
15730 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15734 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15735 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15736 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15737 sc->devinfo.bc_ver);
15741 /* Wait for the Transaction Pending bit to clear */
15742 for (i = 0; i < 4; i++) {
15744 DELAY(((1 << (i - 1)) * 100) * 1000);
15747 if (!bxe_is_pcie_pending(sc)) {
15752 BLOGE(sc, "PCIE transaction is not cleared, "
15753 "proceeding with reset anyway\n");
15757 BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15758 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15763 struct bxe_mac_vals {
15764 uint32_t xmac_addr;
15766 uint32_t emac_addr;
15768 uint32_t umac_addr;
15770 uint32_t bmac_addr;
15771 uint32_t bmac_val[2];
15775 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15776 struct bxe_mac_vals *vals)
15778 uint32_t val, base_addr, offset, mask, reset_reg;
15779 uint8_t mac_stopped = FALSE;
15780 uint8_t port = SC_PORT(sc);
15781 uint32_t wb_data[2];
15783 /* reset addresses as they also mark which values were changed */
15784 vals->bmac_addr = 0;
15785 vals->umac_addr = 0;
15786 vals->xmac_addr = 0;
15787 vals->emac_addr = 0;
15789 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15791 if (!CHIP_IS_E3(sc)) {
15792 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15793 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15794 if ((mask & reset_reg) && val) {
15795 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15796 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15797 : NIG_REG_INGRESS_BMAC0_MEM;
15798 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15799 : BIGMAC_REGISTER_BMAC_CONTROL;
15802 * use rd/wr since we cannot use dmae. This is safe
15803 * since MCP won't access the bus due to the request
15804 * to unload, and no function on the path can be
15805 * loaded at this time.
15807 wb_data[0] = REG_RD(sc, base_addr + offset);
15808 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15809 vals->bmac_addr = base_addr + offset;
15810 vals->bmac_val[0] = wb_data[0];
15811 vals->bmac_val[1] = wb_data[1];
15812 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15813 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15814 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15817 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15818 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15819 vals->emac_val = REG_RD(sc, vals->emac_addr);
15820 REG_WR(sc, vals->emac_addr, 0);
15821 mac_stopped = TRUE;
15823 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15824 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15825 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15826 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15827 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15828 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15829 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15830 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15831 REG_WR(sc, vals->xmac_addr, 0);
15832 mac_stopped = TRUE;
15835 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15836 if (mask & reset_reg) {
15837 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15838 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15839 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15840 vals->umac_val = REG_RD(sc, vals->umac_addr);
15841 REG_WR(sc, vals->umac_addr, 0);
15842 mac_stopped = TRUE;
15851 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15852 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
15853 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
15854 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
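/*
 * Round-trip example of the packing above: a TSTORM producer word of
 * 0x00050003 unpacks to rcq = 0x0003 and bd = 0x0005; incrementing both
 * by one and repacking with BXE_PREV_UNDI_PROD(4, 6) yields 0x00060004.
 */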
15857 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15862 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15864 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15865 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15867 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15868 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15870 BLOGD(sc, DBG_LOAD,
15871 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15876 bxe_prev_unload_common(struct bxe_softc *sc)
15878 uint32_t reset_reg, tmp_reg = 0, rc;
15879 uint8_t prev_undi = FALSE;
15880 struct bxe_mac_vals mac_vals;
15881 uint32_t timer_count = 1000;
15885 * It is possible that a previous function received the 'common' answer
15886 * but has not loaded yet, creating a scenario where multiple functions
15887 * receive 'common' on the same path.
15889 BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15891 memset(&mac_vals, 0, sizeof(mac_vals));
15893 if (bxe_prev_is_path_marked(sc)) {
15894 return (bxe_prev_mcp_done(sc));
15897 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15899 /* Reset should be performed after BRB is emptied */
15900 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15901 /* Close the MAC Rx to prevent BRB from filling up */
15902 bxe_prev_unload_close_mac(sc, &mac_vals);
15904 /* close LLH filters towards the BRB */
15905 elink_set_rx_filter(&sc->link_params, 0);
15908 * Check if the UNDI driver was previously loaded.
15909 * UNDI driver initializes CID offset for normal bell to 0x7
15911 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15912 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15913 if (tmp_reg == 0x7) {
15914 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15916 /* clear the UNDI indication */
15917 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15918 /* clear possible idle check errors */
15919 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15923 /* wait until BRB is empty */
15924 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15925 while (timer_count) {
15926 prev_brb = tmp_reg;
15928 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15933 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15935 /* reset timer as long as BRB actually gets emptied */
15936 if (prev_brb > tmp_reg) {
15937 timer_count = 1000;
15942 /* If UNDI resides in memory, manually increment it */
15944 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15950 if (!timer_count) {
15951 BLOGE(sc, "Failed to empty BRB\n");
15955 /* No packets are in the pipeline, path is ready for reset */
15956 bxe_reset_common(sc);
15958 if (mac_vals.xmac_addr) {
15959 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15961 if (mac_vals.umac_addr) {
15962 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15964 if (mac_vals.emac_addr) {
15965 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15967 if (mac_vals.bmac_addr) {
15968 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15969 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15972 rc = bxe_prev_mark_path(sc, prev_undi);
15974 bxe_prev_mcp_done(sc);
15978 return (bxe_prev_mcp_done(sc));
15982 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15986 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15988 /* Test if previous unload process was already finished for this path */
15989 if (bxe_prev_is_path_marked(sc)) {
15990 return (bxe_prev_mcp_done(sc));
15993 BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15996 * If function has FLR capabilities, and existing FW version matches
15997 * the one required, then FLR will be sufficient to clean any residue
15998 * left by previous driver
16000 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
16002 /* fw version is good */
16003 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
16004 rc = bxe_do_flr(sc);
16008 /* FLR was performed */
16009 BLOGD(sc, DBG_LOAD, "FLR successful\n");
16013 BLOGD(sc, DBG_LOAD, "Could not FLR\n");
16015 /* Close the MCP request, return failure */
16016 rc = bxe_prev_mcp_done(sc);
16018 rc = BXE_PREV_WAIT_NEEDED;
16025 bxe_prev_unload(struct bxe_softc *sc)
16027 int time_counter = 10;
16028 uint32_t fw, hw_lock_reg, hw_lock_val;
16032 * Clear HW from errors which may have resulted from an interrupted
16033 * DMAE transaction.
16035 bxe_prev_interrupted_dmae(sc);
16037 /* Release previously held locks */
16039 (SC_FUNC(sc) <= 5) ?
16040 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
16041 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
16043 hw_lock_val = (REG_RD(sc, hw_lock_reg));
16045 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
16046 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
16047 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
16048 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
16050 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
16051 REG_WR(sc, hw_lock_reg, 0xffffffff);
16053 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
16056 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
16057 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
16058 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
16062 /* Lock MCP using an unload request */
16063 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
16065 BLOGE(sc, "MCP response failure, aborting\n");
16070 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
16071 rc = bxe_prev_unload_common(sc);
16075 /* a non-common reply from MCP might require looping */
16076 rc = bxe_prev_unload_uncommon(sc);
16077 if (rc != BXE_PREV_WAIT_NEEDED) {
16082 } while (--time_counter);
16084 if (!time_counter || rc) {
16085 BLOGE(sc, "Failed to unload previous driver!\n");
16093 bxe_dcbx_set_state(struct bxe_softc *sc,
16095 uint32_t dcbx_enabled)
16097 if (!CHIP_IS_E1x(sc)) {
16098 sc->dcb_state = dcb_on;
16099 sc->dcbx_enabled = dcbx_enabled;
16101 sc->dcb_state = FALSE;
16102 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
16104 BLOGD(sc, DBG_LOAD,
16105 "DCB state [%s:%s]\n",
16106 dcb_on ? "ON" : "OFF",
16107 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
16108 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
16109 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
16110 "on-chip with negotiation" : "invalid");
16113 /* must be called after sriov-enable */
16115 bxe_set_qm_cid_count(struct bxe_softc *sc)
16117 int cid_count = BXE_L2_MAX_CID(sc);
16119 if (IS_SRIOV(sc)) {
16120 cid_count += BXE_VF_CIDS;
16123 if (CNIC_SUPPORT(sc)) {
16124 cid_count += CNIC_CID_MAX;
16127 return (roundup(cid_count, QM_CID_ROUND));
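    /*
     * Example (hypothetical constants): with 48 L2 CIDs, CNIC support
     * adding 256 more, and QM_CID_ROUND = 1024, roundup(304, 1024)
     * returns 1024.
     */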
16131 bxe_init_multi_cos(struct bxe_softc *sc)
16135 uint32_t pri_map = 0; /* XXX change to user config */
16137 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
16138 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
16139 if (cos < sc->max_cos) {
16140 sc->prio_to_cos[pri] = cos;
16142 BLOGW(sc, "Invalid COS %d for priority %d "
16143 "(max COS is %d), setting to 0\n",
16144 cos, pri, (sc->max_cos - 1));
16145 sc->prio_to_cos[pri] = 0;
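    /*
     * Example: a hypothetical pri_map of 0x00000010 maps priority 1 to
     * COS 1 ((0x10 >> 4) & 0xf) and every other priority to COS 0,
     * assuming max_cos is large enough.
     */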
16151 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
16153 struct bxe_softc *sc;
16157 error = sysctl_handle_int(oidp, &result, 0, req);
16159 if (error || !req->newptr) {
16164 sc = (struct bxe_softc *)arg1;
16165 BLOGI(sc, "... dumping driver state ...\n");
16173 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
16175 struct bxe_softc *sc = (struct bxe_softc *)arg1;
16176 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
16178 uint64_t value = 0;
16179 int index = (int)arg2;
16181 if (index >= BXE_NUM_ETH_STATS) {
16182 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
16186 offset = (eth_stats + bxe_eth_stats_arr[index].offset);
16188 switch (bxe_eth_stats_arr[index].size) {
16190 value = (uint64_t)*offset;
16193 value = HILO_U64(*offset, *(offset + 1));
16196 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
16197 index, bxe_eth_stats_arr[index].size);
16201 return (sysctl_handle_64(oidp, &value, 0, req));
16205 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
16207 struct bxe_softc *sc = (struct bxe_softc *)arg1;
16208 uint32_t *eth_stats;
16210 uint64_t value = 0;
16211 uint32_t q_stat = (uint32_t)arg2;
16212 uint32_t fp_index = ((q_stat >> 16) & 0xffff);
16213 uint32_t index = (q_stat & 0xffff);
16215 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
16217 if (index >= BXE_NUM_ETH_Q_STATS) {
16218 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
16222 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
16224 switch (bxe_eth_q_stats_arr[index].size) {
16226 value = (uint64_t)*offset;
16229 value = HILO_U64(*offset, *(offset + 1));
16232 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
16233 index, bxe_eth_q_stats_arr[index].size);
16237 return (sysctl_handle_64(oidp, &value, 0, req));
16241 bxe_add_sysctls(struct bxe_softc *sc)
16243 struct sysctl_ctx_list *ctx;
16244 struct sysctl_oid_list *children;
16245 struct sysctl_oid *queue_top, *queue;
16246 struct sysctl_oid_list *queue_top_children, *queue_children;
16247 char queue_num_buf[32];
16251 ctx = device_get_sysctl_ctx(sc->dev);
16252 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
16254 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16255 CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16258 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16259 CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
16260 "bootcode version");
16262 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16263 BCM_5710_FW_MAJOR_VERSION,
16264 BCM_5710_FW_MINOR_VERSION,
16265 BCM_5710_FW_REVISION_VERSION,
16266 BCM_5710_FW_ENGINEERING_VERSION);
16267 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16268 CTLFLAG_RD, &sc->fw_ver_str, 0,
16269 "firmware version");
16271 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16272 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
16273 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
16274 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
16275 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16277 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16278 CTLFLAG_RD, &sc->mf_mode_str, 0,
16279 "multifunction mode");
16281 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16282 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16283 "multifunction vnics per port");
16285 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16286 CTLFLAG_RD, &sc->mac_addr_str, 0,
16289 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16290 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16291 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16292 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16294 sc->devinfo.pcie_link_width);
16295 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16296 CTLFLAG_RD, &sc->pci_link_str, 0,
16297 "pci link status");
16299 sc->debug = bxe_debug;
16300 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
16301 CTLFLAG_RW, &sc->debug, 0,
16302 "debug logging mode");
16304 sc->rx_budget = bxe_rx_budget;
16305 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16306 CTLFLAG_RW, &sc->rx_budget, 0,
16307 "rx processing budget");
16309 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16310 CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
16311 bxe_sysctl_state, "IU", "dump driver state");
16313 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16314 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16315 bxe_eth_stats_arr[i].string,
16316 CTLTYPE_U64 | CTLFLAG_RD, sc, i,
16317 bxe_sysctl_eth_stat, "LU",
16318 bxe_eth_stats_arr[i].string);
16321 /* add a new parent node for all queues "dev.bxe.#.queue" */
16322 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16323 CTLFLAG_RD, NULL, "queue");
16324 queue_top_children = SYSCTL_CHILDREN(queue_top);
16326 for (i = 0; i < sc->num_queues; i++) {
16327 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16328 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16329 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16330 queue_num_buf, CTLFLAG_RD, NULL,
16332 queue_children = SYSCTL_CHILDREN(queue);
16334 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
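/* pack the queue index and stat index into arg2; decoded in bxe_sysctl_eth_q_stat() */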
16335 q_stat = ((i << 16) | j);
16336 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16337 bxe_eth_q_stats_arr[j].string,
16338 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
16339 bxe_sysctl_eth_q_stat, "LU",
16340 bxe_eth_q_stats_arr[j].string);
16346 * Device attach function.
16348 * Allocates device resources, performs secondary chip identification, and
16349 * initializes driver instance variables. This function is called from driver
16350 * load after a successful probe.
16353 * 0 = Success, >0 = Failure
16356 bxe_attach(device_t dev)
16358 struct bxe_softc *sc;
16360 sc = device_get_softc(dev);
16362 BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16364 sc->state = BXE_STATE_CLOSED;
16367 sc->unit = device_get_unit(dev);
16369 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16371 sc->pcie_bus = pci_get_bus(dev);
16372 sc->pcie_device = pci_get_slot(dev);
16373 sc->pcie_func = pci_get_function(dev);
16375 /* enable bus master capability */
16376 pci_enable_busmaster(dev);
16379 if (bxe_allocate_bars(sc) != 0) {
16383 /* initialize the mutexes */
16384 bxe_init_mutexes(sc);
16386 /* prepare the periodic callout */
16387 callout_init(&sc->periodic_callout, 0);
16389 /* prepare the chip taskqueue */
16390 sc->chip_tq_flags = CHIP_TQ_NONE;
16391 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16392 "bxe%d_chip_tq", sc->unit);
16393 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16394 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16395 taskqueue_thread_enqueue,
16397 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16398 "%s", sc->chip_tq_name);
16400 /* get device info and set params */
16401 if (bxe_get_device_info(sc) != 0) {
16402 BLOGE(sc, "failed to get device info\n");
16403 bxe_deallocate_bars(sc);
16404 pci_disable_busmaster(dev);
16408 /* get final misc params */
16409 bxe_get_params(sc);
16411 /* set the default MTU (changed via ifconfig) */
16412 sc->mtu = ETHERMTU;
16414 bxe_set_modes_bitmap(sc);
16417 * If in AFEX mode and the function is configured for FCoE
16418 * then bail... no L2 allowed.
16421 /* get phy settings from shmem and 'and' against admin settings */
16422 bxe_get_phy_info(sc);
16424 /* initialize the FreeBSD ifnet interface */
16425 if (bxe_init_ifnet(sc) != 0) {
16426 bxe_release_mutexes(sc);
16427 bxe_deallocate_bars(sc);
16428 pci_disable_busmaster(dev);
16432 /* allocate device interrupts */
16433 if (bxe_interrupt_alloc(sc) != 0) {
16434 if (sc->ifnet != NULL) {
16435 ether_ifdetach(sc->ifnet);
16437 ifmedia_removeall(&sc->ifmedia);
16438 bxe_release_mutexes(sc);
16439 bxe_deallocate_bars(sc);
16440 pci_disable_busmaster(dev);
16445 if (bxe_alloc_ilt_mem(sc) != 0) {
16446 bxe_interrupt_free(sc);
16447 if (sc->ifnet != NULL) {
16448 ether_ifdetach(sc->ifnet);
16450 ifmedia_removeall(&sc->ifmedia);
16451 bxe_release_mutexes(sc);
16452 bxe_deallocate_bars(sc);
16453 pci_disable_busmaster(dev);
16457 /* allocate the host hardware/software hsi structures */
16458 if (bxe_alloc_hsi_mem(sc) != 0) {
16459 bxe_free_ilt_mem(sc);
16460 bxe_interrupt_free(sc);
16461 if (sc->ifnet != NULL) {
16462 ether_ifdetach(sc->ifnet);
16464 ifmedia_removeall(&sc->ifmedia);
16465 bxe_release_mutexes(sc);
16466 bxe_deallocate_bars(sc);
16467 pci_disable_busmaster(dev);
16471 /* need to reset chip if UNDI was active */
16472 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16475 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16476 DRV_MSG_SEQ_NUMBER_MASK);
16477 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16478 bxe_prev_unload(sc);
16483 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16485 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16486 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16487 SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16488 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16489 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16490 bxe_dcbx_init_params(sc);
16492 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16496 /* calculate qm_cid_count */
16497 sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16498 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16501 bxe_init_multi_cos(sc);
16503 bxe_add_sysctls(sc);
16509 * Device detach function.
16511 * Stops the controller, resets the controller, and releases resources.
16514 * 0 = Success, >0 = Failure
16517 bxe_detach(device_t dev)
16519 struct bxe_softc *sc;
16522 sc = device_get_softc(dev);
16524 BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16527 if (ifp != NULL && ifp->if_vlantrunk != NULL) {
16528 BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16532 /* stop the periodic callout */
16533 bxe_periodic_stop(sc);
16535 /* stop the chip taskqueue */
16536 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16538 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16539 taskqueue_free(sc->chip_tq);
16540 sc->chip_tq = NULL;
16543 /* stop and reset the controller if it was open */
16544 if (sc->state != BXE_STATE_CLOSED) {
16546 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16547 BXE_CORE_UNLOCK(sc);
16550 /* release the network interface */
16552 ether_ifdetach(ifp);
16554 ifmedia_removeall(&sc->ifmedia);
16556 /* XXX do the following based on driver state... */
16558 /* free the host hardware/software hsi structures */
16559 bxe_free_hsi_mem(sc);
16562 bxe_free_ilt_mem(sc);
16564 /* release the interrupts */
16565 bxe_interrupt_free(sc);
16567 /* Release the mutexes */
16568 bxe_release_mutexes(sc);
16570 /* Release the PCIe BAR mapped memory */
16571 bxe_deallocate_bars(sc);
16573 /* Release the FreeBSD interface. */
16574 if (sc->ifnet != NULL) {
16575 if_free(sc->ifnet);
16578 pci_disable_busmaster(dev);
16584 * Device shutdown function.
16586 * Stops and resets the controller.
16592 bxe_shutdown(device_t dev)
16594 struct bxe_softc *sc;
16596 sc = device_get_softc(dev);
16598 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16600 /* stop the periodic callout */
16601 bxe_periodic_stop(sc);
16604 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16605 BXE_CORE_UNLOCK(sc);
16611 bxe_igu_ack_sb(struct bxe_softc *sc,
16618 uint32_t igu_addr = sc->igu_base_addr;
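/* each status block owns an 8-byte slot in the IGU INT_ACK command region */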
16619 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16620 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16624 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16629 uint32_t data, ctl, cnt = 100;
16630 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16631 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16632 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16633 uint32_t sb_bit = 1 << (idu_sb_id%32);
16634 uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16635 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
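/* the cleanup command is issued via GRC: the data word is written first,
 * then the control word carrying the encoded address and function id */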
16637 /* Not supported in BC mode */
16638 if (CHIP_INT_MODE_IS_BC(sc)) {
16642 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16643 IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16644 IGU_REGULAR_CLEANUP_SET |
16645 IGU_REGULAR_BCLEANUP);
16647 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16648 (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16649 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16651 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16652 data, igu_addr_data);
16653 REG_WR(sc, igu_addr_data, data);
16655 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16656 BUS_SPACE_BARRIER_WRITE);
16659 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16660 ctl, igu_addr_ctl);
16661 REG_WR(sc, igu_addr_ctl, ctl);
16663 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16664 BUS_SPACE_BARRIER_WRITE);
16667 /* wait for clean up to finish */
16668 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16672 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16673 BLOGD(sc, DBG_LOAD,
16674 "Unable to finish IGU cleanup: "
16675 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16676 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16681 bxe_igu_clear_sb(struct bxe_softc *sc,
16684 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16693 /*******************/
16694 /* ECORE CALLBACKS */
16695 /*******************/
16698 bxe_reset_common(struct bxe_softc *sc)
16700 uint32_t val = 0x1400;
16703 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16705 if (CHIP_IS_E3(sc)) {
16706 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16707 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16710 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16714 bxe_common_init_phy(struct bxe_softc *sc)
16716 uint32_t shmem_base[2];
16717 uint32_t shmem2_base[2];
16719 /* Avoid common init in case MFW supports LFA */
16720 if (SHMEM2_RD(sc, size) >
16721 (uint32_t)offsetof(struct shmem2_region,
16722 lfa_host_addr[SC_PORT(sc)])) {
16726 shmem_base[0] = sc->devinfo.shmem_base;
16727 shmem2_base[0] = sc->devinfo.shmem2_base;
16729 if (!CHIP_IS_E1x(sc)) {
16730 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
16731 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16735 elink_common_init_phy(sc, shmem_base, shmem2_base,
16736 sc->devinfo.chip_id, 0);
16737 BXE_PHY_UNLOCK(sc);
16741 bxe_pf_disable(struct bxe_softc *sc)
16743 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16745 val &= ~IGU_PF_CONF_FUNC_EN;
16747 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16748 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16749 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16753 bxe_init_pxp(struct bxe_softc *sc)
16756 int r_order, w_order;
16758 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16760 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
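/* derive the PXP write/read orders from the PCIe max payload size and
 * max read request size fields of the device control register */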
16762 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16764 if (sc->mrrs == -1) {
16765 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16767 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16768 r_order = sc->mrrs;
16771 ecore_init_pxp_arb(sc, r_order, w_order);
16775 bxe_get_pretend_reg(struct bxe_softc *sc)
16777 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16778 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
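/* this assumes the per-function pretend registers are laid out contiguously,
 * e.g. absolute function 2 would use PXP2_REG_PGL_PRETEND_FUNC_F2 */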
16779 return (base + (SC_ABS_FUNC(sc)) * stride);
16783 * Called only on E1H or E2.
16784 * When pretending to be PF, the pretend value is the function number 0..7.
16785 * When pretending to be a VF, the pretend value is PF-num:VF-valid:ABS-VFID
16789 bxe_pretend_func(struct bxe_softc *sc,
16790 uint16_t pretend_func_val)
16792 uint32_t pretend_reg;
16794 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16798 /* get my own pretend register */
16799 pretend_reg = bxe_get_pretend_reg(sc);
16800 REG_WR(sc, pretend_reg, pretend_func_val);
16801 REG_RD(sc, pretend_reg);
16806 bxe_iov_init_dmae(struct bxe_softc *sc)
16810 BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF");
16812 if (!IS_SRIOV(sc)) {
16816 REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0);
16822 bxe_iov_init_ilt(struct bxe_softc *sc,
16828 struct ecore_ilt* ilt = sc->ilt;
16830 if (!IS_SRIOV(sc)) {
16834 /* set vfs ilt lines */
16835 for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) {
16836 struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i);
16837 ilt->lines[line+i].page = hw_cxt->addr;
16838 ilt->lines[line+i].page_mapping = hw_cxt->mapping;
16839 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
16847 bxe_iov_init_dq(struct bxe_softc *sc)
16851 if (!IS_SRIOV(sc)) {
16855 /* Set the DQ so that the CID reflects the abs_vfid */
16856 REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0);
16857 REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
16860 * Set the VFs' starting CID. If it's > 0, the preceding CIDs belong to the PF.
16863 REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
16865 /* The VF window size is the log2 of the max number of CIDs per VF */
16866 REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
16869 * The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
16870 * the PF doorbell size although the two are independent.
16872 REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST,
16873 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
16876 * No security checks for now -
16877 * configure single rule (out of 16) mask = 0x1, value = 0x0,
16878 * CID range 0 - 0x1ffff
16880 REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1);
16881 REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0);
16882 REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
16883 REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
16885 /* set the number of VF allowed doorbells to the full DQ range */
16886 REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
16888 /* set the VF doorbell threshold */
16889 REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
16893 /* send a NIG loopback debug packet */
16895 bxe_lb_pckt(struct bxe_softc *sc)
16897 uint32_t wb_write[3];
16899 /* Ethernet source and destination addresses */
16900 wb_write[0] = 0x55555555;
16901 wb_write[1] = 0x55555555;
16902 wb_write[2] = 0x20; /* SOP */
16903 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16905 /* NON-IP protocol */
16906 wb_write[0] = 0x09000000;
16907 wb_write[1] = 0x55555555;
16908 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
16909 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16913 * Some of the internal memories are not directly readable from the driver.
16914 * To test them we send debug packets.
16917 bxe_int_mem_test(struct bxe_softc *sc)
16923 if (CHIP_REV_IS_FPGA(sc)) {
16925 } else if (CHIP_REV_IS_EMUL(sc)) {
16931 /* disable inputs of parser neighbor blocks */
16932 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16933 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16934 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16935 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16937 /* write 0 to parser credits for CFC search request */
16938 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16940 /* send Ethernet packet */
16943 /* TODO: should the NIG statistics be reset here? */
16944 /* Wait until NIG register shows 1 packet of size 0x10 */
16945 count = 1000 * factor;
16947 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16948 val = *BXE_SP(sc, wb_data[0]);
16958 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16962 /* wait until PRS register shows 1 packet */
16963 count = (1000 * factor);
16965 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16975 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16979 /* Reset and init BRB, PRS */
16980 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16982 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16984 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16985 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16987 /* Disable inputs of parser neighbor blocks */
16988 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16989 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16990 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16991 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16993 /* Write 0 to parser credits for CFC search request */
16994 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16996 /* send 10 Ethernet packets */
16997 for (i = 0; i < 10; i++) {
17001 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
17002 count = (1000 * factor);
17004 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17005 val = *BXE_SP(sc, wb_data[0]);
17015 BLOGE(sc, "NIG timeout val=0x%x\n", val);
17019 /* Wait until PRS register shows 2 packets */
17020 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
17022 BLOGE(sc, "PRS timeout val=0x%x\n", val);
17025 /* Write 1 to parser credits for CFC search request */
17026 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
17028 /* Wait until PRS register shows 3 packets */
17029 DELAY(10000 * factor);
17031 /* Wait until NIG register shows 1 packet of size 0x10 */
17032 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
17034 BLOGE(sc, "PRS timeout val=0x%x\n", val);
17037 /* clear NIG EOP FIFO */
17038 for (i = 0; i < 11; i++) {
17039 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
17042 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
17044 BLOGE(sc, "clear of NIG failed\n");
17048 /* Reset and init BRB, PRS, NIG */
17049 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
17051 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
17053 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17054 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17055 if (!CNIC_SUPPORT(sc)) {
17057 REG_WR(sc, PRS_REG_NIC_MODE, 1);
17060 /* Enable inputs of parser neighbor blocks */
17061 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
17062 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
17063 REG_WR(sc, CFC_REG_DEBUG0, 0x0);
17064 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
17070 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
17077 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
17078 SHARED_HW_CFG_FAN_FAILURE_MASK);
17080 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
17084 * The fan failure mechanism is usually related to the PHY type since
17085 * the power consumption of the board is affected by the PHY. Currently,
17086 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
17088 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
17089 for (port = PORT_0; port < PORT_MAX; port++) {
17090 is_required |= elink_fan_failure_det_req(sc,
17091 sc->devinfo.shmem_base,
17092 sc->devinfo.shmem2_base,
17097 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
17099 if (is_required == 0) {
17103 /* Fan failure is indicated by SPIO 5 */
17104 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
17106 /* set to active low mode */
17107 val = REG_RD(sc, MISC_REG_SPIO_INT);
17108 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
17109 REG_WR(sc, MISC_REG_SPIO_INT, val);
17111 /* enable interrupt to signal the IGU */
17112 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17113 val |= MISC_SPIO_SPIO5;
17114 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
17118 bxe_enable_blocks_attention(struct bxe_softc *sc)
17122 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17123 if (!CHIP_IS_E1x(sc)) {
17124 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
17126 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
17128 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17129 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17131 * mask read length error interrupts in brb for parser
17132 * (parsing unit and 'checksum and crc' unit)
17133 * these errors are legal (PU reads fixed length and CAC can cause
17134 * read length error on truncated packets)
17136 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
17137 REG_WR(sc, QM_REG_QM_INT_MASK, 0);
17138 REG_WR(sc, TM_REG_TM_INT_MASK, 0);
17139 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
17140 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
17141 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
17142 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
17143 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
17144 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
17145 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
17146 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
17147 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
17148 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
17149 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
17150 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
17151 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
17152 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
17153 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
17154 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
17156 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
17157 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
17158 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
17159 if (!CHIP_IS_E1x(sc)) {
17160 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
17161 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
17163 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
17165 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
17166 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
17167 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
17168 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
17170 if (!CHIP_IS_E1x(sc)) {
17171 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
17172 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
17175 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
17176 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
17177 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
17178 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
17182 * bxe_init_hw_common - initialize the HW at the COMMON phase.
17184 * @sc: driver handle
17187 bxe_init_hw_common(struct bxe_softc *sc)
17189 uint8_t abs_func_id;
17192 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17196 * take the RESET lock to protect undi_unload flow from accessing
17197 * registers while we are resetting the chip
17199 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17201 bxe_reset_common(sc);
17203 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17206 if (CHIP_IS_E3(sc)) {
17207 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17208 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17211 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17213 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17215 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17216 BLOGD(sc, DBG_LOAD, "after misc block init\n");
17218 if (!CHIP_IS_E1x(sc)) {
17220 * 4-port mode or 2-port mode we need to turn off master-enable for
17221 * everyone. After that we turn it back on for self. So, we disregard
17222 * multi-function, and always disable all functions on the given path,
17223 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
17225 for (abs_func_id = SC_PATH(sc);
17226 abs_func_id < (E2_FUNC_MAX * 2);
17227 abs_func_id += 2) {
17228 if (abs_func_id == SC_ABS_FUNC(sc)) {
17229 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17233 bxe_pretend_func(sc, abs_func_id);
17235 /* clear pf enable */
17236 bxe_pf_disable(sc);
17238 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17242 BLOGD(sc, DBG_LOAD, "after pf disable\n");
17244 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17246 if (CHIP_IS_E1(sc)) {
17248 * enable HW interrupt from PXP on USDM overflow
17249 * bit 16 on INT_MASK_0
17251 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17254 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17257 #ifdef __BIG_ENDIAN
17258 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17259 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17260 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17261 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17262 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17263 /* make sure this value is 0 */
17264 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17266 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17267 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17268 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17269 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17270 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17273 ecore_ilt_init_page_size(sc, INITOP_SET);
17275 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17276 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17279 /* let the HW do its magic... */
17282 /* finish PXP init */
17283 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17285 BLOGE(sc, "PXP2 CFG failed\n");
17288 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17290 BLOGE(sc, "PXP2 RD_INIT failed\n");
17294 BLOGD(sc, DBG_LOAD, "after pxp init\n");
17297 * Timer bug workaround for E2 only. We need to set the entire ILT to have
17298 * entries with value "0" and valid bit on. This needs to be done by the
17299 * first PF that is loaded in a path (i.e. common phase)
17301 if (!CHIP_IS_E1x(sc)) {
17303 * In E2 there is a bug in the timers block that can cause function 6 / 7
17304 * (i.e. vnic3) to start even if it is marked as "scan-off".
17305 * This occurs when a different function (func2,3) is being marked
17306 * as "scan-off". Real-life scenario for example: if a driver is being
17307 * load-unloaded while func6,7 are down. This will cause the timer to access
17308 * the ilt, translate to a logical address and send a request to read/write.
17309 * Since the ilt for the function that is down is not valid, this will cause
17310 * a translation error which is unrecoverable.
17311 * The Workaround is intended to make sure that when this happens nothing
17312 * fatal will occur. The workaround:
17313 * 1. First PF driver which loads on a path will:
17314 * a. After taking the chip out of reset, by using pretend,
17315 * it will write "0" to the following registers of
17317 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17318 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17319 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17320 * And for itself it will write '1' to
17321 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17322 * dmae-operations (writing to pram for example.)
17323 * note: this could be done only for functions 6,7, but it is cleaner this way.
17325 * b. Write zero+valid to the entire ILT.
17326 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
17327 * VNIC3 (of that port). The range allocated will be the
17328 * entire ILT. This is needed to prevent ILT range error.
17329 * 2. Any PF driver load flow:
17330 * a. ILT update with the physical addresses of the allocated
17332 * b. Wait 20msec. - note that this timeout is needed to make
17333 * sure there are no requests in one of the PXP internal
17334 * queues with "old" ILT addresses.
17335 * c. PF enable in the PGLC.
17336 * d. Clear the was_error of the PF in the PGLC. (could have
17337 * occurred while driver was down)
17338 * e. PF enable in the CFC (WEAK + STRONG)
17339 * f. Timers scan enable
17340 * 3. PF driver unload flow:
17341 * a. Clear the Timers scan_en.
17342 * b. Polling for scan_on=0 for that PF.
17343 * c. Clear the PF enable bit in the PXP.
17344 * d. Clear the PF enable in the CFC (WEAK + STRONG)
17345 * e. Write zero+valid to all ILT entries (The valid bit must
17347 * f. If this is VNIC 3 of a port then also init
17348 * first_timers_ilt_entry to zero and last_timers_ilt_entry
17349 * to the last entry in the ILT.
17352 * Currently the PF error in the PGLC is non-recoverable.
17353 * In the future there will be a recovery routine for this error.
17354 * Currently attention is masked.
17355 * Having an MCP lock on the load/unload process does not guarantee that
17356 * there is no Timer disable during Func6/7 enable. This is because the
17357 * Timers scan is currently being cleared by the MCP on FLR.
17358 * Step 2.d can be done only for PF6/7 and the driver can also check if
17359 * there is error before clearing it. But the flow above is simpler and
17361 * All ILT entries are written by zero+valid and not just PF6/7
17362 * ILT entries since in the future the ILT entries allocation for
17363 * PF-s might be dynamic.
17365 struct ilt_client_info ilt_cli;
17366 struct ecore_ilt ilt;
17368 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17369 memset(&ilt, 0, sizeof(struct ecore_ilt));
17371 /* initialize dummy TM client */
17373 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
17374 ilt_cli.client_num = ILT_CLIENT_TM;
17377 * Step 1: set zeroes to all ilt page entries with valid bit on
17378 * Step 2: set the timers first/last ilt entry to point
17379 * to the entire range to prevent ILT range error for 3rd/4th
17380 * vnic (this code assumes existence of the vnic)
17382 * both steps performed by call to ecore_ilt_client_init_op()
17383 * with dummy TM client
17385 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17386 * and its counterpart are split registers
17389 bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17390 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17391 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17393 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17394 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17395 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17398 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17399 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17401 if (!CHIP_IS_E1x(sc)) {
17402 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17403 (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17405 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17406 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17408 /* let the HW do its magic... */
17411 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17412 } while (factor-- && (val != 1));
17415 BLOGE(sc, "ATC_INIT failed\n");
17420 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17422 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17424 bxe_iov_init_dmae(sc);
17426 /* clean the DMAE memory */
17427 sc->dmae_ready = 1;
17428 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17430 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17432 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17434 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17436 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17438 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17439 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17440 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17441 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17443 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17445 /* QM queues pointers table */
17446 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17448 /* soft reset pulse */
17449 REG_WR(sc, QM_REG_SOFT_RESET, 1);
17450 REG_WR(sc, QM_REG_SOFT_RESET, 0);
17452 if (CNIC_SUPPORT(sc))
17453 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17455 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17456 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17457 if (!CHIP_REV_IS_SLOW(sc)) {
17458 /* enable hw interrupt from doorbell Q */
17459 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17462 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17464 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17465 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17467 if (!CHIP_IS_E1(sc)) {
17468 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17471 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17472 if (IS_MF_AFEX(sc)) {
17474 * configure that AFEX and VLAN headers must be
17475 * received in AFEX mode
17477 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17478 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17479 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17480 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17481 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17484 * Bit-map indicating which L2 hdrs may appear
17485 * after the basic Ethernet header
17487 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17488 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17492 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17493 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17494 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17495 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17497 if (!CHIP_IS_E1x(sc)) {
17498 /* reset VFC memories */
17499 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17500 VFC_MEMORIES_RST_REG_CAM_RST |
17501 VFC_MEMORIES_RST_REG_RAM_RST);
17502 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17503 VFC_MEMORIES_RST_REG_CAM_RST |
17504 VFC_MEMORIES_RST_REG_RAM_RST);
17509 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17510 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17511 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17512 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17514 /* sync semi rtc */
17515 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17517 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17520 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17521 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17522 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17524 if (!CHIP_IS_E1x(sc)) {
17525 if (IS_MF_AFEX(sc)) {
17527 * configure that AFEX and VLAN headers must be
17528 * sent in AFEX mode
17530 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17531 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17532 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17533 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17534 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17536 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17537 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17541 REG_WR(sc, SRC_REG_SOFT_RST, 1);
17543 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17545 if (CNIC_SUPPORT(sc)) {
17546 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17547 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17548 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17549 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17550 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17551 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17552 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17553 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17554 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17555 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17557 REG_WR(sc, SRC_REG_SOFT_RST, 0);
17559 if (sizeof(union cdu_context) != 1024) {
17560 /* we currently assume that a context is 1024 bytes */
17561 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17562 (long)sizeof(union cdu_context));
17565 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
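/* the global params word embeds the 1024-byte cdu_context size validated above */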
17566 val = (4 << 24) + (0 << 12) + 1024;
17567 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17569 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17571 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17572 /* enable context validation interrupt from CFC */
17573 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17575 /* set the thresholds to prevent CFC/CDU race */
17576 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17577 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17579 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17580 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17583 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17584 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17586 /* Reset PCIE errors for debug */
17587 REG_WR(sc, 0x2814, 0xffffffff);
17588 REG_WR(sc, 0x3820, 0xffffffff);
17590 if (!CHIP_IS_E1x(sc)) {
17591 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17592 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17593 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17594 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17595 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17596 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17597 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17598 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17599 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17600 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17601 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17604 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17606 if (!CHIP_IS_E1(sc)) {
17607 /* in E3 this is done in the per-port section */
17608 if (!CHIP_IS_E3(sc))
17609 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17612 if (CHIP_IS_E1H(sc)) {
17613 /* not applicable for E2 (and above ...) */
17614 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17617 if (CHIP_REV_IS_SLOW(sc)) {
17621 /* finish CFC init */
17622 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17624 BLOGE(sc, "CFC LL_INIT failed\n");
17627 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17629 BLOGE(sc, "CFC AC_INIT failed\n");
17632 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17634 BLOGE(sc, "CFC CAM_INIT failed\n");
17637 REG_WR(sc, CFC_REG_DEBUG0, 0);
17639 if (CHIP_IS_E1(sc)) {
17640 /* read NIG statistic to see if this is our first up since powerup */
17641 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17642 val = *BXE_SP(sc, wb_data[0]);
17644 /* do internal memory self test */
17645 if ((val == 0) && bxe_int_mem_test(sc)) {
17646 BLOGE(sc, "internal mem self test failed\n");
17651 bxe_setup_fan_failure_detection(sc);
17653 /* clear PXP2 attentions */
17654 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17656 bxe_enable_blocks_attention(sc);
17658 if (!CHIP_REV_IS_SLOW(sc)) {
17659 ecore_enable_blocks_parity(sc);
17662 if (!BXE_NOMCP(sc)) {
17663 if (CHIP_IS_E1x(sc)) {
17664 bxe_common_init_phy(sc);
17672 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17674 * @sc: driver handle
17677 bxe_init_hw_common_chip(struct bxe_softc *sc)
17679 int rc = bxe_init_hw_common(sc);
17685 /* In E2 2-PORT mode, same ext phy is used for the two paths */
17686 if (!BXE_NOMCP(sc)) {
17687 bxe_common_init_phy(sc);
17694 bxe_init_hw_port(struct bxe_softc *sc)
17696 int port = SC_PORT(sc);
17697 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17698 uint32_t low, high;
17701 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17703 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17705 ecore_init_block(sc, BLOCK_MISC, init_phase);
17706 ecore_init_block(sc, BLOCK_PXP, init_phase);
17707 ecore_init_block(sc, BLOCK_PXP2, init_phase);
17710 * Timers bug workaround: the pf_master bit in pglue is disabled at the
17711 * common phase, so we need to enable it here before any DMAE access is
17712 * attempted. Therefore we manually add the enable-master to the
17713 * port phase (it also happens in the function phase).
17715 if (!CHIP_IS_E1x(sc)) {
17716 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17719 ecore_init_block(sc, BLOCK_ATC, init_phase);
17720 ecore_init_block(sc, BLOCK_DMAE, init_phase);
17721 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17722 ecore_init_block(sc, BLOCK_QM, init_phase);
17724 ecore_init_block(sc, BLOCK_TCM, init_phase);
17725 ecore_init_block(sc, BLOCK_UCM, init_phase);
17726 ecore_init_block(sc, BLOCK_CCM, init_phase);
17727 ecore_init_block(sc, BLOCK_XCM, init_phase);
17729 /* QM cid (connection) count */
17730 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17732 if (CNIC_SUPPORT(sc)) {
17733 ecore_init_block(sc, BLOCK_TM, init_phase);
17734 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17735 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17738 ecore_init_block(sc, BLOCK_DORQ, init_phase);
17740 ecore_init_block(sc, BLOCK_BRB1, init_phase);
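/* the pause thresholds below are expressed in 256-byte units (note the
 * /256 factors in the inline comments) */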
17742 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17744 low = (BXE_ONE_PORT(sc) ? 160 : 246);
17745 } else if (sc->mtu > 4096) {
17746 if (BXE_ONE_PORT(sc)) {
17750 /* (24*1024 + val*4)/256 */
17751 low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17754 low = (BXE_ONE_PORT(sc) ? 80 : 160);
17756 high = (low + 56); /* 14*1024/256 */
17757 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17758 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17761 if (CHIP_IS_MODE_4_PORT(sc)) {
17762 REG_WR(sc, SC_PORT(sc) ?
17763 BRB1_REG_MAC_GUARANTIED_1 :
17764 BRB1_REG_MAC_GUARANTIED_0, 40);
17767 ecore_init_block(sc, BLOCK_PRS, init_phase);
17768 if (CHIP_IS_E3B0(sc)) {
17769 if (IS_MF_AFEX(sc)) {
17770 /* configure headers for AFEX mode */
17771 REG_WR(sc, SC_PORT(sc) ?
17772 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17773 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17774 REG_WR(sc, SC_PORT(sc) ?
17775 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17776 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17777 REG_WR(sc, SC_PORT(sc) ?
17778 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17779 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17781 /* Ovlan headers exist only in multi-function +
17782 * switch-dependent mode; in switch-independent mode
17783 * there are no ovlan headers
17785 REG_WR(sc, SC_PORT(sc) ?
17786 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17787 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17788 (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17792 ecore_init_block(sc, BLOCK_TSDM, init_phase);
17793 ecore_init_block(sc, BLOCK_CSDM, init_phase);
17794 ecore_init_block(sc, BLOCK_USDM, init_phase);
17795 ecore_init_block(sc, BLOCK_XSDM, init_phase);
17797 ecore_init_block(sc, BLOCK_TSEM, init_phase);
17798 ecore_init_block(sc, BLOCK_USEM, init_phase);
17799 ecore_init_block(sc, BLOCK_CSEM, init_phase);
17800 ecore_init_block(sc, BLOCK_XSEM, init_phase);
17802 ecore_init_block(sc, BLOCK_UPB, init_phase);
17803 ecore_init_block(sc, BLOCK_XPB, init_phase);
17805 ecore_init_block(sc, BLOCK_PBF, init_phase);
17807 if (CHIP_IS_E1x(sc)) {
17808 /* configure PBF to work without PAUSE mtu 9000 */
17809 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17811 /* update threshold */
17812 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17813 /* update init credit */
17814 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17816 /* probe changes */
17817 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17819 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17822 if (CNIC_SUPPORT(sc)) {
17823 ecore_init_block(sc, BLOCK_SRC, init_phase);
17826 ecore_init_block(sc, BLOCK_CDU, init_phase);
17827 ecore_init_block(sc, BLOCK_CFC, init_phase);
17829 if (CHIP_IS_E1(sc)) {
17830 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17831 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17833 ecore_init_block(sc, BLOCK_HC, init_phase);
17835 ecore_init_block(sc, BLOCK_IGU, init_phase);
17837 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17838 /* init aeu_mask_attn_func_0/1:
17839 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17840 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17841 * bits 4-7 are used for "per vn group attention" */
17842 val = IS_MF(sc) ? 0xF7 : 0x7;
17843 /* Enable DCBX attention for all but E1 */
17844 val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17845 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17847 ecore_init_block(sc, BLOCK_NIG, init_phase);
17849 if (!CHIP_IS_E1x(sc)) {
17850 /* Bit-map indicating which L2 hdrs may appear after the
17851 * basic Ethernet header
17853 if (IS_MF_AFEX(sc)) {
17854 REG_WR(sc, SC_PORT(sc) ?
17855 NIG_REG_P1_HDRS_AFTER_BASIC :
17856 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17858 REG_WR(sc, SC_PORT(sc) ?
17859 NIG_REG_P1_HDRS_AFTER_BASIC :
17860 NIG_REG_P0_HDRS_AFTER_BASIC,
17861 IS_MF_SD(sc) ? 7 : 6);
17864 if (CHIP_IS_E3(sc)) {
17865 REG_WR(sc, SC_PORT(sc) ?
17866 NIG_REG_LLH1_MF_MODE :
17867 NIG_REG_LLH_MF_MODE, IS_MF(sc));
17870 if (!CHIP_IS_E3(sc)) {
17871 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17874 if (!CHIP_IS_E1(sc)) {
17875 /* 0x2 disable mf_ov, 0x1 enable */
17876 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17877 (IS_MF_SD(sc) ? 0x1 : 0x2));
17879 if (!CHIP_IS_E1x(sc)) {
17881 switch (sc->devinfo.mf_info.mf_mode) {
17882 case MULTI_FUNCTION_SD:
17885 case MULTI_FUNCTION_SI:
17886 case MULTI_FUNCTION_AFEX:
17891 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17892 NIG_REG_LLH0_CLS_TYPE), val);
17894 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17895 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17896 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17899 /* If SPIO5 is set to generate interrupts, enable it for this port */
17900 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17901 if (val & MISC_SPIO_SPIO5) {
17902 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17903 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17904 val = REG_RD(sc, reg_addr);
17905 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17906 REG_WR(sc, reg_addr, val);
17913 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17916 uint32_t poll_count)
17918 uint32_t cur_cnt = poll_count;
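/* spin until the register reads back 'expected' or the poll count expires;
 * the last value read is returned for the caller to check */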
17921 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17922 DELAY(FLR_WAIT_INTERVAL);
17929 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17934 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17937 BLOGE(sc, "%s usage count=%d\n", msg, val);
17944 /* Common routines with VF FLR cleanup */
17946 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17948 /* adjust polling timeout */
17949 if (CHIP_REV_IS_EMUL(sc)) {
17950 return (FLR_POLL_CNT * 2000);
17953 if (CHIP_REV_IS_FPGA(sc)) {
17954 return (FLR_POLL_CNT * 120);
17957 return (FLR_POLL_CNT);
17961 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17964 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17965 if (bxe_flr_clnup_poll_hw_counter(sc,
17966 CFC_REG_NUM_LCIDS_INSIDE_PF,
17967 "CFC PF usage counter timed out",
17972 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17973 if (bxe_flr_clnup_poll_hw_counter(sc,
17974 DORQ_REG_PF_USAGE_CNT,
17975 "DQ PF usage counter timed out",
17980 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17981 if (bxe_flr_clnup_poll_hw_counter(sc,
17982 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17983 "QM PF usage counter timed out",
17988 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17989 if (bxe_flr_clnup_poll_hw_counter(sc,
17990 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17991 "Timers VNIC usage counter timed out",
17996 if (bxe_flr_clnup_poll_hw_counter(sc,
17997 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17998 "Timers NUM_SCANS usage counter timed out",
18003 /* Wait for the DMAE PF usage counter to zero */
18004 if (bxe_flr_clnup_poll_hw_counter(sc,
18005 dmae_reg_go_c[INIT_DMAE_C(sc)],
18006 "DMAE dommand register timed out",
18014 #define OP_GEN_PARAM(param) \
18015 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
18016 #define OP_GEN_TYPE(type) \
18017 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
18018 #define OP_GEN_AGG_VECT(index) \
18019 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
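/*
 * The OP_GEN_* macros above compose the SDM "operation generator" command
 * that bxe_send_final_clnup() writes to XSDM_REG_OPERATION_GEN to trigger
 * the FW final-cleanup aggregated interrupt.
 */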
18022 bxe_send_final_clnup(struct bxe_softc *sc,
18023 uint8_t clnup_func,
18026 uint32_t op_gen_command = 0;
18027 uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
18028 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
18031 if (REG_RD(sc, comp_addr)) {
18032 BLOGE(sc, "Cleanup complete was not 0 before sending\n");
18036 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
18037 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
18038 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
18039 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
18041 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
18042 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
18044 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
18045 BLOGE(sc, "FW final cleanup did not succeed\n");
18046 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
18047 (REG_RD(sc, comp_addr)));
18048 bxe_panic(sc, ("FLR cleanup failed\n"));
18052 /* Zero the completion for the next FLR */
18053 REG_WR(sc, comp_addr, 0);
18059 bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
18060 struct pbf_pN_buf_regs *regs,
18061 uint32_t poll_count)
18063 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
18064 uint32_t cur_cnt = poll_count;
18066 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
18067 crd = crd_start = REG_RD(sc, regs->crd);
18068 init_crd = REG_RD(sc, regs->init_crd);
18070 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
18071 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
18072 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
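/* the freed-credit delta is computed with signed arithmetic so the
 * comparison below stays correct if the HW counter wraps */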
18074 while ((crd != init_crd) &&
18075 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
18076 (init_crd - crd_start))) {
18078 DELAY(FLR_WAIT_INTERVAL);
18079 crd = REG_RD(sc, regs->crd);
18080 crd_freed = REG_RD(sc, regs->crd_freed);
18082 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
18083 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
18084 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
18089 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
18090 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
18094 bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
18095 struct pbf_pN_cmd_regs *regs,
18096 uint32_t poll_count)
18098 uint32_t occup, to_free, freed, freed_start;
18099 uint32_t cur_cnt = poll_count;
18101 occup = to_free = REG_RD(sc, regs->lines_occup);
18102 freed = freed_start = REG_RD(sc, regs->lines_freed);
18104 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
18105 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
18108 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
18110 DELAY(FLR_WAIT_INTERVAL);
18111 occup = REG_RD(sc, regs->lines_occup);
18112 freed = REG_RD(sc, regs->lines_freed);
18114 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
18115 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
18116 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
18121 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
18122 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
18126 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
18128 struct pbf_pN_cmd_regs cmd_regs[] = {
18129 {0, (CHIP_IS_E3B0(sc)) ?
18130 PBF_REG_TQ_OCCUPANCY_Q0 :
18131 PBF_REG_P0_TQ_OCCUPANCY,
18132 (CHIP_IS_E3B0(sc)) ?
18133 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
18134 PBF_REG_P0_TQ_LINES_FREED_CNT},
18135 {1, (CHIP_IS_E3B0(sc)) ?
18136 PBF_REG_TQ_OCCUPANCY_Q1 :
18137 PBF_REG_P1_TQ_OCCUPANCY,
18138 (CHIP_IS_E3B0(sc)) ?
18139 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
18140 PBF_REG_P1_TQ_LINES_FREED_CNT},
18141 {4, (CHIP_IS_E3B0(sc)) ?
18142 PBF_REG_TQ_OCCUPANCY_LB_Q :
18143 PBF_REG_P4_TQ_OCCUPANCY,
18144 (CHIP_IS_E3B0(sc)) ?
18145 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
18146 PBF_REG_P4_TQ_LINES_FREED_CNT}
18149 struct pbf_pN_buf_regs buf_regs[] = {
18150 {0, (CHIP_IS_E3B0(sc)) ?
18151 PBF_REG_INIT_CRD_Q0 :
18152 PBF_REG_P0_INIT_CRD ,
18153 (CHIP_IS_E3B0(sc)) ?
18154 PBF_REG_CREDIT_Q0 :
18156 (CHIP_IS_E3B0(sc)) ?
18157 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
18158 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
18159 {1, (CHIP_IS_E3B0(sc)) ?
18160 PBF_REG_INIT_CRD_Q1 :
18161 PBF_REG_P1_INIT_CRD,
18162 (CHIP_IS_E3B0(sc)) ?
18163 PBF_REG_CREDIT_Q1 :
18165 (CHIP_IS_E3B0(sc)) ?
18166 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
18167 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
18168 {4, (CHIP_IS_E3B0(sc)) ?
18169 PBF_REG_INIT_CRD_LB_Q :
18170 PBF_REG_P4_INIT_CRD,
18171 (CHIP_IS_E3B0(sc)) ?
18172 PBF_REG_CREDIT_LB_Q :
18174 (CHIP_IS_E3B0(sc)) ?
18175 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
18176 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
18181 /* Verify the command queues are flushed P0, P1, P4 */
18182 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18183 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18186 /* Verify the transmission buffers are flushed P0, P1, P4 */
18187 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18188 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18193 bxe_hw_enable_status(struct bxe_softc *sc)
18197 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18198 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18200 val = REG_RD(sc, PBF_REG_DISABLE_PF);
18201 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18203 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18204 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18206 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18207 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18209 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18210 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18212 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18213 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18215 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18216 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18218 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18219 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18223 bxe_pf_flr_clnup(struct bxe_softc *sc)
18225 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18227 BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18229 /* Re-enable PF target read access */
18230 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18232 /* Poll HW usage counters */
18233 BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18234 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18238 /* Zero the igu 'trailing edge' and 'leading edge' */
18240 /* Send the FW cleanup command */
18241 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18247 /* Verify TX hw is flushed */
18248 bxe_tx_hw_flushed(sc, poll_cnt);
18250 /* Wait 100ms (not adjusted according to platform) */
18253 /* Verify no pending pci transactions */
18254 if (bxe_is_pcie_pending(sc)) {
18255 BLOGE(sc, "PCIE Transactions still pending\n");
18259 bxe_hw_enable_status(sc);
18262 * Master enable - Due to WB DMAE writes performed before this
18263 * register is re-initialized as part of the regular function init
18265 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18272 bxe_init_searcher(struct bxe_softc *sc)
18274 int port = SC_PORT(sc);
18275 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM);
18276 /* the T1 hash bits value determines the number of T1 entries */
18277 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
18282 bxe_init_hw_func(struct bxe_softc *sc)
18284 int port = SC_PORT(sc);
18285 int func = SC_FUNC(sc);
18286 int init_phase = PHASE_PF0 + func;
18287 struct ecore_ilt *ilt = sc->ilt;
18288 uint16_t cdu_ilt_start;
18289 uint32_t addr, val;
18290 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18291 int i, main_mem_width, rc;
18293 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18296 if (!CHIP_IS_E1x(sc)) {
18297 rc = bxe_pf_flr_clnup(sc);
18299 BLOGE(sc, "FLR cleanup failed!\n");
18300 // XXX bxe_fw_dump(sc);
18301 // XXX bxe_idle_chk(sc);
18306 /* set MSI reconfigure capability */
18307 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18308 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18309 val = REG_RD(sc, addr);
18310 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18311 REG_WR(sc, addr, val);
18314 ecore_init_block(sc, BLOCK_PXP, init_phase);
18315 ecore_init_block(sc, BLOCK_PXP2, init_phase);
18318 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18321 if (IS_SRIOV(sc)) {
18322 cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS;
18324 cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start);
18326 #if (BXE_FIRST_VF_CID > 0)
18328 * If BXE_FIRST_VF_CID > 0 then the PF L2 CIDs precede
18329 * those of the VFs, so the start line should be reset
18331 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
    for (i = 0; i < L2_ILT_LINES(sc); i++) {
        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
        ilt->lines[cdu_ilt_start + i].page_mapping =
            sc->context[i].vcxt_dma.paddr;
        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
    }
    ecore_ilt_init_op(sc, INITOP_SET);
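
    /*
     * Each ILT line programmed above pairs the driver's virtual view of
     * one context chunk (vcxt) with its DMA bus address
     * (vcxt_dma.paddr), so the CDU client can resolve connection
     * contexts by line number. The sizes come from the context
     * allocation done earlier in the load path.
     */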
    if (!CONFIGURE_NIC_MODE(sc)) {
        bxe_init_searcher(sc);
        REG_WR(sc, PRS_REG_NIC_MODE, 0);
        BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n");
    } else {
        REG_WR(sc, PRS_REG_NIC_MODE, 1);
        BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
    }
    if (!CHIP_IS_E1x(sc)) {
        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;

        /*
         * Turn on a single ISR mode in IGU if the driver is going to
         * use INT#x or MSI.
         */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
        }

        /*
         * Timers workaround bug: function init part.
         * Need to wait 20msec after initializing ILT,
         * needed to make sure there are no requests in
         * one of the PXP internal queues with "old" ILT addresses
         */
        DELAY(20000);

        /*
         * Master enable - Due to WB DMAE writes performed before this
         * register is re-initialized as part of the regular function
         * init
         */
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
        /* Enable the function in IGU */
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
    }

    sc->dmae_ready = 1; /* DMAE is now usable for init writes */
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_NIG, init_phase);
    ecore_init_block(sc, BLOCK_SRC, init_phase);
    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);
    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, QM_REG_PF_EN, 1);

    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
    }
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TM, init_phase);
    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    bxe_iov_init_dq(sc);
    ecore_init_block(sc, BLOCK_BRB1, init_phase);
    ecore_init_block(sc, BLOCK_PRS, init_phase);
    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);
    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);
    ecore_init_block(sc, BLOCK_PBF, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PBF_REG_DISABLE_PF, 0);

    ecore_init_block(sc, BLOCK_CDU, init_phase);

    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);

    if (IS_MF(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
    }
    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);

    /* HC init per function */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        if (CHIP_IS_E1H(sc)) {
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        ecore_init_block(sc, BLOCK_HC, init_phase);
    } else {
        int num_segs, sb_idx, prod_offset;

        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
        }

        ecore_init_block(sc, BLOCK_IGU, init_phase);

        if (!CHIP_IS_E1x(sc)) {
            int dsb_idx = 0;
            /*
             * Producer memory:
             * E2 mode: address 0-135 match to the mapping memory;
             * 136 - PF0 default prod; 137 - PF1 default prod;
             * 138 - PF2 default prod; 139 - PF3 default prod;
             * 140 - PF0 attn prod;    141 - PF1 attn prod;
             * 142 - PF2 attn prod;    143 - PF3 attn prod;
             * 144-147 reserved.
             *
             * E1.5 mode - In backward compatible mode;
             * for non default SB; each even line in the memory
             * holds the U producer and each odd line holds
             * the C producer. The first 128 producers are for
             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSB for each PF.
             * Each PF has five segments: (the order inside each
             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
             * 144-147 attn prods;
             */
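            /*
             * For example, in normal (non-BC) mode each non-default
             * status block owns IGU_NORM_NDSB_NUM_SEGS consecutive
             * producer words, so SB (igu_base_sb + sb_idx) starts at
             * word (igu_base_sb + sb_idx) * num_segs, i.e. at byte
             * offset IGU_REG_PROD_CONS_MEMORY + word * 4, which is
             * exactly what the loop below zeroes.
             */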
            /* non-default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
                prod_offset = (sc->igu_base_sb + sb_idx) *
                    num_segs;

                for (i = 0; i < num_segs; i++) {
                    addr = IGU_REG_PROD_CONS_MEMORY +
                        (prod_offset + i) * 4;
                    REG_WR(sc, addr, 0);
                }
                /* send consumer update with value 0 */
                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
            }
            /* default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

            if (CHIP_IS_MODE_4_PORT(sc))
                dsb_idx = SC_FUNC(sc);
            else
                dsb_idx = SC_VN(sc);

            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
                           IGU_BC_BASE_DSB_PROD + dsb_idx :
                           IGU_NORM_BASE_DSB_PROD + dsb_idx);

            /*
             * igu prods come in chunks of E1HVN_MAX (4) -
             * it does not matter what the current chip mode is
             */
            for (i = 0; i < (num_segs * E1HVN_MAX);
                 i += E1HVN_MAX) {
                addr = IGU_REG_PROD_CONS_MEMORY +
                    (prod_offset + i)*4;
                REG_WR(sc, addr, 0);
            }
            /* send consumer update with 0 */
            if (CHIP_INT_MODE_IS_BC(sc)) {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           CSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           XSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           TSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            } else {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            }
            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
            /* !!! these should become driver const once
               rf-tool supports split-68 const */
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
        }
    }

    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2114, 0xffffffff);
    REG_WR(sc, 0x2120, 0xffffffff);
    if (CHIP_IS_E1x(sc)) {
        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /* dwords */
        main_mem_base = HC_REG_MAIN_MEMORY +
            SC_PORT(sc) * (main_mem_size * 4);
        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
        main_mem_width = 8;

        val = REG_RD(sc, main_mem_prty_clr);
        if (val != 0) {
            BLOGD(sc, DBG_LOAD,
                  "Parity errors in HC block during function init (0x%x)!\n",
                  val);
        }

        /* Clear "false" parity errors in MSI-X table */
        for (i = main_mem_base;
             i < main_mem_base + main_mem_size * 4;
             i += main_mem_width) {
            bxe_read_dmae(sc, i, main_mem_width / 4);
            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
                           i, main_mem_width / 4);
        }
        /* Clear HC parity attention */
        REG_RD(sc, main_mem_prty_clr);
    }
    /* Enable STORMs SP logging */
    REG_WR8(sc, BAR_USTRORM_INTMEM +
            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_TSTRORM_INTMEM +
            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_XSTRORM_INTMEM +
            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);

    elink_phy_probe(&sc->link_params);

    return (0);
}
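
/*
 * Reset the physical link through the elink layer. This path requires
 * bootcode; without it (outside of slow-rev emulation/FPGA parts) the
 * link cannot be reset, so only a warning is logged.
 */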
static void
bxe_link_reset(struct bxe_softc *sc)
{
    if (!BXE_NOMCP(sc)) {
        BXE_PHY_LOCK(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        BXE_PHY_UNLOCK(sc);
    } else if (!CHIP_REV_IS_SLOW(sc)) {
        BLOGW(sc, "Bootcode is missing - cannot reset link\n");
    }
}
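
/*
 * Quiesce the port on the way down: reset the physical link, mask NIG
 * interrupts, stop BRB traffic for this port, mask AEU attentions, and
 * then check that the BRB has actually drained.
 */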
static void
bxe_reset_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    /* reset physical Link */
    bxe_link_reset(sc);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    /* Do not rcv packets to BRB */
    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct rcv packets that are not for MCP to the BRB */
    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

    /* Configure AEU */
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

    DELAY(100000);

    /* Check for BRB port occupancy */
    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
    if (val) {
        BLOGD(sc, DBG_LOAD,
              "BRB1 is not empty, %d blocks are occupied\n", val);
    }

    /* TODO: Close Doorbell port? */
}
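
/*
 * Write one ILT entry through the wide-bus DMAE path. ONCHIP_ADDR1() and
 * ONCHIP_ADDR2() split the 64-bit bus address of the mapped page into
 * the two 32-bit words the PXP2 on-chip address table expects.
 */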
static void
bxe_ilt_wr(struct bxe_softc *sc,
           uint32_t         index,
           bus_addr_t       addr)
{
    int reg;
    uint32_t wb_write[2];

    if (CHIP_IS_E1(sc)) {
        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
    } else {
        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
    }
    wb_write[0] = ONCHIP_ADDR1(addr);
    wb_write[1] = ONCHIP_ADDR2(addr);
    REG_WR_DMAE(sc, reg, wb_write, 2);
}
static void
bxe_clear_func_ilt(struct bxe_softc *sc,
                   uint32_t         func)
{
    uint32_t i, base = FUNC_ILT_BASE(func);
    for (i = base; i < base + ILT_PER_FUNC; i++) {
        bxe_ilt_wr(sc, i, 0);
    }
}
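
/*
 * Per-function teardown: disable the function in the FW and its status
 * blocks, quiesce the HC/IGU edges, stop the CNIC timer scan, clear this
 * function's ILT window, and finally drop PF master/DMAE access. Assumes
 * bxe_reset_port() has already run.
 */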
static void
bxe_reset_func(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int i;

    /* Disable the function in the FW */
    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

    /* FP SBs */
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
                SB_DISABLED);
    }

    if (CNIC_LOADED(sc)) {
        /* CNIC SB */
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
                (bxe_cnic_fw_sb_id(sc)), SB_DISABLED);
    }

    /* SP SB */
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
            SB_DISABLED);

    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
    }

    /* Configure IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    } else {
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
    }

    if (CNIC_LOADED(sc)) {
        /* Disable Timer scan */
        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
        /*
         * Wait for at least 10ms and up to 2 seconds for the timers
         * scan to complete
         */
        for (i = 0; i < 200; i++) {
            DELAY(10000);
            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
                break;
        }
    }

    /* Clear ILT */
    bxe_clear_func_ilt(sc, func);

    /*
     * Timers workaround bug for E2: if this is vnic-3,
     * we need to set the entire ilt range for these timers.
     */
    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
        struct ilt_client_info ilt_cli;
        /* use dummy TM client */
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
    }

    /* this assumes that reset_port() was called before reset_func() */
    if (!CHIP_IS_E1x(sc)) {
        bxe_pf_disable(sc);
    }

    sc->dmae_ready = 0;
}
static void
bxe_gunzip_init(struct bxe_softc *sc)
{
    return;
}

static void
bxe_gunzip_end(struct bxe_softc *sc)
{
    return;
}
static int
bxe_init_firmware(struct bxe_softc *sc)
{
    if (CHIP_IS_E1(sc)) {
        ecore_init_e1_firmware(sc);
        sc->iro_array = e1_iro_arr;
    } else if (CHIP_IS_E1H(sc)) {
        ecore_init_e1h_firmware(sc);
        sc->iro_array = e1h_iro_arr;
    } else if (!CHIP_IS_E1x(sc)) {
        ecore_init_e2_firmware(sc);
        sc->iro_array = e2_iro_arr;
    } else {
        BLOGE(sc, "Unsupported chip revision\n");
        return (-1);
    }

    return (0);
}
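
/*
 * Note: sc->iro_array selects the per-chip-family table of firmware
 * offsets consumed by the storm *_OFFSET() macros used throughout the
 * init and reset paths above.
 */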
static void
bxe_release_firmware(struct bxe_softc *sc)
{
    /* Do nothing */
    return;
}
static int
ecore_gunzip(struct bxe_softc *sc,
             const uint8_t    *zbuf,
             int              len)
{
    /* XXX : Implement... */
    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
    return (FALSE);
}
static void
ecore_reg_wr_ind(struct bxe_softc *sc,
                 uint32_t         addr,
                 uint32_t         val)
{
    bxe_reg_wr_ind(sc, addr, val);
}
static void
ecore_write_dmae_phys_len(struct bxe_softc *sc,
                          bus_addr_t       phys_addr,
                          uint32_t         addr,
                          uint32_t         len)
{
    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
}
void
ecore_storm_memset_struct(struct bxe_softc *sc,
                          uint32_t         addr,
                          size_t           size,
                          uint32_t         *data)
{
    int i;
    for (i = 0; i < size/4; i++) {
        REG_WR(sc, addr + (i * 4), data[i]);
    }
}
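
/*
 * A minimal usage sketch (hypothetical offset/struct names): copy a
 * dword-aligned firmware structure into storm internal memory, e.g.
 *
 *     ecore_storm_memset_struct(sc, BAR_XSTRORM_INTMEM + offset,
 *                               sizeof(sb_data), (uint32_t *)&sb_data);
 *
 * Callers must hand in a structure whose size is a multiple of 4 bytes.
 */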