/*
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define BXE_DRIVER_VERSION "1.78.79"

#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"
/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002
/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
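/*
 * A minimal sketch (not part of the driver) of how these constants are
 * typically consumed: poll a hardware condition every FLR_WAIT_INTERVAL
 * usecs and give up after FLR_POLL_CNT iterations (10 msecs total). The
 * register argument and the "reads as zero when done" predicate are
 * hypothetical, for illustration only.
 */
#if 0
static int
bxe_flr_poll_example(struct bxe_softc *sc, uint32_t reg)
{
    uint32_t cnt;

    for (cnt = 0; cnt < FLR_POLL_CNT; cnt++) {
        if (REG_RD(sc, reg) == 0) {
            return (0); /* condition met */
        }
        DELAY(FLR_WAIT_INTERVAL);
    }

    return (1); /* timed out after FLR_WAIT_USEC usecs */
}
#endif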
struct pbf_pN_buf_regs {

struct pbf_pN_cmd_regs {
/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57710 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57711E 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57712 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57800 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57810 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57811 MF 10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 4x10GbE"

        PCI_ANY_ID, PCI_ANY_ID,
        "QLogic NetXtreme II BCM57840 MF 10GbE"
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);

/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);

struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t aer; /* XXX automatic error recovery */
};

static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
             &bxe_debug, 0, "Debug logging mode");
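/*
 * Illustrative /boot/loader.conf lines for the tunables in this section
 * (values shown are examples, not recommendations):
 *
 *   hw.bxe.debug=0x1
 *   hw.bxe.interrupt_mode=2
 *   hw.bxe.queue_count=4
 *
 * RDTUN tunables are read once when the driver initializes; changing them
 * later through sysctl(8) does not affect an already-attached instance.
 */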
/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name)                   \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
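/*
 * Worked example of the dword indexing above: if total_bytes_received_hi
 * were at byte offset 40 within struct bxe_eth_stats, then
 * STATS_OFFSET32(total_bytes_received_hi) == 40 / 4 == 10, i.e. the stat
 * occupies the 10th 32-bit word of the block. (The offset of 40 is
 * illustrative; the real value comes from the structure layout.)
 */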
static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
      4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
      4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
      4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
      4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
      4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
    { STATS_OFFSET32(tx_chain_lost_mbuf),
      4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
      4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
      4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}
};
static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
      8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
      8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
      8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
      8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
      4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
      4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
      4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa"}
};
#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
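/*
 * A hedged sketch of how the table above can be walked: "stats" is a
 * hypothetical pointer to the raw bxe_eth_stats block viewed as dwords,
 * and 64-bit counters are assumed stored as a hi/lo dword pair at
 * consecutive offsets (as the *_hi field names suggest).
 */
#if 0
static void
bxe_print_eth_stats_example(const uint32_t *stats)
{
    int i;

    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
        uint64_t val;

        if (bxe_eth_stats_arr[i].size == 8) {
            val = (((uint64_t)stats[bxe_eth_stats_arr[i].offset] << 32) |
                   stats[bxe_eth_stats_arr[i].offset + 1]);
        } else {
            val = stats[bxe_eth_stats_arr[i].offset];
        }

        printf("%s: %ju\n", bxe_eth_stats_arr[i].string, (uintmax_t)val);
    }
}
#endif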
static void bxe_cmng_fns_init(struct bxe_softc *sc,
static int  bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc,
                              struct cmng_init *cmng,
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
static void bxe_int_disable(struct bxe_softc *sc);
static int bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
static int bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int bxe_init_locked(struct bxe_softc *sc);
static int bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode,
static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);

static int bxe_add_cdev(struct bxe_softc *sc);
static void bxe_del_cdev(struct bxe_softc *sc);
static int bxe_grc_dump(struct bxe_softc *sc);
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
calc_crc32(uint8_t  *crc32_packet,
           uint32_t crc32_length,

    uint8_t current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
        return (crc32_result);

    for (byte = 0; byte < crc32_length; byte = byte + 1)
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */

    /*
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
        temp |= crc32_result & 1;

    /* temp[31-bit] = crc32_result[bit] */

    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    crc32_result = ~crc32_result;

    return (crc32_result);
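/*
 * A hedged usage sketch: checksum a small 8-byte-aligned scratch buffer
 * with a conventional all-ones seed. The buffer contents and seed are
 * illustrative, and the call assumes the (packet, length, seed) parameter
 * order suggested by the declaration and body above.
 */
#if 0
static uint32_t
calc_crc32_example(void)
{
    uint8_t buf[16] = { 0 }; /* length is a multiple of 8, as required */

    return (calc_crc32(buf, sizeof(buf), 0xffffffff));
}
#endif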
             volatile unsigned long *addr)
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);

bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
    atomic_set_acq_long(addr, (1 << nr));

bxe_clear_bit(int nr,
              volatile unsigned long *addr)
    atomic_clear_acq_long(addr, (1 << nr));

bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)

    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;

bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)

    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;

bxe_cmpxchg(volatile int *addr,

    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
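/*
 * Illustrative use of the atomic bit helpers above on a hypothetical
 * driver state word; bit position 3 is arbitrary.
 */
#if 0
static void
bxe_bit_helpers_example(void)
{
    volatile unsigned long flags = 0;

    bxe_set_bit(3, &flags);   /* flags == 0x8 */
    bxe_clear_bit(3, &flags); /* flags == 0x0 */
}
#endif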
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
 * failures back to the caller.
 */
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)

    struct bxe_dma *dma = arg;

        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);

        dma->paddr = segs->ds_addr;
/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
bxe_dma_alloc(struct bxe_softc *sc,

        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);

    memset(dma, 0, sizeof(*dma)); /* sanity */

    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */

        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),

        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));

    rc = bus_dmamap_load(dma->tag,

                         bxe_dma_map_addr, /* BLOGD in here */

        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
bxe_dma_free(struct bxe_softc *sc,

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);

    memset(dma, 0, sizeof(*dma));
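/*
 * Pairing sketch for the two helpers above; the size and name are
 * illustrative, and the (sc, size, dma, msg) argument order is assumed
 * from the declaration and body. On success the bus address is in
 * dma.paddr and the KVA in dma.vaddr.
 */
#if 0
static int
bxe_dma_example(struct bxe_softc *sc)
{
    struct bxe_dma dma;

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example") != 0) {
        return (1);
    }

    /* ... use dma.vaddr / dma.paddr ... */

    bxe_dma_free(sc, &dma);
    return (0);
}
#endif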
/*
 * These indirect read and write routines are used only during init.
 * The locking is handled by the MCP.
 */
bxe_reg_wr_ind(struct bxe_softc *sc,

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

bxe_reg_rd_ind(struct bxe_softc *sc,

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
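/*
 * Illustrative flow for the helpers above: one indirect GRC read costs
 * three PCI config accesses (set window address, read data, reset the
 * window). The 0x2000 address below is arbitrary.
 */
#if 0
static uint32_t
bxe_reg_rd_ind_example(struct bxe_softc *sc)
{
    return (bxe_reg_rd_ind(sc, 0x2000));
}
#endif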
bxe_acquire_hw_lock(struct bxe_softc *sc,

    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
              " resource_bit 0x%x\n", resource, resource_bit);

        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));

        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
          resource, resource_bit);

bxe_release_hw_lock(struct bxe_softc *sc,

    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
              " resource_bit 0x%x\n", resource, resource_bit);

        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));

        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);

    REG_WR(sc, hw_lock_control_reg, resource_bit);
static void bxe_acquire_phy_lock(struct bxe_softc *sc)

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);

static void bxe_release_phy_lock(struct bxe_softc *sc)

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
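/*
 * The safe ordering described above, as a sketch (error handling trimmed);
 * all calls are to the functions defined in this section.
 */
#if 0
static int
bxe_nvram_access_example(struct bxe_softc *sc)
{
    int rc;

    rc = bxe_acquire_nvram_lock(sc); /* per-pf HW lock, then SW arb */
    if (rc != 0)
        return (rc);

    bxe_enable_nvram_access(sc);
    /* ... perform dword reads/writes here ... */
    bxe_disable_nvram_access(sc);

    bxe_release_nvram_lock(sc);      /* SW arb, then per-pf HW lock */
    return (0);
}
#endif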
bxe_acquire_nvram_lock(struct bxe_softc *sc)

    int port = SC_PORT(sc);

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
              "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",

bxe_release_nvram_lock(struct bxe_softc *sc)

    int port = SC_PORT(sc);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
              "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
bxe_enable_nvram_access(struct bxe_softc *sc)

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));

bxe_disable_nvram_access(struct bxe_softc *sc)

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
bxe_nvram_read_dword(struct bxe_softc *sc,

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* wait for completion */

    for (i = 0; i < count; i++) {

        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);

    BLOGE(sc, "nvram read timeout expired "
          "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
          offset, cmd_flags, val);
bxe_nvram_read(struct bxe_softc *sc,

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        ret_buf += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);

        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
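/*
 * Usage sketch for bxe_nvram_read(): the flash offset and length below
 * are illustrative (not a real NVRAM layout), and the (sc, offset, buf,
 * len) argument order is assumed from the body above. Both offset and
 * length must be dword-aligned, per the parameter checks.
 */
#if 0
static int
bxe_nvram_read_example(struct bxe_softc *sc)
{
    uint8_t buf[32]; /* dword-multiple length, as required */

    return (bxe_nvram_read(sc, 0x100 /* illustrative */, buf, sizeof(buf)));
}
#endif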
bxe_nvram_write_dword(struct bxe_softc *sc,

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {

    /* wait for completion */

    for (i = 0; i < count; i++) {

        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {

    BLOGE(sc, "nvram write timeout expired "
          "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
          offset, cmd_flags, val);
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

bxe_nvram_write1(struct bxe_softc *sc,

    uint32_t align_offset;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
bxe_nvram_write(struct bxe_softc *sc,

    uint32_t written_so_far;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",

    if (buf_size == 0) {
        return (0); /* nothing to do */

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        data_buf += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
/* copy command into DMAE command memory and set DMAE command Go */
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_command *dmae,

    uint32_t cmd_offset;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));

    REG_WR(sc, dmae_reg_go_c[idx], 1);

bxe_dmae_opcode_add_comp(uint32_t opcode,

    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)

    return (opcode & ~DMAE_COMMAND_SRC_RESET);

bxe_dmae_opcode(struct bxe_softc *sc,

    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;

    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;

    opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_command *dmae,

    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
/* issue a DMAE command over the init channel and wait for completion */
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_command *dmae)

    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    /* reset completion */

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

        (sc->recovery_state != BXE_RECOVERY_DONE &&
         sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
                  *wb_comp, sc->recovery_state);
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
              *wb_comp, sc->recovery_state);
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);

    BXE_DMAE_UNLOCK(sc);
bxe_read_dmae(struct bxe_softc *sc,

    struct dmae_command dmae;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,

    struct dmae_command dmae;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);

            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,

    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);

    while (len > dmae_wr_max) {
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
        offset += (dmae_wr_max * 4);

                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
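/*
 * Chunking example for the loop above: if DMAE_LEN32_WR_MAX(sc) evaluated
 * to 0x400 dwords, a 0x900-dword write would be issued as 0x400 + 0x400 +
 * 0x100, with the byte offset advancing by (dmae_wr_max * 4) per chunk.
 * (The 0x400 figure is illustrative; the macro supplies the real per-chip
 * limit.)
 */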
bxe_set_ctx_validation(struct bxe_softc *sc,
                       struct eth_context *cxt,

    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);

bxe_storm_memset_hc_timeout(struct bxe_softc *sc,

        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
bxe_storm_memset_hc_disable(struct bxe_softc *sc,

    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));

    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
bxe_update_coalesce_sb_index(struct bxe_softc *sc,

    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
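/*
 * Example of the conversion above: the default hw.bxe.hc_rx_ticks of 25
 * usecs yields (25 / 4) = 6 hardware ticks, and a usec value of 0 forces
 * the disable path (coalescing off for that status block index).
 */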
elink_cb_udelay(struct bxe_softc *sc,

elink_cb_reg_read(struct bxe_softc *sc,

    return (REG_RD(sc, reg_addr));

elink_cb_reg_write(struct bxe_softc *sc,

    REG_WR(sc, reg_addr, val);

elink_cb_reg_wb_write(struct bxe_softc *sc,

    REG_WR_DMAE(sc, offset, wb_write, len);

elink_cb_reg_wb_read(struct bxe_softc *sc,

    REG_RD_DMAE(sc, offset, wb_write, len);

elink_cb_path_id(struct bxe_softc *sc)

    return (SC_PATH(sc));

elink_cb_event_log(struct bxe_softc *sc,
                   const elink_log_id_t elink_log_id,

    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
bxe_set_spio(struct bxe_softc *sc,

    /* Only 2 SPIOs are configurable */
    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);

    /* read SPIO and mask except the float bits */
    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

    case MISC_SPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
        /* clear FLOAT and set CLR */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_CLR_POS);

    case MISC_SPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
        /* clear FLOAT and set SET */
        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
        spio_reg |= (spio << MISC_SPIO_SET_POS);

    case MISC_SPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);

    REG_WR(sc, MISC_REG_SPIO, spio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
bxe_gpio_read(struct bxe_softc *sc,

    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
              " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,

    /* read GPIO value */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);

    /* get the requested pin value */
    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
bxe_gpio_write(struct bxe_softc *sc,

    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
              " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
              "Set GPIO %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
              "Set GPIO %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
              "Set GPIO %d (shift %d) -> input\n",
              gpio_num, gpio_shift);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
bxe_gpio_mult_write(struct bxe_softc *sc,

    /* any port swapping should be handled by caller */

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);

        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
              " gpio_reg 0x%x\n", pins, mode, gpio_reg);
        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
bxe_gpio_int_write(struct bxe_softc *sc,

    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
              " gpio_shift %d gpio_mask 0x%x\n",
              gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
elink_cb_gpio_read(struct bxe_softc *sc,

    return (bxe_gpio_read(sc, gpio_num, port));

elink_cb_gpio_write(struct bxe_softc *sc,
                    uint8_t mode, /* 0=low 1=high */

    return (bxe_gpio_write(sc, gpio_num, mode, port));

elink_cb_gpio_mult_write(struct bxe_softc *sc,
                         uint8_t mode) /* 0=low 1=high */

    return (bxe_gpio_mult_write(sc, pins, mode));

elink_cb_gpio_int_write(struct bxe_softc *sc,
                        uint8_t mode, /* 0=low 1=high */

    return (bxe_gpio_int_write(sc, gpio_num, mode, port));

elink_cb_notify_link_changed(struct bxe_softc *sc)

    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
/* send the MCP a request, block until there is a reply */
elink_cb_fw_command(struct bxe_softc *sc,

    int mb_idx = SC_FW_MB_IDX(sc);

    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;

    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));

          "wrote command 0x%08x to FW MB param 0x%08x\n",
          (command | seq), param);

    /* Let the FW do its magic. Give it up to 5 seconds... */
    do {
        DELAY(delay * 1000);
        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
          cnt*delay, rc, seq);

    /* is this a reply to our command? */
    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
        rc &= FW_MSG_CODE_MASK;

        BLOGE(sc, "FW failed to respond!\n");
        // XXX bxe_fw_dump(sc);

    BXE_FWMB_UNLOCK(sc);

bxe_fw_command(struct bxe_softc *sc,

    return (elink_cb_fw_command(sc, command, param));
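/*
 * Illustrative call of the MCP handshake above; the command code and the
 * zero parameter are hypothetical stand-ins for a real mailbox request.
 * A masked reply code of 0 means the firmware gave no (or an out-of-
 * sequence) response.
 */
#if 0
static void
bxe_fw_command_example(struct bxe_softc *sc)
{
    uint32_t rc;

    rc = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ /* illustrative */, 0);
    if (rc == 0) {
        /* no/invalid response from the MCP */
    }
}
#endif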
__storm_memset_dma_mapping(struct bxe_softc *sc,

    REG_WR(sc, addr, U64_LO(mapping));
    REG_WR(sc, (addr + 4), U64_HI(mapping));

storm_memset_spq_addr(struct bxe_softc *sc,

    uint32_t addr = (XSEM_REG_FAST_MEMORY +
                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
    __storm_memset_dma_mapping(sc, addr, mapping);

storm_memset_vf_to_pf(struct bxe_softc *sc,

    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);

storm_memset_func_en(struct bxe_softc *sc,

    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);

storm_memset_eq_data(struct bxe_softc *sc,
                     struct event_ring_data *eq_data,

    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
    size = sizeof(struct event_ring_data);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);

storm_memset_eq_prod(struct bxe_softc *sc,

    uint32_t addr = (BAR_CSTRORM_INTMEM +
                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
    REG_WR16(sc, addr, eq_prod);
2228 * Post a slowpath command.
2230 * A slowpath command is used to propogate a configuration change through
2231 * the controller in a controlled manner, allowing each STORM processor and
2232 * other H/W blocks to phase in the change. The commands sent on the
2233 * slowpath are referred to as ramrods. Depending on the ramrod used the
2234 * completion of the ramrod will occur in different ways. Here's a
2235 * breakdown of ramrods and how they complete:
2237 * RAMROD_CMD_ID_ETH_PORT_SETUP
2238 * Used to setup the leading connection on a port. Completes on the
2239 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
2241 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2242 * Used to setup an additional connection on a port. Completes on the
2243 * RCQ of the multi-queue/RSS connection being initialized.
2245 * RAMROD_CMD_ID_ETH_STAT_QUERY
2246 * Used to force the storm processors to update the statistics database
2247 * in host memory. This ramrod is send on the leading connection CID and
2248 * completes as an index increment of the CSTORM on the default status
2251 * RAMROD_CMD_ID_ETH_UPDATE
2252 * Used to update the state of the leading connection, usually to udpate
2253 * the RSS indirection table. Completes on the RCQ of the leading
2254 * connection. (Not currently used under FreeBSD until OS support becomes
2257 * RAMROD_CMD_ID_ETH_HALT
2258 * Used when tearing down a connection prior to driver unload. Completes
2259 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2260 * use this on the leading connection.
2262 * RAMROD_CMD_ID_ETH_SET_MAC
2263 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
2264 * the RCQ of the leading connection.
2266 * RAMROD_CMD_ID_ETH_CFC_DEL
2267 * Used when tearing down a connection prior to driver unload. Completes
2268 * on the RCQ of the leading connection (since the current connection
2269 * has been completely removed from controller memory).
2271 * RAMROD_CMD_ID_ETH_PORT_DEL
2272 * Used to tear down the leading connection prior to driver unload,
2273 * typically fp[0]. Completes as an index increment of the CSTORM on the
2274 * default status block.
2276 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2277 * Used for connection offload. Completes on the RCQ of the multi-queue
2278 * RSS connection that is being offloaded. (Not currently used under
2281 * There can only be one command pending per function.
2284 * 0 = Success, !0 = Failure.
2287 /* must be called under the spq lock */
2289 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2291 struct eth_spe *next_spe = sc->spq_prod_bd;
2293 if (sc->spq_prod_bd == sc->spq_last_bd) {
2294 /* wrap back to the first eth_spq */
2295 sc->spq_prod_bd = sc->spq;
2296 sc->spq_prod_idx = 0;
2305 /* must be called under the spq lock */
2307 void bxe_sp_prod_update(struct bxe_softc *sc)
2309 int func = SC_FUNC(sc);
2312 * Make sure that BD data is updated before writing the producer.
2313 * BD data is written to the memory, the producer is read from the
2314 * memory, thus we need a full memory barrier to ensure the ordering.
2318 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2321 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2322 BUS_SPACE_BARRIER_WRITE);
2326 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2328 * @cmd: command to check
2329 * @cmd_type: command type
2332 int bxe_is_contextless_ramrod(int cmd,
2335 if ((cmd_type == NONE_CONNECTION_TYPE) ||
2336 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2337 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2338 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2339 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2340 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2341 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2349 * bxe_sp_post - place a single command on an SP ring
2351 * @sc: driver handle
2352 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2353 * @cid: SW CID the command is related to
2354 * @data_hi: command private data address (high 32 bits)
2355 * @data_lo: command private data address (low 32 bits)
2356 * @cmd_type: command type (e.g. NONE, ETH)
2358 * SP data is handled as if it's always an address pair, thus data fields are
2359 * not swapped to little endian in upper functions. Instead this function swaps
2360 * data as if it's two uint32 fields.
2363 bxe_sp_post(struct bxe_softc *sc,
2370 struct eth_spe *spe;
2374 common = bxe_is_contextless_ramrod(command, cmd_type);
2379 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2380 BLOGE(sc, "EQ ring is full!\n");
2385 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2386 BLOGE(sc, "SPQ ring is full!\n");
2392 spe = bxe_sp_get_next(sc);
2394 /* CID needs the port number to be encoded in it */
2395 spe->hdr.conn_and_cmd_data =
2396 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));
2398 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
2400 /* TBD: Check if it works for VFs */
2401 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
2402 SPE_HDR_FUNCTION_ID);
2404 spe->hdr.type = htole16(type);
2406 spe->data.update_data_addr.hi = htole32(data_hi);
2407 spe->data.update_data_addr.lo = htole32(data_lo);
2410 * It's ok if the actual decrement is issued towards the memory
2411 * somewhere between the lock and unlock. Thus no more explicit
2412 * memory barrier is needed.
2415 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2417 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2420 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2421 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2422 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2424 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2426 (uint32_t)U64_HI(sc->spq_dma.paddr),
2427 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2434 atomic_load_acq_long(&sc->cq_spq_left),
2435 atomic_load_acq_long(&sc->eq_spq_left));
2437 bxe_sp_prod_update(sc);
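/*
 * Illustrative sketch (not compiled into the driver): posting a HALT
 * ramrod through bxe_sp_post(). Per the ramrod breakdown above, the
 * completion arrives on the RCQ of the connection being halted and is
 * routed back through bxe_sp_event(). Passing the func_rdata buffer as
 * the private data here is only an example choice.
 */
#if 0
static int
bxe_example_post_halt(struct bxe_softc *sc, int cid)
{
    /* the private data address travels as a high/low 32-bit pair */
    uint32_t data_hi = U64_HI(BXE_SP_MAPPING(sc, func_rdata));
    uint32_t data_lo = U64_LO(BXE_SP_MAPPING(sc, func_rdata));

    /* ETH_CONNECTION_TYPE => completion on the RCQ, not the EQ */
    return (bxe_sp_post(sc, RAMROD_CMD_ID_ETH_HALT, cid,
                        data_hi, data_lo, ETH_CONNECTION_TYPE));
}
#endif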
2444 * bxe_debug_print_ind_table - prints the indirection table configuration.
2446 * @sc: driver handle
2447 * @p: pointer to rss configuration
2451 * FreeBSD Device probe function.
2453 * Compares the device found to the driver's list of supported devices and
2454 * reports back to the BSD loader whether this is the right driver for the device.
2455 * This is the driver entry function called from the "kldload" command.
2458 * BUS_PROBE_DEFAULT on success, positive value on failure.
2461 bxe_probe(device_t dev)
2463 struct bxe_softc *sc;
2464 struct bxe_device_type *t;
2466 uint16_t did, sdid, svid, vid;
2468 /* Find our device structure */
2469 sc = device_get_softc(dev);
2473 /* Get the data for the device to be probed. */
2474 vid = pci_get_vendor(dev);
2475 did = pci_get_device(dev);
2476 svid = pci_get_subvendor(dev);
2477 sdid = pci_get_subdevice(dev);
2480 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2481 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2483 /* Look through the list of known devices for a match. */
2484 while (t->bxe_name != NULL) {
2485 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2486 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2487 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2488 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2489 if (descbuf == NULL)
2492 /* Print out the device identity. */
2493 snprintf(descbuf, BXE_DEVDESC_MAX,
2494 "%s (%c%d) BXE v:%s\n", t->bxe_name,
2495 (((pci_read_config(dev, PCIR_REVID, 4) &
2497 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2498 BXE_DRIVER_VERSION);
2500 device_set_desc_copy(dev, descbuf);
2501 free(descbuf, M_TEMP);
2502 return (BUS_PROBE_DEFAULT);
2511 bxe_init_mutexes(struct bxe_softc *sc)
2513 #ifdef BXE_CORE_LOCK_SX
2514 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2515 "bxe%d_core_lock", sc->unit);
2516 sx_init(&sc->core_sx, sc->core_sx_name);
2518 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2519 "bxe%d_core_lock", sc->unit);
2520 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2523 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2524 "bxe%d_sp_lock", sc->unit);
2525 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2527 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2528 "bxe%d_dmae_lock", sc->unit);
2529 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2531 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2532 "bxe%d_phy_lock", sc->unit);
2533 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2535 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2536 "bxe%d_fwmb_lock", sc->unit);
2537 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2539 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2540 "bxe%d_print_lock", sc->unit);
2541 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2543 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2544 "bxe%d_stats_lock", sc->unit);
2545 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2547 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2548 "bxe%d_mcast_lock", sc->unit);
2549 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2553 bxe_release_mutexes(struct bxe_softc *sc)
2555 #ifdef BXE_CORE_LOCK_SX
2556 sx_destroy(&sc->core_sx);
2558 if (mtx_initialized(&sc->core_mtx)) {
2559 mtx_destroy(&sc->core_mtx);
2563 if (mtx_initialized(&sc->sp_mtx)) {
2564 mtx_destroy(&sc->sp_mtx);
2567 if (mtx_initialized(&sc->dmae_mtx)) {
2568 mtx_destroy(&sc->dmae_mtx);
2571 if (mtx_initialized(&sc->port.phy_mtx)) {
2572 mtx_destroy(&sc->port.phy_mtx);
2575 if (mtx_initialized(&sc->fwmb_mtx)) {
2576 mtx_destroy(&sc->fwmb_mtx);
2579 if (mtx_initialized(&sc->print_mtx)) {
2580 mtx_destroy(&sc->print_mtx);
2583 if (mtx_initialized(&sc->stats_mtx)) {
2584 mtx_destroy(&sc->stats_mtx);
2587 if (mtx_initialized(&sc->mcast_mtx)) {
2588 mtx_destroy(&sc->mcast_mtx);
2593 bxe_tx_disable(struct bxe_softc* sc)
2595 struct ifnet *ifp = sc->ifnet;
2597 /* tell the stack the driver is stopped and TX queue is full */
2599 ifp->if_drv_flags = 0;
2604 bxe_drv_pulse(struct bxe_softc *sc)
2606 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2607 sc->fw_drv_pulse_wr_seq);
2610 static inline uint16_t
2611 bxe_tx_avail(struct bxe_softc *sc,
2612 struct bxe_fastpath *fp)
2618 prod = fp->tx_bd_prod;
2619 cons = fp->tx_bd_cons;
2621 used = SUB_S16(prod, cons);
2623 return (int16_t)(sc->tx_ring_size) - used;
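/*
 * Worked example (illustrative, not compiled): SUB_S16() - assumed here
 * to be the driver's signed 16-bit subtraction macro - keeps the used-BD
 * count correct across 16-bit index wraparound.
 */
#if 0
static void
bxe_example_tx_avail_wrap(void)
{
    uint16_t prod = 5;     /* producer already wrapped past 65535 */
    uint16_t cons = 65530; /* consumer not yet wrapped */

    /* a plain (prod - cons) would underflow; SUB_S16 yields 11 */
    int16_t used = SUB_S16(prod, cons);
    (void)used;
}
#endif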
2627 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2631 mb(); /* status block fields can change */
2632 hw_cons = le16toh(*fp->tx_cons_sb);
2633 return (hw_cons != fp->tx_pkt_cons);
2636 static inline uint8_t
2637 bxe_has_tx_work(struct bxe_fastpath *fp)
2639 /* expand this for multi-cos if ever supported */
2640 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2644 bxe_has_rx_work(struct bxe_fastpath *fp)
2646 uint16_t rx_cq_cons_sb;
2648 mb(); /* status block fields can change */
2649 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2650 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2652 return (fp->rx_cq_cons != rx_cq_cons_sb);
2656 bxe_sp_event(struct bxe_softc *sc,
2657 struct bxe_fastpath *fp,
2658 union eth_rx_cqe *rr_cqe)
2660 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2661 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2662 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2663 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2665 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2666 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2669 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2670 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2671 drv_cmd = ECORE_Q_CMD_UPDATE;
2674 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2675 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2676 drv_cmd = ECORE_Q_CMD_SETUP;
2679 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2680 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2681 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2684 case (RAMROD_CMD_ID_ETH_HALT):
2685 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2686 drv_cmd = ECORE_Q_CMD_HALT;
2689 case (RAMROD_CMD_ID_ETH_TERMINATE):
2690 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2691 drv_cmd = ECORE_Q_CMD_TERMINATE;
2694 case (RAMROD_CMD_ID_ETH_EMPTY):
2695 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2696 drv_cmd = ECORE_Q_CMD_EMPTY;
2700 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2701 command, fp->index);
2705 if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2706 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2708 * q_obj->complete_cmd() failure means that this was
2709 * an unexpected completion.
2711 * In this case we don't want to increase the sc->spq_left
2712 * because apparently we haven't sent this command the first
2715 // bxe_panic(sc, ("Unexpected SP completion\n"));
2719 atomic_add_acq_long(&sc->cq_spq_left, 1);
2721 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2722 atomic_load_acq_long(&sc->cq_spq_left));
2726 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2727 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2728 * the current aggregation queue as in-progress.
2731 bxe_tpa_start(struct bxe_softc *sc,
2732 struct bxe_fastpath *fp,
2736 struct eth_fast_path_rx_cqe *cqe)
2738 struct bxe_sw_rx_bd tmp_bd;
2739 struct bxe_sw_rx_bd *rx_buf;
2740 struct eth_rx_bd *rx_bd;
2742 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2745 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2746 "cons=%d prod=%d\n",
2747 fp->index, queue, cons, prod);
2749 max_agg_queues = MAX_AGG_QS(sc);
2751 KASSERT((queue < max_agg_queues),
2752 ("fp[%02d] invalid aggr queue (%d >= %d)!",
2753 fp->index, queue, max_agg_queues));
2755 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2756 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2759 /* copy the existing mbuf and mapping from the TPA pool */
2760 tmp_bd = tpa_info->bd;
2762 if (tmp_bd.m == NULL) {
2765 tmp = (uint32_t *)cqe;
2767 BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2768 fp->index, queue, cons, prod);
2769 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2770 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2772 /* XXX Error handling? */
2776 /* change the TPA queue to the start state */
2777 tpa_info->state = BXE_TPA_STATE_START;
2778 tpa_info->placement_offset = cqe->placement_offset;
2779 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2780 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2781 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2783 fp->rx_tpa_queue_used |= (1 << queue);
2786 * If all the buffer descriptors are filled with mbufs then fill in
2787 * the current consumer index with a new BD. Else if a maximum Rx
2788 * buffer limit is imposed then fill in the next producer index.
2790 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2793 /* move the received mbuf and mapping to TPA pool */
2794 tpa_info->bd = fp->rx_mbuf_chain[cons];
2796 /* release any existing RX BD mbuf mappings */
2797 if (cons != index) {
2798 rx_buf = &fp->rx_mbuf_chain[cons];
2800 if (rx_buf->m_map != NULL) {
2801 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2802 BUS_DMASYNC_POSTREAD);
2803 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2807 * We get here when the maximum number of rx buffers is less than
2808 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2809 * it out here without concern of a memory leak.
2811 fp->rx_mbuf_chain[cons].m = NULL;
2814 /* update the Rx SW BD with the mbuf info from the TPA pool */
2815 fp->rx_mbuf_chain[index] = tmp_bd;
2817 /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2818 rx_bd = &fp->rx_chain[index];
2819 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2820 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2824 * When a TPA aggregation is completed, loop through the individual mbufs
2825 * of the aggregation, combining them into a single mbuf which will be sent
2826 * up the stack. Refill all freed SGEs with mbufs as we go along.
2829 bxe_fill_frag_mbuf(struct bxe_softc *sc,
2830 struct bxe_fastpath *fp,
2831 struct bxe_sw_tpa_info *tpa_info,
2835 struct eth_end_agg_rx_cqe *cqe,
2838 struct mbuf *m_frag;
2839 uint32_t frag_len, frag_size, i;
2844 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2847 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2848 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2850 /* make sure the aggregated frame is not too big to handle */
2851 if (pages > 8 * PAGES_PER_SGE) {
2853 uint32_t *tmp = (uint32_t *)cqe;
2855 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2856 "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2857 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2858 tpa_info->len_on_bd, frag_size);
2860 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2861 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2863 bxe_panic(sc, ("sge page count error\n"));
2868 * Scan through the scatter gather list pulling individual mbufs into a
2869 * single mbuf for the host stack.
2871 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2872 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2875 * Firmware gives the indices of the SGE as if the ring is an array
2876 * (meaning that the "next" element will consume 2 indices).
2878 frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2880 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2881 "sge_idx=%d frag_size=%d frag_len=%d\n",
2882 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2884 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2886 /* allocate a new mbuf for the SGE */
2887 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2889 /* Leave all remaining SGEs in the ring! */
2893 /* update the fragment length */
2894 m_frag->m_len = frag_len;
2896 /* concatenate the fragment to the head mbuf */
2898 fp->eth_q_stats.mbuf_alloc_sge--;
2900 /* update the TPA mbuf size and remaining fragment size */
2901 m->m_pkthdr.len += frag_len;
2902 frag_size -= frag_len;
2906 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2907 fp->index, queue, frag_size);
2913 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2917 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2918 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2920 for (j = 0; j < 2; j++) {
2921 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2928 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2930 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2931 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2934 * Clear the last two indices in each page. These are the indices that
2935 * correspond to the "next" element, hence will never be indicated and
2936 * should be removed from the calculations.
2938 bxe_clear_sge_mask_next_elems(fp);
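/*
 * Hypothetical stand-alone model (not the driver's actual macro) of how
 * the BIT_VEC64 helpers address sge_mask: an SGE index selects 64-bit
 * element (idx >> 6) and bit (idx & 63) within it. Clearing a bit marks
 * the corresponding SGE as consumed for the producer walk in
 * bxe_update_sge_prod() below.
 */
#if 0
static void
bit_vec64_clear_bit_model(uint64_t *vec, int idx)
{
    vec[idx >> 6] &= ~((uint64_t)1 << (idx & 63));
}
#endif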
2942 bxe_update_last_max_sge(struct bxe_fastpath *fp,
2945 uint16_t last_max = fp->last_max_sge;
2947 if (SUB_S16(idx, last_max) > 0) {
2948 fp->last_max_sge = idx;
2953 bxe_update_sge_prod(struct bxe_softc *sc,
2954 struct bxe_fastpath *fp,
2956 union eth_sgl_or_raw_data *cqe)
2958 uint16_t last_max, last_elem, first_elem;
2966 /* first mark all used pages */
2967 for (i = 0; i < sge_len; i++) {
2968 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2969 RX_SGE(le16toh(cqe->sgl[i])));
2973 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2974 fp->index, sge_len - 1,
2975 le16toh(cqe->sgl[sge_len - 1]));
2977 /* assume that the last SGE index is the biggest */
2978 bxe_update_last_max_sge(fp,
2979 le16toh(cqe->sgl[sge_len - 1]));
2981 last_max = RX_SGE(fp->last_max_sge);
2982 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2983 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2985 /* if ring is not full */
2986 if (last_elem + 1 != first_elem) {
2990 /* now update the prod */
2991 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
2992 if (__predict_true(fp->sge_mask[i])) {
2996 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
2997 delta += BIT_VEC64_ELEM_SZ;
3001 fp->rx_sge_prod += delta;
3002 /* clear page-end entries */
3003 bxe_clear_sge_mask_next_elems(fp);
3007 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3008 fp->index, fp->last_max_sge, fp->rx_sge_prod);
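/*
 * Worked example (illustrative): each sge_mask element covers
 * BIT_VEC64_ELEM_SZ (64) SGE entries. If the walk from first_elem to
 * last_elem finds two elements whose mask dropped to zero (all of their
 * SGEs consumed), each is refilled to all ones and the producer advances
 * by 2 * 64 = 128 entries before the page-end bits are cleared again.
 */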
3012 * The aggregation on the current TPA queue has completed. Pull the individual
3013 * mbuf fragments together into a single mbuf, perform all necessary checksum
3014 * calculations, and send the resulting mbuf to the stack.
3017 bxe_tpa_stop(struct bxe_softc *sc,
3018 struct bxe_fastpath *fp,
3019 struct bxe_sw_tpa_info *tpa_info,
3022 struct eth_end_agg_rx_cqe *cqe,
3025 struct ifnet *ifp = sc->ifnet;
3030 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3031 fp->index, queue, tpa_info->placement_offset,
3032 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3036 /* allocate a replacement before modifying existing mbuf */
3037 rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3039 /* drop the frame and log an error */
3040 fp->eth_q_stats.rx_soft_errors++;
3041 goto bxe_tpa_stop_exit;
3044 /* we have a replacement, fixup the current mbuf */
3045 m_adj(m, tpa_info->placement_offset);
3046 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3048 /* mark the checksums valid (taken care of by the firmware) */
3049 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3050 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3051 m->m_pkthdr.csum_data = 0xffff;
3052 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3057 /* aggregate all of the SGEs into a single mbuf */
3058 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3060 /* drop the packet and log an error */
3061 fp->eth_q_stats.rx_soft_errors++;
3064 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
3065 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3066 m->m_flags |= M_VLANTAG;
3069 /* assign the packet to this interface */
3070 m->m_pkthdr.rcvif = ifp;
3072 #if __FreeBSD_version >= 800000
3073 /* specify what RSS queue was used for this flow */
3074 m->m_pkthdr.flowid = fp->index;
3075 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3079 fp->eth_q_stats.rx_tpa_pkts++;
3081 /* pass the frame to the stack */
3082 (*ifp->if_input)(ifp, m);
3085 /* we passed an mbuf up the stack or dropped the frame */
3086 fp->eth_q_stats.mbuf_alloc_tpa--;
3090 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3091 fp->rx_tpa_queue_used &= ~(1 << queue);
3096 struct bxe_fastpath *fp,
3100 struct eth_fast_path_rx_cqe *cqe_fp)
3102 struct mbuf *m_frag;
3103 uint16_t frags, frag_len;
3104 uint16_t sge_idx = 0;
3109 /* adjust the mbuf */
3112 frag_size = len - lenonbd;
3113 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3115 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3116 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3118 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3119 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3120 m_frag->m_len = frag_len;
3122 /* allocate a new mbuf for the SGE */
3123 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3125 /* Leave all remaining SGEs in the ring! */
3128 fp->eth_q_stats.mbuf_alloc_sge--;
3130 /* concatenate the fragment to the head mbuf */
3133 frag_size -= frag_len;
3136 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3142 bxe_rxeof(struct bxe_softc *sc,
3143 struct bxe_fastpath *fp)
3145 struct ifnet *ifp = sc->ifnet;
3146 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3147 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3153 /* the CQ "next element" is the same size as a regular element */
3154 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3155 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3159 bd_cons = fp->rx_bd_cons;
3160 bd_prod = fp->rx_bd_prod;
3161 bd_prod_fw = bd_prod;
3162 sw_cq_cons = fp->rx_cq_cons;
3163 sw_cq_prod = fp->rx_cq_prod;
3166 * Memory barrier necessary as speculative reads of the rx
3167 * buffer can be ahead of the index in the status block
3172 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3173 fp->index, hw_cq_cons, sw_cq_cons);
3175 while (sw_cq_cons != hw_cq_cons) {
3176 struct bxe_sw_rx_bd *rx_buf = NULL;
3177 union eth_rx_cqe *cqe;
3178 struct eth_fast_path_rx_cqe *cqe_fp;
3179 uint8_t cqe_fp_flags;
3180 enum eth_rx_cqe_type cqe_fp_type;
3181 uint16_t len, lenonbd, pad;
3182 struct mbuf *m = NULL;
3184 comp_ring_cons = RCQ(sw_cq_cons);
3185 bd_prod = RX_BD(bd_prod);
3186 bd_cons = RX_BD(bd_cons);
3188 cqe = &fp->rcq_chain[comp_ring_cons];
3189 cqe_fp = &cqe->fast_path_cqe;
3190 cqe_fp_flags = cqe_fp->type_error_flags;
3191 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3194 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3195 "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3196 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3202 CQE_TYPE(cqe_fp_flags),
3204 cqe_fp->status_flags,
3205 le32toh(cqe_fp->rss_hash_result),
3206 le16toh(cqe_fp->vlan_tag),
3207 le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3208 le16toh(cqe_fp->len_on_bd));
3210 /* is this a slowpath msg? */
3211 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3212 bxe_sp_event(sc, fp, cqe);
3216 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3218 if (!CQE_TYPE_FAST(cqe_fp_type)) {
3219 struct bxe_sw_tpa_info *tpa_info;
3220 uint16_t frag_size, pages;
3223 if (CQE_TYPE_START(cqe_fp_type)) {
3224 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3225 bd_cons, bd_prod, cqe_fp);
3226 m = NULL; /* packet not ready yet */
3230 KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3231 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3233 queue = cqe->end_agg_cqe.queue_index;
3234 tpa_info = &fp->rx_tpa_info[queue];
3236 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3239 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3240 tpa_info->len_on_bd);
3241 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3243 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3244 &cqe->end_agg_cqe, comp_ring_cons);
3246 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3253 /* is this an error packet? */
3254 if (__predict_false(cqe_fp_flags &
3255 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3256 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3257 fp->eth_q_stats.rx_soft_errors++;
3261 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3262 lenonbd = le16toh(cqe_fp->len_on_bd);
3263 pad = cqe_fp->placement_offset;
3267 if (__predict_false(m == NULL)) {
3268 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3269 bd_cons, fp->index);
3273 /* XXX double copy if packet length under a threshold */
3276 * If all the buffer descriptors are filled with mbufs then fill in
3277 * the current consumer index with a new BD. Else if a maximum Rx
3278 * buffer limit is imposed then fill in the next producer index.
3280 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3281 (sc->max_rx_bufs != RX_BD_USABLE) ?
3285 /* we simply reuse the received mbuf and don't post it to the stack */
3288 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3290 fp->eth_q_stats.rx_soft_errors++;
3292 if (sc->max_rx_bufs != RX_BD_USABLE) {
3293 /* copy this consumer index to the producer index */
3294 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3295 sizeof(struct bxe_sw_rx_bd));
3296 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3302 /* current mbuf was detached from the bd */
3303 fp->eth_q_stats.mbuf_alloc_rx--;
3305 /* we allocated a replacement mbuf, fixup the current one */
3307 m->m_pkthdr.len = m->m_len = len;
3309 if ((len > 60) && (len > lenonbd)) {
3310 fp->eth_q_stats.rx_bxe_service_rxsgl++;
3311 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3314 fp->eth_q_stats.rx_jumbo_sge_pkts++;
3315 } else if (lenonbd < len) {
3316 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3319 /* assign the packet to this interface */
3320 m->m_pkthdr.rcvif = ifp;
3322 /* assume no hardware checksum validation has completed */
3323 m->m_pkthdr.csum_flags = 0;
3325 /* validate checksum if offload enabled */
3326 if (ifp->if_capenable & IFCAP_RXCSUM) {
3327 /* check for a valid IP frame */
3328 if (!(cqe->fast_path_cqe.status_flags &
3329 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3330 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3331 if (__predict_false(cqe_fp_flags &
3332 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3333 fp->eth_q_stats.rx_hw_csum_errors++;
3335 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3336 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3340 /* check for a valid TCP/UDP frame */
3341 if (!(cqe->fast_path_cqe.status_flags &
3342 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3343 if (__predict_false(cqe_fp_flags &
3344 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3345 fp->eth_q_stats.rx_hw_csum_errors++;
3347 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3348 m->m_pkthdr.csum_data = 0xFFFF;
3349 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3355 /* if there is a VLAN tag then flag that info */
3356 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) {
3357 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3358 m->m_flags |= M_VLANTAG;
3361 #if __FreeBSD_version >= 800000
3362 /* specify what RSS queue was used for this flow */
3363 m->m_pkthdr.flowid = fp->index;
3364 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3369 bd_cons = RX_BD_NEXT(bd_cons);
3370 bd_prod = RX_BD_NEXT(bd_prod);
3371 bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3373 /* pass the frame to the stack */
3374 if (__predict_true(m != NULL)) {
3377 (*ifp->if_input)(ifp, m);
3382 sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3383 sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3385 /* limit spinning on the queue */
3389 if (rx_pkts == sc->rx_budget) {
3390 fp->eth_q_stats.rx_budget_reached++;
3393 } /* while work to do */
3395 fp->rx_bd_cons = bd_cons;
3396 fp->rx_bd_prod = bd_prod_fw;
3397 fp->rx_cq_cons = sw_cq_cons;
3398 fp->rx_cq_prod = sw_cq_prod;
3400 /* Update producers */
3401 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3403 fp->eth_q_stats.rx_pkts += rx_pkts;
3404 fp->eth_q_stats.rx_calls++;
3406 BXE_FP_RX_UNLOCK(fp);
3408 return (sw_cq_cons != hw_cq_cons);
3412 bxe_free_tx_pkt(struct bxe_softc *sc,
3413 struct bxe_fastpath *fp,
3416 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3417 struct eth_tx_start_bd *tx_start_bd;
3418 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3422 /* unmap the mbuf from non-paged memory */
3423 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3425 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3426 nbd = le16toh(tx_start_bd->nbd) - 1;
3428 new_cons = (tx_buf->first_bd + nbd);
3431 if (__predict_true(tx_buf->m != NULL)) {
3433 fp->eth_q_stats.mbuf_alloc_tx--;
3435 fp->eth_q_stats.tx_chain_lost_mbuf++;
3439 tx_buf->first_bd = 0;
3444 /* transmit timeout watchdog */
3446 bxe_watchdog(struct bxe_softc *sc,
3447 struct bxe_fastpath *fp)
3451 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3452 BXE_FP_TX_UNLOCK(fp);
3456 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3458 BXE_FP_TX_UNLOCK(fp);
3460 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3461 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3466 /* processes transmit completions */
3468 bxe_txeof(struct bxe_softc *sc,
3469 struct bxe_fastpath *fp)
3471 struct ifnet *ifp = sc->ifnet;
3472 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3473 uint16_t tx_bd_avail;
3475 BXE_FP_TX_LOCK_ASSERT(fp);
3477 bd_cons = fp->tx_bd_cons;
3478 hw_cons = le16toh(*fp->tx_cons_sb);
3479 sw_cons = fp->tx_pkt_cons;
3481 while (sw_cons != hw_cons) {
3482 pkt_cons = TX_BD(sw_cons);
3485 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3486 fp->index, hw_cons, sw_cons, pkt_cons);
3488 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3493 fp->tx_pkt_cons = sw_cons;
3494 fp->tx_bd_cons = bd_cons;
3497 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3498 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3502 tx_bd_avail = bxe_tx_avail(sc, fp);
3504 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3505 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3507 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3510 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3511 /* reset the watchdog timer if there are pending transmits */
3512 fp->watchdog_timer = BXE_TX_TIMEOUT;
3515 /* clear watchdog when there are no pending transmits */
3516 fp->watchdog_timer = 0;
3522 bxe_drain_tx_queues(struct bxe_softc *sc)
3524 struct bxe_fastpath *fp;
3527 /* wait until all TX fastpath tasks have completed */
3528 for (i = 0; i < sc->num_queues; i++) {
3533 while (bxe_has_tx_work(fp)) {
3537 BXE_FP_TX_UNLOCK(fp);
3540 BLOGE(sc, "Timeout waiting for fp[%d] "
3541 "transmits to complete!\n", i);
3542 bxe_panic(sc, ("tx drain failure\n"));
3556 bxe_del_all_macs(struct bxe_softc *sc,
3557 struct ecore_vlan_mac_obj *mac_obj,
3559 uint8_t wait_for_comp)
3561 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3564 /* wait for completion of requested */
3565 if (wait_for_comp) {
3566 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3569 /* Set the mac type of addresses we want to clear */
3570 bxe_set_bit(mac_type, &vlan_mac_flags);
3572 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3574 BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3575 rc, mac_type, wait_for_comp);
3582 bxe_fill_accept_flags(struct bxe_softc *sc,
3584 unsigned long *rx_accept_flags,
3585 unsigned long *tx_accept_flags)
3587 /* Clear the flags first */
3588 *rx_accept_flags = 0;
3589 *tx_accept_flags = 0;
3592 case BXE_RX_MODE_NONE:
3594 * 'drop all' supersedes any accept flags that may have been
3595 * passed to the function.
3599 case BXE_RX_MODE_NORMAL:
3600 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3601 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3602 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3604 /* internal switching mode */
3605 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3606 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3607 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3611 case BXE_RX_MODE_ALLMULTI:
3612 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3613 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3614 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3616 /* internal switching mode */
3617 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3618 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3619 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3623 case BXE_RX_MODE_PROMISC:
3625 * According to the definition of SI mode, an iface in promisc mode
3626 * should receive matched and unmatched (in resolution of port)
3629 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3630 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3631 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3632 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3634 /* internal switching mode */
3635 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3636 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3639 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3641 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3647 BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3651 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3652 if (rx_mode != BXE_RX_MODE_NONE) {
3653 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3654 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3661 bxe_set_q_rx_mode(struct bxe_softc *sc,
3663 unsigned long rx_mode_flags,
3664 unsigned long rx_accept_flags,
3665 unsigned long tx_accept_flags,
3666 unsigned long ramrod_flags)
3668 struct ecore_rx_mode_ramrod_params ramrod_param;
3671 memset(&ramrod_param, 0, sizeof(ramrod_param));
3673 /* Prepare ramrod parameters */
3674 ramrod_param.cid = 0;
3675 ramrod_param.cl_id = cl_id;
3676 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3677 ramrod_param.func_id = SC_FUNC(sc);
3679 ramrod_param.pstate = &sc->sp_state;
3680 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3682 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3683 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3685 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3687 ramrod_param.ramrod_flags = ramrod_flags;
3688 ramrod_param.rx_mode_flags = rx_mode_flags;
3690 ramrod_param.rx_accept_flags = rx_accept_flags;
3691 ramrod_param.tx_accept_flags = tx_accept_flags;
3693 rc = ecore_config_rx_mode(sc, &ramrod_param);
3695 BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3696 "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3697 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3698 (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3699 (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3707 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3709 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3710 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3713 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3719 bxe_set_bit(RAMROD_RX, &ramrod_flags);
3720 bxe_set_bit(RAMROD_TX, &ramrod_flags);
3722 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3723 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3724 rx_accept_flags, tx_accept_flags,
3728 /* returns the "mcp load_code" according to the global load_count array */
3730 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3732 int path = SC_PATH(sc);
3733 int port = SC_PORT(sc);
3735 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3736 path, load_count[path][0], load_count[path][1],
3737 load_count[path][2]);
3738 load_count[path][0]++;
3739 load_count[path][1 + port]++;
3740 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3741 path, load_count[path][0], load_count[path][1],
3742 load_count[path][2]);
3743 if (load_count[path][0] == 1) {
3744 return (FW_MSG_CODE_DRV_LOAD_COMMON);
3745 } else if (load_count[path][1 + port] == 1) {
3746 return (FW_MSG_CODE_DRV_LOAD_PORT);
3748 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
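/*
 * Hypothetical model (not driver code) of the load_code selection above:
 * 'total' is the number of functions loaded on this path and 'on_port'
 * the number loaded on this port, both sampled after the increments.
 */
#if 0
static uint32_t
bxe_example_no_mcp_load_code(int total, int on_port)
{
    if (total == 1)
        return (FW_MSG_CODE_DRV_LOAD_COMMON);   /* first on the path */
    else if (on_port == 1)
        return (FW_MSG_CODE_DRV_LOAD_PORT);     /* first on this port */
    else
        return (FW_MSG_CODE_DRV_LOAD_FUNCTION); /* everything else */
}
#endif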
3752 /* returns the "mcp load_code" according to the global load_count array */
3754 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3756 int port = SC_PORT(sc);
3757 int path = SC_PATH(sc);
3759 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3760 path, load_count[path][0], load_count[path][1],
3761 load_count[path][2]);
3762 load_count[path][0]--;
3763 load_count[path][1 + port]--;
3764 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3765 path, load_count[path][0], load_count[path][1],
3766 load_count[path][2]);
3767 if (load_count[path][0] == 0) {
3768 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3769 } else if (load_count[path][1 + port] == 0) {
3770 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3772 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3776 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3778 bxe_send_unload_req(struct bxe_softc *sc,
3781 uint32_t reset_code = 0;
3783 /* Select the UNLOAD request mode */
3784 if (unload_mode == UNLOAD_NORMAL) {
3785 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3787 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3790 /* Send the request to the MCP */
3791 if (!BXE_NOMCP(sc)) {
3792 reset_code = bxe_fw_command(sc, reset_code, 0);
3794 reset_code = bxe_nic_unload_no_mcp(sc);
3797 return (reset_code);
3800 /* send UNLOAD_DONE command to the MCP */
3802 bxe_send_unload_done(struct bxe_softc *sc,
3805 uint32_t reset_param =
3806 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3808 /* Report UNLOAD_DONE to MCP */
3809 if (!BXE_NOMCP(sc)) {
3810 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3815 bxe_func_wait_started(struct bxe_softc *sc)
3819 if (!sc->port.pmf) {
3824 * (assumption: No Attention from MCP at this stage)
3825 * PMF probably in the middle of TX disable/enable transaction
3826 * 1. Sync the ISR for the default SB
3827 * 2. Sync the SP queue - this guarantees that attention handling started
3828 * 3. Wait until the TX disable/enable transaction completes
3830 * 1+2 guarantee that if DCBX attention was scheduled it already changed
3831 * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3832 * received completion for the transaction the state is TX_STOPPED.
3833 * State will return to STARTED after completion of TX_STOPPED-->STARTED
3837 /* XXX make sure default SB ISR is done */
3838 /* need a way to synchronize an irq (intr_mtx?) */
3840 /* XXX flush any work queues */
3842 while (ecore_func_get_state(sc, &sc->func_obj) !=
3843 ECORE_F_STATE_STARTED && tout--) {
3847 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3849 * Failed to complete the transaction in a "good way"
3850 * Force both transactions with CLR bit.
3852 struct ecore_func_state_params func_params = { NULL };
3854 BLOGE(sc, "Unexpected function state! "
3855 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3857 func_params.f_obj = &sc->func_obj;
3858 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3860 /* STARTED-->TX_STOPPED */
3861 func_params.cmd = ECORE_F_CMD_TX_STOP;
3862 ecore_func_state_change(sc, &func_params);
3864 /* TX_STOPPED-->STARTED */
3865 func_params.cmd = ECORE_F_CMD_TX_START;
3866 return (ecore_func_state_change(sc, &func_params));
3873 bxe_stop_queue(struct bxe_softc *sc,
3876 struct bxe_fastpath *fp = &sc->fp[index];
3877 struct ecore_queue_state_params q_params = { NULL };
3880 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3882 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3883 /* We want to wait for completion in this context */
3884 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3886 /* Stop the primary connection: */
3888 /* ...halt the connection */
3889 q_params.cmd = ECORE_Q_CMD_HALT;
3890 rc = ecore_queue_state_change(sc, &q_params);
3895 /* ...terminate the connection */
3896 q_params.cmd = ECORE_Q_CMD_TERMINATE;
3897 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3898 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3899 rc = ecore_queue_state_change(sc, &q_params);
3904 /* ...delete cfc entry */
3905 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3906 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3907 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3908 return (ecore_queue_state_change(sc, &q_params));
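/*
 * Worked example (illustrative): stopping a queue therefore issues three
 * ramrods in order - HALT (quiesce the connection), TERMINATE (flush it),
 * CFC_DEL (release the connection context) - each completed synchronously
 * because RAMROD_COMP_WAIT is set above.
 */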
3911 /* wait for the outstanding SP commands */
3912 static inline uint8_t
3913 bxe_wait_sp_comp(struct bxe_softc *sc,
3917 int tout = 5000; /* wait for 5 secs tops */
3921 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3930 tmp = atomic_load_acq_long(&sc->sp_state);
3932 BLOGE(sc, "Filtering completion timed out: "
3933 "sp_state 0x%lx, mask 0x%lx\n",
3942 bxe_func_stop(struct bxe_softc *sc)
3944 struct ecore_func_state_params func_params = { NULL };
3947 /* prepare parameters for function state transitions */
3948 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3949 func_params.f_obj = &sc->func_obj;
3950 func_params.cmd = ECORE_F_CMD_STOP;
3953 * Try to stop the function the 'good way'. If it fails (in case
3954 * of a parity error during bxe_chip_cleanup()) and we are
3955 * not in a debug mode, perform a state transaction in order to
3956 * enable a further HW_RESET transaction.
3958 rc = ecore_func_state_change(sc, &func_params);
3960 BLOGE(sc, "FUNC_STOP ramrod failed. "
3961 "Running a dry transaction (%d)\n", rc);
3962 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3963 return (ecore_func_state_change(sc, &func_params));
3970 bxe_reset_hw(struct bxe_softc *sc,
3973 struct ecore_func_state_params func_params = { NULL };
3975 /* Prepare parameters for function state transitions */
3976 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3978 func_params.f_obj = &sc->func_obj;
3979 func_params.cmd = ECORE_F_CMD_HW_RESET;
3981 func_params.params.hw_init.load_phase = load_code;
3983 return (ecore_func_state_change(sc, &func_params));
3987 bxe_int_disable_sync(struct bxe_softc *sc,
3991 /* prevent the HW from sending interrupts */
3992 bxe_int_disable(sc);
3995 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
3996 /* make sure all ISRs are done */
3998 /* XXX make sure sp_task is not running */
3999 /* cancel and flush work queues */
4003 bxe_chip_cleanup(struct bxe_softc *sc,
4004 uint32_t unload_mode,
4007 int port = SC_PORT(sc);
4008 struct ecore_mcast_ramrod_params rparam = { NULL };
4009 uint32_t reset_code;
4012 bxe_drain_tx_queues(sc);
4014 /* give HW time to discard old tx messages */
4017 /* Clean all ETH MACs */
4018 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4020 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4023 /* Clean up UC list */
4024 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4026 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4030 if (!CHIP_IS_E1(sc)) {
4031 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4034 /* Set "drop all" to stop Rx */
4037 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4038 * a race between the completion code and this code.
4042 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4043 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4045 bxe_set_storm_rx_mode(sc);
4048 /* Clean up multicast configuration */
4049 rparam.mcast_obj = &sc->mcast_obj;
4050 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4052 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4055 BXE_MCAST_UNLOCK(sc);
4057 // XXX bxe_iov_chip_cleanup(sc);
4060 * Send the UNLOAD_REQUEST to the MCP. This will return whether
4061 * this function should perform FUNCTION, PORT, or COMMON HW
4064 reset_code = bxe_send_unload_req(sc, unload_mode);
4067 * (assumption: No Attention from MCP at this stage)
4068 * PMF probably in the middle of TX disable/enable transaction
4070 rc = bxe_func_wait_started(sc);
4072 BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4076 * Close multi and leading connections
4077 * Completions for ramrods are collected in a synchronous way
4079 for (i = 0; i < sc->num_queues; i++) {
4080 if (bxe_stop_queue(sc, i)) {
4086 * If SP settings didn't get completed so far - something
4087 * very wrong has happened.
4089 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4090 BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4095 rc = bxe_func_stop(sc);
4097 BLOGE(sc, "Function stop failed!(%d)\n", rc);
4100 /* disable HW interrupts */
4101 bxe_int_disable_sync(sc, TRUE);
4103 /* detach interrupts */
4104 bxe_interrupt_detach(sc);
4106 /* Reset the chip */
4107 rc = bxe_reset_hw(sc, reset_code);
4109 BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4112 /* Report UNLOAD_DONE to MCP */
4113 bxe_send_unload_done(sc, keep_link);
4117 bxe_disable_close_the_gate(struct bxe_softc *sc)
4120 int port = SC_PORT(sc);
4123 "Disabling 'close the gates'\n");
4125 if (CHIP_IS_E1(sc)) {
4126 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4127 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4128 val = REG_RD(sc, addr);
4130 REG_WR(sc, addr, val);
4132 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4133 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4134 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4135 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4140 * Cleans the objects that have internal lists without sending
4141 * ramrods. Should be run when interrupts are disabled.
4144 bxe_squeeze_objects(struct bxe_softc *sc)
4146 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4147 struct ecore_mcast_ramrod_params rparam = { NULL };
4148 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4151 /* Cleanup MACs' object first... */
4153 /* Wait for completion of requested */
4154 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4155 /* Perform a dry cleanup */
4156 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4158 /* Clean ETH primary MAC */
4159 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4160 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4163 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4166 /* Cleanup UC list */
4168 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4169 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4172 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4175 /* Now clean mcast object... */
4177 rparam.mcast_obj = &sc->mcast_obj;
4178 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4180 /* Add a DEL command... */
4181 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4183 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4186 /* now wait until all pending commands are cleared */
4188 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4191 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4195 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4199 /* stop the controller */
4200 static __noinline int
4201 bxe_nic_unload(struct bxe_softc *sc,
4202 uint32_t unload_mode,
4205 uint8_t global = FALSE;
4208 BXE_CORE_LOCK_ASSERT(sc);
4210 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4212 /* mark driver as unloaded in shmem2 */
4213 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4214 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4215 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4216 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4219 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4220 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4222 * We can get here if the driver has been unloaded
4223 * during parity error recovery and is either waiting for a
4224 * leader to complete or for other functions to unload and
4225 * then ifconfig down has been issued. In this case we want to
4226 * unload and let other functions complete a recovery
4229 sc->recovery_state = BXE_RECOVERY_DONE;
4231 bxe_release_leader_lock(sc);
4234 BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4235 BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4236 " state = 0x%x\n", sc->recovery_state, sc->state);
4241 * Nothing to do during unload if previous bxe_nic_load()
4242 * did not complete successfully - all resources are released.
4244 if ((sc->state == BXE_STATE_CLOSED) ||
4245 (sc->state == BXE_STATE_ERROR)) {
4249 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4255 sc->rx_mode = BXE_RX_MODE_NONE;
4256 /* XXX set rx mode ??? */
4258 if (IS_PF(sc) && !sc->grcdump_done) {
4259 /* set ALWAYS_ALIVE bit in shmem */
4260 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4264 bxe_stats_handle(sc, STATS_EVENT_STOP);
4265 bxe_save_statistics(sc);
4268 /* wait till consumers catch up with producers in all queues */
4269 bxe_drain_tx_queues(sc);
4271 /* if VF, indicate to the PF that this function is going down (the PF
4272 * will delete SP elements and clear initializations
4275 ; /* bxe_vfpf_close_vf(sc); */
4276 } else if (unload_mode != UNLOAD_RECOVERY) {
4277 /* if this is a normal/close unload need to clean up chip */
4278 if (!sc->grcdump_done)
4279 bxe_chip_cleanup(sc, unload_mode, keep_link);
4281 /* Send the UNLOAD_REQUEST to the MCP */
4282 bxe_send_unload_req(sc, unload_mode);
4285 * Prevent transactions to host from the functions on the
4286 * engine that doesn't reset global blocks in case of global
4287 * attention once global blocks are reset and gates are opened
4288 * (the engine whose leader will perform the recovery
4291 if (!CHIP_IS_E1x(sc)) {
4295 /* disable HW interrupts */
4296 bxe_int_disable_sync(sc, TRUE);
4298 /* detach interrupts */
4299 bxe_interrupt_detach(sc);
4301 /* Report UNLOAD_DONE to MCP */
4302 bxe_send_unload_done(sc, FALSE);
4306 * At this stage no more interrupts will arrive so we may safely clean
4307 * the queueable objects here in case they failed to get cleaned so far.
4310 bxe_squeeze_objects(sc);
4313 /* There should be no more pending SP commands at this stage */
4318 bxe_free_fp_buffers(sc);
4324 bxe_free_fw_stats_mem(sc);
4326 sc->state = BXE_STATE_CLOSED;
4329 * Check if there are pending parity attentions. If there are - set
4330 * RECOVERY_IN_PROGRESS.
4332 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4333 bxe_set_reset_in_progress(sc);
4335 /* Set RESET_IS_GLOBAL if needed */
4337 bxe_set_reset_global(sc);
4342 * The last driver must disable a "close the gate" if there is no
4343 * parity attention or "process kill" pending.
4345 if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4346 bxe_reset_is_done(sc, SC_PATH(sc))) {
4347 bxe_disable_close_the_gate(sc);
4350 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4356 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4357 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4360 bxe_ifmedia_update(struct ifnet *ifp)
4362 struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc;
4363 struct ifmedia *ifm;
4367 /* We only support Ethernet media type. */
4368 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4372 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4378 case IFM_10G_TWINAX:
4380 /* We don't support changing the media type. */
4381 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4382 IFM_SUBTYPE(ifm->ifm_media));
4390 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4393 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4395 struct bxe_softc *sc = ifp->if_softc;
4397 /* Report link down if the driver isn't running. */
4398 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4399 ifmr->ifm_active |= IFM_NONE;
4403 /* Setup the default interface info. */
4404 ifmr->ifm_status = IFM_AVALID;
4405 ifmr->ifm_active = IFM_ETHER;
4407 if (sc->link_vars.link_up) {
4408 ifmr->ifm_status |= IFM_ACTIVE;
4410 ifmr->ifm_active |= IFM_NONE;
4414 ifmr->ifm_active |= sc->media;
4416 if (sc->link_vars.duplex == DUPLEX_FULL) {
4417 ifmr->ifm_active |= IFM_FDX;
4419 ifmr->ifm_active |= IFM_HDX;
4424 bxe_ioctl_nvram(struct bxe_softc *sc,
4428 struct bxe_nvram_data nvdata_base;
4429 struct bxe_nvram_data *nvdata;
4433 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base));
4435 len = (sizeof(struct bxe_nvram_data) +
4439 if (len > sizeof(struct bxe_nvram_data)) {
4440 if ((nvdata = (struct bxe_nvram_data *)
4441 malloc(len, M_DEVBUF,
4442 (M_NOWAIT | M_ZERO))) == NULL) {
4443 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed priv_op 0x%x "
4444 " len = 0x%x\n", priv_op, len);
4447 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data));
4449 nvdata = &nvdata_base;
4452 if (priv_op == BXE_IOC_RD_NVRAM) {
4453 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n",
4454 nvdata->offset, nvdata->len);
4455 error = bxe_nvram_read(sc,
4457 (uint8_t *)nvdata->value,
4459 copyout(nvdata, ifr->ifr_data, len);
4460 } else { /* BXE_IOC_WR_NVRAM */
4461 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n",
4462 nvdata->offset, nvdata->len);
4463 copyin(ifr->ifr_data, nvdata, len);
4464 error = bxe_nvram_write(sc,
4466 (uint8_t *)nvdata->value,
4470 if (len > sizeof(struct bxe_nvram_data)) {
4471 free(nvdata, M_DEVBUF);
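/*
 * Illustrative userland sketch (not part of the driver): driving the
 * NVRAM read path above through the SIOCGPRIVATE_0 private ioctl. The
 * layout of struct bxe_nvram_data and the BXE_IOC_RD_NVRAM opcode come
 * from the driver's headers; per the copyin of priv_op in bxe_ioctl()
 * below, the opcode is assumed to lead the buffer. A real program needs
 * <sys/ioctl.h>, <sys/sockio.h>, <net/if.h> and <string.h>.
 */
#if 0
static int
bxe_example_rd_nvram(int fd, const char *ifname, struct bxe_nvram_data *nvdata)
{
    struct ifreq ifr;

    memset(&ifr, 0, sizeof(ifr));
    strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

    /* nvdata->offset and nvdata->len select the NVRAM region to read */
    ifr.ifr_data = (caddr_t)nvdata;

    /* on success the driver copies the requested words back into nvdata */
    return (ioctl(fd, SIOCGPRIVATE_0, &ifr));
}
#endif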
4478 bxe_ioctl_stats_show(struct bxe_softc *sc,
4482 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN);
4483 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t));
4490 case BXE_IOC_STATS_SHOW_NUM:
4491 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data));
4492 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num =
4494 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len =
4498 case BXE_IOC_STATS_SHOW_STR:
4499 memset(ifr->ifr_data, 0, str_size);
4500 p_tmp = ifr->ifr_data;
4501 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4502 strcpy(p_tmp, bxe_eth_stats_arr[i].string);
4503 p_tmp += STAT_NAME_LEN;
4507 case BXE_IOC_STATS_SHOW_CNT:
4508 memset(ifr->ifr_data, 0, stats_size);
4509 p_tmp = ifr->ifr_data;
4510 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
4511 offset = ((uint32_t *)&sc->eth_stats +
4512 bxe_eth_stats_arr[i].offset);
4513 switch (bxe_eth_stats_arr[i].size) {
4515 *((uint64_t *)p_tmp) = (uint64_t)*offset;
4518 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1));
4521 *((uint64_t *)p_tmp) = 0;
4523 p_tmp += sizeof(uint64_t);
4533 bxe_handle_chip_tq(void *context,
4536 struct bxe_softc *sc = (struct bxe_softc *)context;
4537 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4541 case CHIP_TQ_REINIT:
4542 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
4543 /* restart the interface */
4544 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4545 bxe_periodic_stop(sc);
4547 bxe_stop_locked(sc);
4548 bxe_init_locked(sc);
4549 BXE_CORE_UNLOCK(sc);
4559 * Handles any IOCTL calls from the operating system.
4562 * 0 = Success, >0 Failure
4565 bxe_ioctl(struct ifnet *ifp,
4569 struct bxe_softc *sc = ifp->if_softc;
4570 struct ifreq *ifr = (struct ifreq *)data;
4571 struct bxe_nvram_data *nvdata;
4577 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4578 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4583 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4586 if (sc->mtu == ifr->ifr_mtu) {
4587 /* nothing to change */
4591 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4592 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4593 ifr->ifr_mtu, mtu_min, mtu_max);
4598 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4599 (unsigned long)ifr->ifr_mtu);
4600 atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu,
4601 (unsigned long)ifr->ifr_mtu);
4607 /* toggle the interface state up or down */
4608 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4611 /* check if the interface is up */
4612 if (ifp->if_flags & IFF_UP) {
4613 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4614 /* set the receive mode flags */
4615 bxe_set_rx_mode(sc);
4617 bxe_init_locked(sc);
4620 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4621 bxe_periodic_stop(sc);
4622 bxe_stop_locked(sc);
4625 BXE_CORE_UNLOCK(sc);
4631 /* add/delete multicast addresses */
4632 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4634 /* check if the interface is up */
4635 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4636 /* set the receive mode flags */
4638 bxe_set_rx_mode(sc);
4639 BXE_CORE_UNLOCK(sc);
4645 /* find out which capabilities have changed */
4646 mask = (ifr->ifr_reqcap ^ ifp->if_capenable);
4648 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4651 /* toggle the LRO capabilities enable flag */
4652 if (mask & IFCAP_LRO) {
4653 ifp->if_capenable ^= IFCAP_LRO;
4654 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4655 (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF");
4659 /* toggle the TXCSUM checksum capabilities enable flag */
4660 if (mask & IFCAP_TXCSUM) {
4661 ifp->if_capenable ^= IFCAP_TXCSUM;
4662 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4663 (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF");
4664 if (ifp->if_capenable & IFCAP_TXCSUM) {
4665 ifp->if_hwassist = (CSUM_IP |
4672 ifp->if_hwassist = 0;
4676 /* toggle the RXCSUM checksum capabilities enable flag */
4677 if (mask & IFCAP_RXCSUM) {
4678 ifp->if_capenable ^= IFCAP_RXCSUM;
4679 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4680 (ifp->if_capenable & IFCAP_RXCSUM) ? "ON" : "OFF");
4681 if (ifp->if_capenable & IFCAP_RXCSUM) {
4682 ifp->if_hwassist = (CSUM_IP |
4689 ifp->if_hwassist = 0;
4693 /* toggle TSO4 capabilities enabled flag */
4694 if (mask & IFCAP_TSO4) {
4695 ifp->if_capenable ^= IFCAP_TSO4;
4696 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4697 (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF");
4700 /* toggle TSO6 capabilities enabled flag */
4701 if (mask & IFCAP_TSO6) {
4702 ifp->if_capenable ^= IFCAP_TSO6;
4703 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4704 (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF");
4707 /* toggle VLAN_HWTSO capabilities enabled flag */
4708 if (mask & IFCAP_VLAN_HWTSO) {
4709 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4710 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4711 (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4714 /* toggle VLAN_HWCSUM capabilities enabled flag */
4715 if (mask & IFCAP_VLAN_HWCSUM) {
4716 /* XXX investigate this... */
4717 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4721 /* toggle VLAN_MTU capabilities enable flag */
4722 if (mask & IFCAP_VLAN_MTU) {
4723 /* XXX investigate this... */
4724 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4728 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4729 if (mask & IFCAP_VLAN_HWTAGGING) {
4730 /* XXX investigate this... */
4731 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4735 /* toggle VLAN_HWFILTER capabilities enabled flag */
4736 if (mask & IFCAP_VLAN_HWFILTER) {
4737 /* XXX investigate this... */
4738 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4750 /* set/get interface media */
4751 BLOGD(sc, DBG_IOCTL,
4752 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4754 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4757 case SIOCGPRIVATE_0:
4758 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));
4762 case BXE_IOC_RD_NVRAM:
4763 case BXE_IOC_WR_NVRAM:
4764 nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
4765 BLOGD(sc, DBG_IOCTL,
4766 "Received Private NVRAM ioctl addr=0x%x size=%u\n",
4767 nvdata->offset, nvdata->len);
4768 error = bxe_ioctl_nvram(sc, priv_op, ifr);
4771 case BXE_IOC_STATS_SHOW_NUM:
4772 case BXE_IOC_STATS_SHOW_STR:
4773 case BXE_IOC_STATS_SHOW_CNT:
4774 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
4776 error = bxe_ioctl_stats_show(sc, priv_op, ifr);
4780 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
4788 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4790 error = ether_ioctl(ifp, command, data);
4794 if (reinit && (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4795 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4796 "Re-initializing hardware from IOCTL change\n");
4797 bxe_periodic_stop(sc);
4799 bxe_stop_locked(sc);
4800 bxe_init_locked(sc);
4801 BXE_CORE_UNLOCK(sc);
4807 static __noinline void
4808 bxe_dump_mbuf(struct bxe_softc *sc,
4815 if (!(sc->debug & DBG_MBUF)) {
4820 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4826 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4827 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4829 if (m->m_flags & M_PKTHDR) {
4831 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4832 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4833 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4836 if (m->m_flags & M_EXT) {
4837 switch (m->m_ext.ext_type) {
4838 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
4839 case EXT_SFBUF: type = "EXT_SFBUF"; break;
4840 case EXT_JUMBOP: type = "EXT_JUMBOP"; break;
4841 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
4842 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
4843 case EXT_PACKET: type = "EXT_PACKET"; break;
4844 case EXT_MBUF: type = "EXT_MBUF"; break;
4845 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
4846 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
4847 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4848 case EXT_EXTREF: type = "EXT_EXTREF"; break;
4849 default: type = "UNKNOWN"; break;
4853 "%02d: - m_ext: %p ext_size=%d type=%s\n",
4854 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4858 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4867 * Checks to ensure the 13 BD sliding window is >= MSS for TSO.
4868 * Check that (13 total BDs - 3 BDs) = 10 BD window >= MSS.
4869 * The window: 3 BDs = 1 for the headers BD + 2 for the parse BD and last BD.
4870 * The headers come in a separate BD in FreeBSD, so 13 - 3 = 10.
4871 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
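 *
 * Illustrative example (hypothetical numbers): with an MSS of 1448 bytes,
 * every 10 consecutive data segments must sum to at least 1448 bytes. If
 * segs[1..10] total only 1200 bytes, this routine reports the violation and
 * the caller must defragment the mbuf chain before it can be sent.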
4874 bxe_chktso_window(struct bxe_softc *sc,
4876 bus_dma_segment_t *segs,
4879 uint32_t num_wnds, wnd_size, wnd_sum;
4880 int32_t frag_idx, wnd_idx;
4881 unsigned short lso_mss;
4887 num_wnds = nsegs - wnd_size;
4888 lso_mss = htole16(m->m_pkthdr.tso_segsz);
4891 * The total Eth+IP+TCP header length is in the first FreeBSD mbuf, so
4892 * calculate the first window's sum of data while skipping the first
4893 * segment, assuming it holds the headers.
4895 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4896 wnd_sum += htole16(segs[frag_idx].ds_len);
4899 /* check the first 10 bd window size */
4900 if (wnd_sum < lso_mss) {
4904 /* run through the windows */
4905 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4906 /* subtract the length of the segment falling out of the previous window */
4907 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4908 /* add the next mbuf len to the len of our new window */
4909 wnd_sum += htole16(segs[frag_idx].ds_len);
4910 if (wnd_sum < lso_mss) {
4919 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4921 uint32_t *parsing_data)
4923 struct ether_vlan_header *eh = NULL;
4924 struct ip *ip4 = NULL;
4925 struct ip6_hdr *ip6 = NULL;
4927 struct tcphdr *th = NULL;
4928 int e_hlen, ip_hlen, l4_off;
4931 if (m->m_pkthdr.csum_flags == CSUM_IP) {
4932 /* no L4 checksum offload needed */
4936 /* get the Ethernet header */
4937 eh = mtod(m, struct ether_vlan_header *);
4939 /* handle VLAN encapsulation if present */
4940 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4941 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4942 proto = ntohs(eh->evl_proto);
4944 e_hlen = ETHER_HDR_LEN;
4945 proto = ntohs(eh->evl_encap_proto);
4950 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4951 ip4 = (m->m_len < sizeof(struct ip)) ?
4952 (struct ip *)m->m_next->m_data :
4953 (struct ip *)(m->m_data + e_hlen);
4954 /* ip_hl is number of 32-bit words */
4955 ip_hlen = (ip4->ip_hl << 2);
4958 case ETHERTYPE_IPV6:
4959 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4960 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4961 (struct ip6_hdr *)m->m_next->m_data :
4962 (struct ip6_hdr *)(m->m_data + e_hlen);
4963 /* XXX cannot support offload with IPv6 extensions */
4964 ip_hlen = sizeof(struct ip6_hdr);
4968 /* We can't offload in this case... */
4969 /* XXX error stat ??? */
4973 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4974 l4_off = (e_hlen + ip_hlen);
4977 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4978 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4980 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4983 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4984 th = (struct tcphdr *)(ip + ip_hlen);
4985 /* th_off is number of 32-bit words */
4986 *parsing_data |= ((th->th_off <<
4987 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4988 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4989 return (l4_off + (th->th_off << 2)); /* entire header length */
4990 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4992 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4993 return (l4_off + sizeof(struct udphdr)); /* entire header length */
4995 /* XXX error stat ??? */
5001 bxe_set_pbd_csum(struct bxe_fastpath *fp,
5003 struct eth_tx_parse_bd_e1x *pbd)
5005 struct ether_vlan_header *eh = NULL;
5006 struct ip *ip4 = NULL;
5007 struct ip6_hdr *ip6 = NULL;
5009 struct tcphdr *th = NULL;
5010 struct udphdr *uh = NULL;
5011 int e_hlen, ip_hlen;
5017 /* get the Ethernet header */
5018 eh = mtod(m, struct ether_vlan_header *);
5020 /* handle VLAN encapsulation if present */
5021 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5022 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5023 proto = ntohs(eh->evl_proto);
5025 e_hlen = ETHER_HDR_LEN;
5026 proto = ntohs(eh->evl_encap_proto);
5031 /* get the IP header, if mbuf len < 20 then header in next mbuf */
5032 ip4 = (m->m_len < sizeof(struct ip)) ?
5033 (struct ip *)m->m_next->m_data :
5034 (struct ip *)(m->m_data + e_hlen);
5035 /* ip_hl is the number of 32-bit words; convert to 16-bit words */
5036 ip_hlen = (ip4->ip_hl << 1);
5039 case ETHERTYPE_IPV6:
5040 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5041 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5042 (struct ip6_hdr *)m->m_next->m_data :
5043 (struct ip6_hdr *)(m->m_data + e_hlen);
5044 /* XXX cannot support offload with IPv6 extensions */
5045 ip_hlen = (sizeof(struct ip6_hdr) >> 1);
5049 /* We can't offload in this case... */
5050 /* XXX error stat ??? */
5054 hlen = (e_hlen >> 1);
5056 /* note that rest of global_data is indirectly zeroed here */
5057 if (m->m_flags & M_VLANTAG) {
5059 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
5061 pbd->global_data = htole16(hlen);
5064 pbd->ip_hlen_w = ip_hlen;
5066 hlen += pbd->ip_hlen_w;
5068 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
5070 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5073 th = (struct tcphdr *)(ip + (ip_hlen << 1));
5074 /* th_off is the number of 32-bit words; convert to 16-bit words */
5075 hlen += (uint16_t)(th->th_off << 1);
5076 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5078 uh = (struct udphdr *)(ip + (ip_hlen << 1));
5079 hlen += (sizeof(struct udphdr) / 2);
5081 /* valid case as only CSUM_IP was set */
5085 pbd->total_hlen_w = htole16(hlen);
5087 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
5090 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
5091 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5092 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5094 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5097 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5098 * checksums and does not know anything about the UDP header and where
5099 * the checksum field is located. It only knows about TCP. Therefore
5100 * we "lie" to the hardware for outgoing UDP packets w/ checksum
5101 * offload. Since the checksum field offset for TCP is 16 bytes and
5102 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5103 * bytes less than the start of the UDP header. This allows the
5104 * hardware to write the checksum in the correct spot. But the
5105 * hardware will compute a checksum which includes the last 10 bytes
5106 * of the IP header. To correct this we tweak the stack computed
5107 * pseudo checksum by folding in the calculation of the inverse
5108 * checksum for those final 10 bytes of the IP header. This allows
5109 * the correct checksum to be computed by the hardware.
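 *
 * Worked example using the offsets above: the hardware writes its result
 * 16 bytes into what it believes is the TCP header, while uh_sum sits at
 * byte 6 of the UDP header. Backing the header pointer up by 16 - 6 = 10
 * bytes makes the hardware's write land exactly on uh_sum, and the
 * in_pseudo()/in_addword() fold below cancels the 10 extra IP header
 * bytes that the hardware sums as a result.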
5112 /* set pointer 10 bytes before UDP header */
5113 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5115 /* calculate a pseudo header checksum over the first 10 bytes */
5116 tmp_csum = in_pseudo(*tmp_uh,
5118 *(uint16_t *)(tmp_uh + 2));
5120 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5123 return (hlen * 2); /* entire header length, number of bytes */
5127 bxe_set_pbd_lso_e2(struct mbuf *m,
5128 uint32_t *parsing_data)
5130 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5131 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5132 ETH_TX_PARSE_BD_E2_LSO_MSS);
5134 /* XXX test for IPv6 with extension header... */
5138 bxe_set_pbd_lso(struct mbuf *m,
5139 struct eth_tx_parse_bd_e1x *pbd)
5141 struct ether_vlan_header *eh = NULL;
5142 struct ip *ip = NULL;
5143 struct tcphdr *th = NULL;
5146 /* get the Ethernet header */
5147 eh = mtod(m, struct ether_vlan_header *);
5149 /* handle VLAN encapsulation if present */
5150 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5151 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5153 /* get the IP and TCP headers; with LSO the entire header is in the first mbuf */
5154 /* XXX assuming IPv4 */
5155 ip = (struct ip *)(m->m_data + e_hlen);
5156 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5158 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5159 pbd->tcp_send_seq = ntohl(th->th_seq);
5160 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5164 pbd->ip_id = ntohs(ip->ip_id);
5165 pbd->tcp_pseudo_csum =
5166 ntohs(in_pseudo(ip->ip_src.s_addr,
5168 htons(IPPROTO_TCP)));
5171 pbd->tcp_pseudo_csum =
5172 ntohs(in_pseudo(&ip6->ip6_src,
5174 htons(IPPROTO_TCP)));
5178 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5182 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
5183 * visible to the controller.
5185 * If an mbuf is submitted to this routine and cannot be given to the
5186 * controller (e.g. it has too many fragments) then the function may free
5187 * the mbuf and return to the caller.
5190 * 0 = Success, !0 = Failure
5191 * Note the side effect that an mbuf may be freed if it causes a problem.
5194 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5196 bus_dma_segment_t segs[32];
5198 struct bxe_sw_tx_bd *tx_buf;
5199 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5200 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5201 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5202 struct eth_tx_bd *tx_data_bd;
5203 struct eth_tx_bd *tx_total_pkt_size_bd;
5204 struct eth_tx_start_bd *tx_start_bd;
5205 uint16_t bd_prod, pkt_prod, total_pkt_size;
5207 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5208 struct bxe_softc *sc;
5209 uint16_t tx_bd_avail;
5210 struct ether_vlan_header *eh;
5211 uint32_t pbd_e2_parsing_data = 0;
5218 M_ASSERTPKTHDR(*m_head);
5221 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5224 tx_total_pkt_size_bd = NULL;
5226 /* get the H/W pointer for packets and BDs */
5227 pkt_prod = fp->tx_pkt_prod;
5228 bd_prod = fp->tx_bd_prod;
5230 mac_type = UNICAST_ADDRESS;
5232 /* map the mbuf into the next open DMAable memory */
5233 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5234 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5236 segs, &nsegs, BUS_DMA_NOWAIT);
5238 /* mapping errors */
5239 if (__predict_false(error != 0)) {
5240 fp->eth_q_stats.tx_dma_mapping_failure++;
5241 if (error == ENOMEM) {
5242 /* resource issue, try again later */
5244 } else if (error == EFBIG) {
5245 /* possibly recoverable with defragmentation */
5246 fp->eth_q_stats.mbuf_defrag_attempts++;
5247 m0 = m_defrag(*m_head, M_DONTWAIT);
5249 fp->eth_q_stats.mbuf_defrag_failures++;
5252 /* defrag successful, try mapping again */
5254 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5256 segs, &nsegs, BUS_DMA_NOWAIT);
5258 fp->eth_q_stats.tx_dma_mapping_failure++;
5263 /* unknown, unrecoverable mapping error */
5264 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5265 bxe_dump_mbuf(sc, m0, FALSE);
5269 goto bxe_tx_encap_continue;
5272 tx_bd_avail = bxe_tx_avail(sc, fp);
5274 /* make sure there is enough room in the send queue */
5275 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5276 /* Recoverable, try again later. */
5277 fp->eth_q_stats.tx_hw_queue_full++;
5278 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5280 goto bxe_tx_encap_continue;
5283 /* capture the current H/W TX chain high watermark */
5284 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5285 (TX_BD_USABLE - tx_bd_avail))) {
5286 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5289 /* make sure it fits in the packet window */
5290 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5292 * The mbuf may be too big for the controller to handle. If the frame
5293 * is a TSO frame we'll need to do an additional check.
5295 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5296 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5297 goto bxe_tx_encap_continue; /* OK to send */
5299 fp->eth_q_stats.tx_window_violation_tso++;
5302 fp->eth_q_stats.tx_window_violation_std++;
5305 /* let's try to defragment this mbuf and remap it */
5306 fp->eth_q_stats.mbuf_defrag_attempts++;
5307 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5309 m0 = m_defrag(*m_head, M_DONTWAIT);
5311 fp->eth_q_stats.mbuf_defrag_failures++;
5312 /* Ugh, just drop the frame... :( */
5315 /* defrag successful, try mapping again */
5317 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5319 segs, &nsegs, BUS_DMA_NOWAIT);
5321 fp->eth_q_stats.tx_dma_mapping_failure++;
5322 /* No sense in trying to defrag/copy chain, drop it. :( */
5326 /* if the chain is still too long then drop it */
5327 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5328 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5335 bxe_tx_encap_continue:
5337 /* Check for errors */
5340 /* recoverable try again later */
5342 fp->eth_q_stats.tx_soft_errors++;
5343 fp->eth_q_stats.mbuf_alloc_tx--;
5351 /* set flag according to packet type (UNICAST_ADDRESS is default) */
5352 if (m0->m_flags & M_BCAST) {
5353 mac_type = BROADCAST_ADDRESS;
5354 } else if (m0->m_flags & M_MCAST) {
5355 mac_type = MULTICAST_ADDRESS;
5358 /* store the mbuf into the mbuf ring */
5360 tx_buf->first_bd = fp->tx_bd_prod;
5363 /* prepare the first transmit (start) BD for the mbuf */
5364 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5367 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5368 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5370 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5371 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5372 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5373 total_pkt_size += tx_start_bd->nbytes;
5374 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5376 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5378 /* all frames have at least Start BD + Parsing BD */
5380 tx_start_bd->nbd = htole16(nbds);
5382 if (m0->m_flags & M_VLANTAG) {
5383 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5384 tx_start_bd->bd_flags.as_bitfield |=
5385 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5387 /* vf tx, start bd must hold the ethertype for fw to enforce it */
5389 /* map ethernet header to find type and header length */
5390 eh = mtod(m0, struct ether_vlan_header *);
5391 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5393 /* used by FW for packet accounting */
5394 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5399 * Add a parsing BD from the chain. The parsing BD is always added,
5400 * though it is only used for TSO and checksum offloads.
5402 bd_prod = TX_BD_NEXT(bd_prod);
5404 if (m0->m_pkthdr.csum_flags) {
5405 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5406 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5407 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5410 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5411 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5412 ETH_TX_BD_FLAGS_L4_CSUM);
5413 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5414 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5415 ETH_TX_BD_FLAGS_IS_UDP |
5416 ETH_TX_BD_FLAGS_L4_CSUM);
5417 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5418 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5419 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5420 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5421 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5422 ETH_TX_BD_FLAGS_IS_UDP);
5426 if (!CHIP_IS_E1x(sc)) {
5427 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5428 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5430 if (m0->m_pkthdr.csum_flags) {
5431 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5434 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5437 uint16_t global_data = 0;
5439 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5440 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5442 if (m0->m_pkthdr.csum_flags) {
5443 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5446 SET_FLAG(global_data,
5447 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5448 pbd_e1x->global_data |= htole16(global_data);
5451 /* setup the parsing BD with TSO specific info */
5452 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5453 fp->eth_q_stats.tx_ofld_frames_lso++;
5454 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5456 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5457 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5459 /* split the first BD into header/data making the fw job easy */
5461 tx_start_bd->nbd = htole16(nbds);
5462 tx_start_bd->nbytes = htole16(hlen);
5464 bd_prod = TX_BD_NEXT(bd_prod);
5466 /* new transmit BD after the tx_parse_bd */
5467 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5468 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5469 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5470 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5471 if (tx_total_pkt_size_bd == NULL) {
5472 tx_total_pkt_size_bd = tx_data_bd;
5476 "TSO split header size is %d (%x:%x) nbds %d\n",
5477 le16toh(tx_start_bd->nbytes),
5478 le32toh(tx_start_bd->addr_hi),
5479 le32toh(tx_start_bd->addr_lo),
5483 if (!CHIP_IS_E1x(sc)) {
5484 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5486 bxe_set_pbd_lso(m0, pbd_e1x);
5490 if (pbd_e2_parsing_data) {
5491 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5494 /* prepare remaining BDs, start tx bd contains first seg/frag */
5495 for (i = 1; i < nsegs ; i++) {
5496 bd_prod = TX_BD_NEXT(bd_prod);
5497 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5498 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5499 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5500 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5501 if (tx_total_pkt_size_bd == NULL) {
5502 tx_total_pkt_size_bd = tx_data_bd;
5504 total_pkt_size += tx_data_bd->nbytes;
5507 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5509 if (tx_total_pkt_size_bd != NULL) {
5510 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5513 if (__predict_false(sc->debug & DBG_TX)) {
5514 tmp_bd = tx_buf->first_bd;
5515 for (i = 0; i < nbds; i++)
5519 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5520 "bd_flags=0x%x hdr_nbds=%d\n",
5523 le16toh(tx_start_bd->nbd),
5524 le16toh(tx_start_bd->vlan_or_ethertype),
5525 tx_start_bd->bd_flags.as_bitfield,
5526 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5527 } else if (i == 1) {
5530 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5531 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5532 "tcp_seq=%u total_hlen_w=%u\n",
5535 pbd_e1x->global_data,
5540 pbd_e1x->tcp_pseudo_csum,
5541 pbd_e1x->tcp_send_seq,
5542 le16toh(pbd_e1x->total_hlen_w));
5543 } else { /* if (pbd_e2) */
5545 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5546 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5549 pbd_e2->data.mac_addr.dst_hi,
5550 pbd_e2->data.mac_addr.dst_mid,
5551 pbd_e2->data.mac_addr.dst_lo,
5552 pbd_e2->data.mac_addr.src_hi,
5553 pbd_e2->data.mac_addr.src_mid,
5554 pbd_e2->data.mac_addr.src_lo,
5555 pbd_e2->parsing_data);
5559 if (i != 1) { /* skip the parse BD as it doesn't hold data */
5560 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5562 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5565 le16toh(tx_data_bd->nbytes),
5566 le32toh(tx_data_bd->addr_hi),
5567 le32toh(tx_data_bd->addr_lo));
5570 tmp_bd = TX_BD_NEXT(tmp_bd);
5574 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5576 /* update TX BD producer index value for next TX */
5577 bd_prod = TX_BD_NEXT(bd_prod);
5580 * If the chain of tx_bd's describing this frame is adjacent to or spans
5581 * an eth_tx_next_bd element then we need to increment the nbds value.
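 *
 * Example: if bd_prod wrapped into a new BD page partway through this
 * frame, TX_BD_IDX(bd_prod) (the index within the current page) ends up
 * smaller than nbds, revealing that an eth_tx_next_bd slot was consumed
 * along the way.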
5583 if (TX_BD_IDX(bd_prod) < nbds) {
5587 /* don't allow reordering of writes for nbd and packets */
5590 fp->tx_db.data.prod += nbds;
5592 /* producer points to the next free tx_bd at this point */
5594 fp->tx_bd_prod = bd_prod;
5596 DOORBELL(sc, fp->index, fp->tx_db.raw);
5598 fp->eth_q_stats.tx_pkts++;
5600 /* Prevent speculative reads from getting ahead of the status block. */
5601 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5602 0, 0, BUS_SPACE_BARRIER_READ);
5604 /* Prevent speculative reads from getting ahead of the doorbell. */
5605 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5606 0, 0, BUS_SPACE_BARRIER_READ);
5612 bxe_tx_start_locked(struct bxe_softc *sc,
5614 struct bxe_fastpath *fp)
5616 struct mbuf *m = NULL;
5618 uint16_t tx_bd_avail;
5620 BXE_FP_TX_LOCK_ASSERT(fp);
5622 /* keep adding entries while there are frames to send */
5623 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5626 * check for any frames to send
5627 * dequeue can still be NULL even if queue is not empty
5629 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
5630 if (__predict_false(m == NULL)) {
5634 /* the mbuf now belongs to us */
5635 fp->eth_q_stats.mbuf_alloc_tx++;
5638 * Put the frame into the transmit ring. If we don't have room,
5639 * place the mbuf back at the head of the TX queue, set the
5640 * OACTIVE flag, and wait for the NIC to drain the chain.
5642 if (__predict_false(bxe_tx_encap(fp, &m))) {
5643 fp->eth_q_stats.tx_encap_failures++;
5645 /* mark the TX queue as full and return the frame */
5646 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5647 IFQ_DRV_PREPEND(&ifp->if_snd, m);
5648 fp->eth_q_stats.mbuf_alloc_tx--;
5649 fp->eth_q_stats.tx_queue_xoff++;
5652 /* stop looking for more work */
5656 /* the frame was enqueued successfully */
5659 /* send a copy of the frame to any BPF listeners. */
5662 tx_bd_avail = bxe_tx_avail(sc, fp);
5664 /* handle any completions if we're running low */
5665 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5666 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5668 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5674 /* all TX packets were dequeued and/or the tx ring is full */
5676 /* reset the TX watchdog timeout timer */
5677 fp->watchdog_timer = BXE_TX_TIMEOUT;
5681 /* Legacy (non-RSS) dispatch routine */
5683 bxe_tx_start(struct ifnet *ifp)
5685 struct bxe_softc *sc;
5686 struct bxe_fastpath *fp;
5690 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5691 BLOGW(sc, "Interface not running, ignoring transmit request\n");
5695 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5696 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
5700 if (!sc->link_vars.link_up) {
5701 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5708 bxe_tx_start_locked(sc, ifp, fp);
5709 BXE_FP_TX_UNLOCK(fp);
5712 #if __FreeBSD_version >= 800000
5715 bxe_tx_mq_start_locked(struct bxe_softc *sc,
5717 struct bxe_fastpath *fp,
5720 struct buf_ring *tx_br = fp->tx_br;
5722 int depth, rc, tx_count;
5723 uint16_t tx_bd_avail;
5727 BXE_FP_TX_LOCK_ASSERT(fp);
5730 BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5734 if (!sc->link_vars.link_up ||
5735 (ifp->if_drv_flags &
5736 (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
5737 rc = drbr_enqueue(ifp, tx_br, m);
5738 goto bxe_tx_mq_start_locked_exit;
5741 /* fetch the depth of the driver queue */
5742 depth = drbr_inuse(ifp, tx_br);
5743 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5744 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5748 /* no new work, check for pending frames */
5749 next = drbr_dequeue(ifp, tx_br);
5750 } else if (drbr_needs_enqueue(ifp, tx_br)) {
5751 /* have both new and pending work, maintain packet order */
5752 rc = drbr_enqueue(ifp, tx_br, m);
5754 fp->eth_q_stats.tx_soft_errors++;
5755 goto bxe_tx_mq_start_locked_exit;
5757 next = drbr_dequeue(ifp, tx_br);
5759 /* new work only and nothing pending */
5763 /* keep adding entries while there are frames to send */
5764 while (next != NULL) {
5766 /* the mbuf now belongs to us */
5767 fp->eth_q_stats.mbuf_alloc_tx++;
5770 * Put the frame into the transmit ring. If we don't have room,
5771 * place the mbuf back at the head of the TX queue, set the
5772 * OACTIVE flag, and wait for the NIC to drain the chain.
5774 rc = bxe_tx_encap(fp, &next);
5775 if (__predict_false(rc != 0)) {
5776 fp->eth_q_stats.tx_encap_failures++;
5778 /* mark the TX queue as full and save the frame */
5779 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5780 /* XXX this may reorder the frame */
5781 rc = drbr_enqueue(ifp, tx_br, next);
5782 fp->eth_q_stats.mbuf_alloc_tx--;
5783 fp->eth_q_stats.tx_frames_deferred++;
5786 /* stop looking for more work */
5790 /* the transmit frame was enqueued successfully */
5793 /* send a copy of the frame to any BPF listeners */
5794 BPF_MTAP(ifp, next);
5796 tx_bd_avail = bxe_tx_avail(sc, fp);
5798 /* handle any completions if we're running low */
5799 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5800 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5802 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5807 next = drbr_dequeue(ifp, tx_br);
5810 /* all TX packets were dequeued and/or the tx ring is full */
5812 /* reset the TX watchdog timeout timer */
5813 fp->watchdog_timer = BXE_TX_TIMEOUT;
5816 bxe_tx_mq_start_locked_exit:
5821 /* Multiqueue (TSS) dispatch routine. */
5823 bxe_tx_mq_start(struct ifnet *ifp,
5826 struct bxe_softc *sc = ifp->if_softc;
5827 struct bxe_fastpath *fp;
5830 fp_index = 0; /* default is the first queue */
5832 /* check if flowid is set */
5833 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
5834 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5836 fp = &sc->fp[fp_index];
5838 if (BXE_FP_TX_TRYLOCK(fp)) {
5839 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5840 BXE_FP_TX_UNLOCK(fp);
5842 rc = drbr_enqueue(ifp, fp->tx_br, m);
5848 bxe_mq_flush(struct ifnet *ifp)
5850 struct bxe_softc *sc = ifp->if_softc;
5851 struct bxe_fastpath *fp;
5855 for (i = 0; i < sc->num_queues; i++) {
5858 if (fp->state != BXE_FP_STATE_OPEN) {
5859 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5860 fp->index, fp->state);
5864 if (fp->tx_br != NULL) {
5865 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5867 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5870 BXE_FP_TX_UNLOCK(fp);
5877 #endif /* __FreeBSD_version >= 800000 */
5880 bxe_cid_ilt_lines(struct bxe_softc *sc)
5883 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5885 return (L2_ILT_LINES(sc));
5889 bxe_ilt_set_info(struct bxe_softc *sc)
5891 struct ilt_client_info *ilt_client;
5892 struct ecore_ilt *ilt = sc->ilt;
5895 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5896 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5899 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5900 ilt_client->client_num = ILT_CLIENT_CDU;
5901 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5902 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5903 ilt_client->start = line;
5904 line += bxe_cid_ilt_lines(sc);
5906 if (CNIC_SUPPORT(sc)) {
5907 line += CNIC_ILT_LINES;
5910 ilt_client->end = (line - 1);
5913 "ilt client[CDU]: start %d, end %d, "
5914 "psz 0x%x, flags 0x%x, hw psz %d\n",
5915 ilt_client->start, ilt_client->end,
5916 ilt_client->page_size,
5918 ilog2(ilt_client->page_size >> 12));
5921 if (QM_INIT(sc->qm_cid_count)) {
5922 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5923 ilt_client->client_num = ILT_CLIENT_QM;
5924 ilt_client->page_size = QM_ILT_PAGE_SZ;
5925 ilt_client->flags = 0;
5926 ilt_client->start = line;
5928 /* 4 bytes for each cid */
5929 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5932 ilt_client->end = (line - 1);
5935 "ilt client[QM]: start %d, end %d, "
5936 "psz 0x%x, flags 0x%x, hw psz %d\n",
5937 ilt_client->start, ilt_client->end,
5938 ilt_client->page_size, ilt_client->flags,
5939 ilog2(ilt_client->page_size >> 12));
5942 if (CNIC_SUPPORT(sc)) {
5944 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5945 ilt_client->client_num = ILT_CLIENT_SRC;
5946 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5947 ilt_client->flags = 0;
5948 ilt_client->start = line;
5949 line += SRC_ILT_LINES;
5950 ilt_client->end = (line - 1);
5953 "ilt client[SRC]: start %d, end %d, "
5954 "psz 0x%x, flags 0x%x, hw psz %d\n",
5955 ilt_client->start, ilt_client->end,
5956 ilt_client->page_size, ilt_client->flags,
5957 ilog2(ilt_client->page_size >> 12));
5960 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5961 ilt_client->client_num = ILT_CLIENT_TM;
5962 ilt_client->page_size = TM_ILT_PAGE_SZ;
5963 ilt_client->flags = 0;
5964 ilt_client->start = line;
5965 line += TM_ILT_LINES;
5966 ilt_client->end = (line - 1);
5969 "ilt client[TM]: start %d, end %d, "
5970 "psz 0x%x, flags 0x%x, hw psz %d\n",
5971 ilt_client->start, ilt_client->end,
5972 ilt_client->page_size, ilt_client->flags,
5973 ilog2(ilt_client->page_size >> 12));
5976 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5980 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5983 uint32_t rx_buf_size;
5985 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
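/*
 * Pick the smallest buffer tier that holds a full frame, falling back to
 * standard clusters when a single allocation cannot. For example
 * (hypothetical sizes), a 9000-byte MTU on a 4KB-page system yields an
 * rx_buf_size larger than (2 * MJUMPAGESIZE), so the final else below
 * assigns plain MCLBYTES buffers to each RX BD.
 */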
5987 for (i = 0; i < sc->num_queues; i++) {
5988 if (rx_buf_size <= MCLBYTES) {
5989 sc->fp[i].rx_buf_size = rx_buf_size;
5990 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5991 } else if (rx_buf_size <= MJUMPAGESIZE) {
5992 sc->fp[i].rx_buf_size = rx_buf_size;
5993 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5994 } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5995 sc->fp[i].rx_buf_size = MCLBYTES;
5996 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5997 } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5998 sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5999 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
6001 sc->fp[i].rx_buf_size = MCLBYTES;
6002 sc->fp[i].mbuf_alloc_size = MCLBYTES;
6008 bxe_alloc_ilt_mem(struct bxe_softc *sc)
6013 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
6015 (M_NOWAIT | M_ZERO))) == NULL) {
6023 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
6027 if ((sc->ilt->lines =
6028 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
6030 (M_NOWAIT | M_ZERO))) == NULL) {
6038 bxe_free_ilt_mem(struct bxe_softc *sc)
6040 if (sc->ilt != NULL) {
6041 free(sc->ilt, M_BXE_ILT);
6047 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
6049 if (sc->ilt->lines != NULL) {
6050 free(sc->ilt->lines, M_BXE_ILT);
6051 sc->ilt->lines = NULL;
6056 bxe_free_mem(struct bxe_softc *sc)
6060 for (i = 0; i < L2_ILT_LINES(sc); i++) {
6061 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6062 sc->context[i].vcxt = NULL;
6063 sc->context[i].size = 0;
6066 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6068 bxe_free_ilt_lines_mem(sc);
6073 bxe_alloc_mem(struct bxe_softc *sc)
6080 * Allocate memory for CDU context:
6081 * This memory is allocated separately and not in the generic ILT
6082 * functions because CDU differs in few aspects:
6083 * 1. There can be multiple entities allocating memory for context -
6084 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6085 * its own ILT lines.
6086 * 2. Since CDU page-size is not a single 4KB page (which is the case
6087 * for the other ILT clients), to be efficient we want to support
6088 * allocation of sub-page-size in the last entry.
6089 * 3. Context pointers are used by the driver to pass to FW / update
6090 * the context (for the other ILT clients the pointers are used just to
6091 * free the memory during unload).
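 *
 * Sizing sketch (hypothetical numbers): if context_size were 80KB and
 * CDU_ILT_PAGE_SZ were 32KB, the loop below would make three DMA
 * allocations of 32KB, 32KB, and a final sub-page 16KB chunk.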
6093 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6094 for (i = 0, allocated = 0; allocated < context_size; i++) {
6095 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6096 (context_size - allocated));
6098 if (bxe_dma_alloc(sc, sc->context[i].size,
6099 &sc->context[i].vcxt_dma,
6100 "cdu context") != 0) {
6105 sc->context[i].vcxt =
6106 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6108 allocated += sc->context[i].size;
6111 bxe_alloc_ilt_lines_mem(sc);
6113 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6114 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6116 for (i = 0; i < 4; i++) {
6118 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6120 sc->ilt->clients[i].page_size,
6121 sc->ilt->clients[i].start,
6122 sc->ilt->clients[i].end,
6123 sc->ilt->clients[i].client_num,
6124 sc->ilt->clients[i].flags);
6127 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6128 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6137 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6139 struct bxe_softc *sc;
6144 if (fp->rx_mbuf_tag == NULL) {
6148 /* free all mbufs and unload all maps */
6149 for (i = 0; i < RX_BD_TOTAL; i++) {
6150 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6151 bus_dmamap_sync(fp->rx_mbuf_tag,
6152 fp->rx_mbuf_chain[i].m_map,
6153 BUS_DMASYNC_POSTREAD);
6154 bus_dmamap_unload(fp->rx_mbuf_tag,
6155 fp->rx_mbuf_chain[i].m_map);
6158 if (fp->rx_mbuf_chain[i].m != NULL) {
6159 m_freem(fp->rx_mbuf_chain[i].m);
6160 fp->rx_mbuf_chain[i].m = NULL;
6161 fp->eth_q_stats.mbuf_alloc_rx--;
6167 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6169 struct bxe_softc *sc;
6170 int i, max_agg_queues;
6174 if (fp->rx_mbuf_tag == NULL) {
6178 max_agg_queues = MAX_AGG_QS(sc);
6180 /* release all mbufs and unload all DMA maps in the TPA pool */
6181 for (i = 0; i < max_agg_queues; i++) {
6182 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6183 bus_dmamap_sync(fp->rx_mbuf_tag,
6184 fp->rx_tpa_info[i].bd.m_map,
6185 BUS_DMASYNC_POSTREAD);
6186 bus_dmamap_unload(fp->rx_mbuf_tag,
6187 fp->rx_tpa_info[i].bd.m_map);
6190 if (fp->rx_tpa_info[i].bd.m != NULL) {
6191 m_freem(fp->rx_tpa_info[i].bd.m);
6192 fp->rx_tpa_info[i].bd.m = NULL;
6193 fp->eth_q_stats.mbuf_alloc_tpa--;
6199 bxe_free_sge_chain(struct bxe_fastpath *fp)
6201 struct bxe_softc *sc;
6206 if (fp->rx_sge_mbuf_tag == NULL) {
6210 /* free all mbufs and unload all maps */
6211 for (i = 0; i < RX_SGE_TOTAL; i++) {
6212 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6213 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6214 fp->rx_sge_mbuf_chain[i].m_map,
6215 BUS_DMASYNC_POSTREAD);
6216 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6217 fp->rx_sge_mbuf_chain[i].m_map);
6220 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6221 m_freem(fp->rx_sge_mbuf_chain[i].m);
6222 fp->rx_sge_mbuf_chain[i].m = NULL;
6223 fp->eth_q_stats.mbuf_alloc_sge--;
6229 bxe_free_fp_buffers(struct bxe_softc *sc)
6231 struct bxe_fastpath *fp;
6234 for (i = 0; i < sc->num_queues; i++) {
6237 #if __FreeBSD_version >= 800000
6238 if (fp->tx_br != NULL) {
6239 /* just in case bxe_mq_flush() wasn't called */
6240 if (mtx_initialized(&fp->tx_mtx)) {
6244 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6246 BXE_FP_TX_UNLOCK(fp);
6248 buf_ring_free(fp->tx_br, M_DEVBUF);
6253 /* free all RX buffers */
6254 bxe_free_rx_bd_chain(fp);
6255 bxe_free_tpa_pool(fp);
6256 bxe_free_sge_chain(fp);
6258 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6259 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6260 fp->eth_q_stats.mbuf_alloc_rx);
6263 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6264 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6265 fp->eth_q_stats.mbuf_alloc_sge);
6268 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6269 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6270 fp->eth_q_stats.mbuf_alloc_tpa);
6273 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6274 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6275 fp->eth_q_stats.mbuf_alloc_tx);
6278 /* XXX verify all mbufs were reclaimed */
6280 if (mtx_initialized(&fp->tx_mtx)) {
6281 mtx_destroy(&fp->tx_mtx);
6284 if (mtx_initialized(&fp->rx_mtx)) {
6285 mtx_destroy(&fp->rx_mtx);
6291 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6292 uint16_t prev_index,
6295 struct bxe_sw_rx_bd *rx_buf;
6296 struct eth_rx_bd *rx_bd;
6297 bus_dma_segment_t segs[1];
6304 /* allocate the new RX BD mbuf */
6305 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6306 if (__predict_false(m == NULL)) {
6307 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6311 fp->eth_q_stats.mbuf_alloc_rx++;
6313 /* initialize the mbuf buffer length */
6314 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6316 /* map the mbuf into non-paged pool */
6317 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6318 fp->rx_mbuf_spare_map,
6319 m, segs, &nsegs, BUS_DMA_NOWAIT);
6320 if (__predict_false(rc != 0)) {
6321 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6323 fp->eth_q_stats.mbuf_alloc_rx--;
6327 /* all mbufs must map to a single segment */
6328 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6330 /* release any existing RX BD mbuf mappings */
6332 if (prev_index != index) {
6333 rx_buf = &fp->rx_mbuf_chain[prev_index];
6335 if (rx_buf->m_map != NULL) {
6336 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6337 BUS_DMASYNC_POSTREAD);
6338 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6342 * We only get here from bxe_rxeof() when the maximum number
6343 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6344 * holds the mbuf in the prev_index so it's OK to NULL it out
6345 * here without concern of a memory leak.
6347 fp->rx_mbuf_chain[prev_index].m = NULL;
6350 rx_buf = &fp->rx_mbuf_chain[index];
6352 if (rx_buf->m_map != NULL) {
6353 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6354 BUS_DMASYNC_POSTREAD);
6355 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6358 /* save the mbuf and mapping info for a future packet */
6359 map = (prev_index != index) ?
6360 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6361 rx_buf->m_map = fp->rx_mbuf_spare_map;
6362 fp->rx_mbuf_spare_map = map;
6363 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6364 BUS_DMASYNC_PREREAD);
6367 rx_bd = &fp->rx_chain[index];
6368 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6369 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6375 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6378 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6379 bus_dma_segment_t segs[1];
6385 /* allocate the new TPA mbuf */
6386 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6387 if (__predict_false(m == NULL)) {
6388 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6392 fp->eth_q_stats.mbuf_alloc_tpa++;
6394 /* initialize the mbuf buffer length */
6395 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6397 /* map the mbuf into non-paged pool */
6398 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6399 fp->rx_tpa_info_mbuf_spare_map,
6400 m, segs, &nsegs, BUS_DMA_NOWAIT);
6401 if (__predict_false(rc != 0)) {
6402 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6404 fp->eth_q_stats.mbuf_alloc_tpa--;
6408 /* all mbufs must map to a single segment */
6409 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6411 /* release any existing TPA mbuf mapping */
6412 if (tpa_info->bd.m_map != NULL) {
6413 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6414 BUS_DMASYNC_POSTREAD);
6415 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6418 /* save the mbuf and mapping info for the TPA mbuf */
6419 map = tpa_info->bd.m_map;
6420 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6421 fp->rx_tpa_info_mbuf_spare_map = map;
6422 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6423 BUS_DMASYNC_PREREAD);
6425 tpa_info->seg = segs[0];
6431 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6432 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6436 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6439 struct bxe_sw_rx_bd *sge_buf;
6440 struct eth_rx_sge *sge;
6441 bus_dma_segment_t segs[1];
6447 /* allocate a new SGE mbuf */
6448 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6449 if (__predict_false(m == NULL)) {
6450 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6454 fp->eth_q_stats.mbuf_alloc_sge++;
6456 /* initialize the mbuf buffer length */
6457 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6459 /* map the SGE mbuf into non-paged pool */
6460 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6461 fp->rx_sge_mbuf_spare_map,
6462 m, segs, &nsegs, BUS_DMA_NOWAIT);
6463 if (__predict_false(rc != 0)) {
6464 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6466 fp->eth_q_stats.mbuf_alloc_sge--;
6470 /* all mbufs must map to a single segment */
6471 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6473 sge_buf = &fp->rx_sge_mbuf_chain[index];
6475 /* release any existing SGE mbuf mapping */
6476 if (sge_buf->m_map != NULL) {
6477 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6478 BUS_DMASYNC_POSTREAD);
6479 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6482 /* save the mbuf and mapping info for a future packet */
6483 map = sge_buf->m_map;
6484 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6485 fp->rx_sge_mbuf_spare_map = map;
6486 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6487 BUS_DMASYNC_PREREAD);
6490 sge = &fp->rx_sge_chain[index];
6491 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6492 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6497 static __noinline int
6498 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6500 struct bxe_fastpath *fp;
6502 int ring_prod, cqe_ring_prod;
6505 for (i = 0; i < sc->num_queues; i++) {
6508 #if __FreeBSD_version >= 800000
6509 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
6510 M_DONTWAIT, &fp->tx_mtx);
6511 if (fp->tx_br == NULL) {
6512 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
6513 goto bxe_alloc_fp_buffers_error;
6517 ring_prod = cqe_ring_prod = 0;
6521 /* allocate buffers for the RX BDs in RX BD chain */
6522 for (j = 0; j < sc->max_rx_bufs; j++) {
6523 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6525 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6527 goto bxe_alloc_fp_buffers_error;
6530 ring_prod = RX_BD_NEXT(ring_prod);
6531 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6534 fp->rx_bd_prod = ring_prod;
6535 fp->rx_cq_prod = cqe_ring_prod;
6536 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6538 max_agg_queues = MAX_AGG_QS(sc);
6540 fp->tpa_enable = TRUE;
6542 /* fill the TPA pool */
6543 for (j = 0; j < max_agg_queues; j++) {
6544 rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6546 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6548 fp->tpa_enable = FALSE;
6549 goto bxe_alloc_fp_buffers_error;
6552 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6555 if (fp->tpa_enable) {
6556 /* fill the RX SGE chain */
6558 for (j = 0; j < RX_SGE_USABLE; j++) {
6559 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6561 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6563 fp->tpa_enable = FALSE;
6565 goto bxe_alloc_fp_buffers_error;
6568 ring_prod = RX_SGE_NEXT(ring_prod);
6571 fp->rx_sge_prod = ring_prod;
6577 bxe_alloc_fp_buffers_error:
6579 /* unwind what was already allocated */
6580 bxe_free_rx_bd_chain(fp);
6581 bxe_free_tpa_pool(fp);
6582 bxe_free_sge_chain(fp);
6588 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6590 bxe_dma_free(sc, &sc->fw_stats_dma);
6592 sc->fw_stats_num = 0;
6594 sc->fw_stats_req_size = 0;
6595 sc->fw_stats_req = NULL;
6596 sc->fw_stats_req_mapping = 0;
6598 sc->fw_stats_data_size = 0;
6599 sc->fw_stats_data = NULL;
6600 sc->fw_stats_data_mapping = 0;
6604 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6606 uint8_t num_queue_stats;
6609 /* number of queues for statistics is number of eth queues */
6610 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6613 * Total number of FW statistics requests =
6614 * 1 for port stats + 1 for PF stats + num of queues
6616 sc->fw_stats_num = (2 + num_queue_stats);
6619 * Request is built from stats_query_header and an array of
6620 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6621 * rules. The real number or requests is configured in the
6622 * stats_query_header.
6625 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6626 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6628 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6629 sc->fw_stats_num, num_groups);
6631 sc->fw_stats_req_size =
6632 (sizeof(struct stats_query_header) +
6633 (num_groups * sizeof(struct stats_query_cmd_group)));
6636 * Data for statistics requests + stats_counter.
6637 * stats_counter holds per-STORM counters that are incremented when
6638 * STORM has finished with the current request. Memory for FCoE
6639 * offloaded statistics is counted anyway, even if it will not be sent.
6640 * VF stats are not accounted for here, as VF stats data is stored in
6641 * memory allocated by the VF itself.
6643 sc->fw_stats_data_size =
6644 (sizeof(struct stats_counter) +
6645 sizeof(struct per_port_stats) +
6646 sizeof(struct per_pf_stats) +
6647 /* sizeof(struct fcoe_statistics_params) + */
6648 (sizeof(struct per_queue_stats) * num_queue_stats));
6650 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6651 &sc->fw_stats_dma, "fw stats") != 0) {
6652 bxe_free_fw_stats_mem(sc);
6656 /* set up the shortcuts */
6659 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6660 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6663 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6664 sc->fw_stats_req_size);
6665 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6666 sc->fw_stats_req_size);
6668 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6669 (uintmax_t)sc->fw_stats_req_mapping);
6671 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6672 (uintmax_t)sc->fw_stats_data_mapping);
6679 * 0-7 - Engine0 load counter.
6680 * 8-15 - Engine1 load counter.
6681 * 16 - Engine0 RESET_IN_PROGRESS bit.
6682 * 17 - Engine1 RESET_IN_PROGRESS bit.
6683 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6684 * function on the engine
6685 * 19 - Engine1 ONE_IS_LOADED.
6686 * 20 - Chip reset flow bit. When set, a non-leader must wait for both
6687 * engines' leaders to complete (check for both RESET_IN_PROGRESS bits
6688 * and not just the one belonging to its engine).
6690 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
6691 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
6692 #define BXE_PATH0_LOAD_CNT_SHIFT 0
6693 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
6694 #define BXE_PATH1_LOAD_CNT_SHIFT 8
6695 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6696 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6697 #define BXE_GLOBAL_RESET_BIT 0x00040000
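/*
 * For example, bxe_get_load_status() below extracts engine 0's load
 * counter as ((val & BXE_PATH0_LOAD_CNT_MASK) >> BXE_PATH0_LOAD_CNT_SHIFT),
 * where each bit of the counter field marks one loaded absolute PF.
 */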
6699 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6701 bxe_set_reset_global(struct bxe_softc *sc)
6704 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6705 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6706 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6707 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6710 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6712 bxe_clear_reset_global(struct bxe_softc *sc)
6715 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6716 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6717 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6718 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6721 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6723 bxe_reset_is_global(struct bxe_softc *sc)
6725 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6726 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6727 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6730 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6732 bxe_set_reset_done(struct bxe_softc *sc)
6735 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6736 BXE_PATH0_RST_IN_PROG_BIT;
6738 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6740 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6743 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6745 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6748 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6750 bxe_set_reset_in_progress(struct bxe_softc *sc)
6753 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6754 BXE_PATH0_RST_IN_PROG_BIT;
6756 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6758 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6761 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6763 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6766 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6768 bxe_reset_is_done(struct bxe_softc *sc,
6771 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6772 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6773 BXE_PATH0_RST_IN_PROG_BIT;
6775 /* return false if bit is set */
6776 return (val & bit) ? FALSE : TRUE;
6779 /* get the load status for an engine, should be run under rtnl lock */
6781 bxe_get_load_status(struct bxe_softc *sc,
6784 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6785 BXE_PATH0_LOAD_CNT_MASK;
6786 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6787 BXE_PATH0_LOAD_CNT_SHIFT;
6788 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6790 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6792 val = ((val & mask) >> shift);
6794 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6799 /* set pf load mark */
6800 /* XXX needs to be under rtnl lock */
6802 bxe_set_pf_load(struct bxe_softc *sc)
6806 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6807 BXE_PATH0_LOAD_CNT_MASK;
6808 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6809 BXE_PATH0_LOAD_CNT_SHIFT;
6811 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6813 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6814 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6816 /* get the current counter value */
6817 val1 = ((val & mask) >> shift);
6819 /* set bit of this PF */
6820 val1 |= (1 << SC_ABS_FUNC(sc));
6822 /* clear the old value */
6825 /* set the new one */
6826 val |= ((val1 << shift) & mask);
6828 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6830 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6833 /* clear pf load mark */
6834 /* XXX needs to be under rtnl lock */
6836 bxe_clear_pf_load(struct bxe_softc *sc)
6839 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6840 BXE_PATH0_LOAD_CNT_MASK;
6841 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6842 BXE_PATH0_LOAD_CNT_SHIFT;
6844 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6845 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6846 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6848 /* get the current counter value */
6849 val1 = (val & mask) >> shift;
6851 /* clear bit of that PF */
6852 val1 &= ~(1 << SC_ABS_FUNC(sc));
6854 /* clear the old value */
6857 /* set the new one */
6858 val |= ((val1 << shift) & mask);
6860 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6861 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6865 /* send a load request to the MCP and analyze the response */
6867 bxe_nic_load_request(struct bxe_softc *sc,
6868 uint32_t *load_code)
6872 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6873 DRV_MSG_SEQ_NUMBER_MASK);
6875 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6877 /* get the current FW pulse sequence */
6878 sc->fw_drv_pulse_wr_seq =
6879 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6880 DRV_PULSE_SEQ_MASK);
6882 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6883 sc->fw_drv_pulse_wr_seq);
6886 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6887 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6889 /* if the MCP fails to respond we must abort */
6890 if (!(*load_code)) {
6891 BLOGE(sc, "MCP response failure!\n");
6895 /* if MCP refused then must abort */
6896 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6897 BLOGE(sc, "MCP refused load request\n");
6905 * Check whether another PF has already loaded FW to chip. In virtualized
6906 * environments a PF from another VM may have already initialized the device,
6907 * including loading FW.
6910 bxe_nic_load_analyze_req(struct bxe_softc *sc,
6913 uint32_t my_fw, loaded_fw;
6915 /* is another pf loaded on this engine? */
6916 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6917 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6918 /* build my FW version dword */
6919 my_fw = (BCM_5710_FW_MAJOR_VERSION +
6920 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6921 (BCM_5710_FW_REVISION_VERSION << 16) +
6922 (BCM_5710_FW_ENGINEERING_VERSION << 24));
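/*
 * e.g., a hypothetical FW version 7.13.1.0 (major.minor.rev.eng)
 * packs to 0x00010d07 under this layout.
 */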
6924 /* read loaded FW from chip */
6925 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6926 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6929 /* abort nic load if version mismatch */
6930 if (my_fw != loaded_fw) {
6931 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6940 /* mark PMF if applicable */
6942 bxe_nic_load_pmf(struct bxe_softc *sc,
6945 uint32_t ncsi_oem_data_addr;
6947 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6948 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6949 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6951 * Barrier for ordering between the write to sc->port.pmf here and the
6952 * read from the periodic task.
6960 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6963 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6964 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6965 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6966 if (ncsi_oem_data_addr) {
6968 (ncsi_oem_data_addr +
6969 offsetof(struct glob_ncsi_oem_data, driver_version)),
6977 bxe_read_mf_cfg(struct bxe_softc *sc)
6979 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6983 if (BXE_NOMCP(sc)) {
6984 return; /* what should be the default value in this case? */
6988 * The formula for computing the absolute function number is...
6989 * For 2 port configuration (4 functions per port):
6990 * abs_func = 2 * vn + SC_PORT + SC_PATH
6991 * For 4 port configuration (2 functions per port):
6992 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6994 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6995 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6996 if (abs_func >= E1H_FUNC_MAX) {
6999 sc->devinfo.mf_info.mf_config[vn] =
7000 MFCFG_RD(sc, func_mf_config[abs_func].config);
7003 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
7004 FUNC_MF_CFG_FUNC_DISABLED) {
7005 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
7006 sc->flags |= BXE_MF_FUNC_DIS;
7008 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
7009 sc->flags &= ~BXE_MF_FUNC_DIS;
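/*
 * Worked example (illustrative): in 2-port mode (n == 1), vn == 2 on
 * port 1, path 0 gives abs_func = (2*2 + 1) + 0 = 5. In 4-port mode
 * (n == 2) the same vn/port/path gives abs_func = 2*(2*2 + 1) + 0 = 10,
 * which is >= E1H_FUNC_MAX and terminates the loop early.
 */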
7013 /* acquire split MCP access lock register */
7014 static int bxe_acquire_alr(struct bxe_softc *sc)
7018 for (j = 0; j < 1000; j++) {
7020 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
7021 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
7022 if (val & (1L << 31))
7028 if (!(val & (1L << 31))) {
7029 BLOGE(sc, "Cannot acquire MCP access lock register\n");
7036 /* release split MCP access lock register */
7037 static void bxe_release_alr(struct bxe_softc *sc)
7039 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
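/*
 * Usage sketch (illustrative, assuming bxe_acquire_alr() returns 0 on
 * success): callers bracket accesses to registers shared with the MCP
 * with the acquire/release pair above.
 */
#if 0
if (bxe_acquire_alr(sc) == 0) {
    /* ... access registers shared with the MCP ... */
    bxe_release_alr(sc);
}
#endif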
7043 bxe_fan_failure(struct bxe_softc *sc)
7045 int port = SC_PORT(sc);
7046 uint32_t ext_phy_config;
7048 /* mark the failure */
7050 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
7052 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7053 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
7054 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
7057 /* log the failure */
7058 BLOGW(sc, "Fan Failure has caused the driver to shutdown "
7059 "the card to prevent permanent damage. "
7060 "Please contact OEM Support for assistance\n");
7064 bxe_panic(sc, ("Schedule task to handle fan failure\n"));
7067 * Schedule device reset (unload)
7068 * Some boards consume enough power while the driver is up to overheat
7069 * if the fan fails.
7071 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
7072 schedule_delayed_work(&sc->sp_rtnl_task, 0);
7076 /* this function is called upon a link interrupt */
7078 bxe_link_attn(struct bxe_softc *sc)
7080 uint32_t pause_enabled = 0;
7081 struct host_port_stats *pstats;
7084 /* Make sure that we are synced with the current statistics */
7085 bxe_stats_handle(sc, STATS_EVENT_STOP);
7087 elink_link_update(&sc->link_params, &sc->link_vars);
7089 if (sc->link_vars.link_up) {
7091 /* dropless flow control */
7092 if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7095 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7100 (BAR_USTRORM_INTMEM +
7101 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7105 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7106 pstats = BXE_SP(sc, port_stats);
7107 /* reset old mac stats */
7108 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7111 if (sc->state == BXE_STATE_OPEN) {
7112 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7116 if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7117 cmng_fns = bxe_get_cmng_fns_mode(sc);
7119 if (cmng_fns != CMNG_FNS_NONE) {
7120 bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7121 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7123 /* rate shaping and fairness are disabled */
7124 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7128 bxe_link_report_locked(sc);
7131 ; // XXX bxe_link_sync_notify(sc);
7136 bxe_attn_int_asserted(struct bxe_softc *sc,
7139 int port = SC_PORT(sc);
7140 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7141 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7142 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7143 NIG_REG_MASK_INTERRUPT_PORT0;
7145 uint32_t nig_mask = 0;
7150 if (sc->attn_state & asserted) {
7151 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7154 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7156 aeu_mask = REG_RD(sc, aeu_addr);
7158 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7159 aeu_mask, asserted);
7161 aeu_mask &= ~(asserted & 0x3ff);
7163 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7165 REG_WR(sc, aeu_addr, aeu_mask);
7167 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7169 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7170 sc->attn_state |= asserted;
7171 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7173 if (asserted & ATTN_HARD_WIRED_MASK) {
7174 if (asserted & ATTN_NIG_FOR_FUNC) {
7176 bxe_acquire_phy_lock(sc);
7177 /* save nig interrupt mask */
7178 nig_mask = REG_RD(sc, nig_int_mask_addr);
7180 /* If nig_mask is not set, no need to call the update function */
7182 REG_WR(sc, nig_int_mask_addr, 0);
7187 /* handle unicore attn? */
7190 if (asserted & ATTN_SW_TIMER_4_FUNC) {
7191 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7194 if (asserted & GPIO_2_FUNC) {
7195 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7198 if (asserted & GPIO_3_FUNC) {
7199 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7202 if (asserted & GPIO_4_FUNC) {
7203 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7207 if (asserted & ATTN_GENERAL_ATTN_1) {
7208 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7209 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7211 if (asserted & ATTN_GENERAL_ATTN_2) {
7212 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7213 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7215 if (asserted & ATTN_GENERAL_ATTN_3) {
7216 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7217 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7220 if (asserted & ATTN_GENERAL_ATTN_4) {
7221 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7222 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7224 if (asserted & ATTN_GENERAL_ATTN_5) {
7225 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7226 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7228 if (asserted & ATTN_GENERAL_ATTN_6) {
7229 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7230 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7235 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7236 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7238 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7241 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7243 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7244 REG_WR(sc, reg_addr, asserted);
7246 /* now set back the mask */
7247 if (asserted & ATTN_NIG_FOR_FUNC) {
7249 * Verify that IGU ack through BAR was written before restoring
7250 * NIG mask. This loop should exit after 2-3 iterations max.
7252 if (sc->devinfo.int_block != INT_BLOCK_HC) {
7256 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7257 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7258 (++cnt < MAX_IGU_ATTN_ACK_TO));
7261 BLOGE(sc, "Failed to verify IGU ack on time\n");
7267 REG_WR(sc, nig_int_mask_addr, nig_mask);
7269 bxe_release_phy_lock(sc);
7274 bxe_print_next_block(struct bxe_softc *sc,
7278 BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7282 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7287 uint32_t cur_bit = 0;
7290 for (i = 0; sig; i++) {
7291 cur_bit = ((uint32_t)0x1 << i);
7292 if (sig & cur_bit) {
7294 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7296 bxe_print_next_block(sc, par_num++, "BRB");
7298 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7300 bxe_print_next_block(sc, par_num++, "PARSER");
7302 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7304 bxe_print_next_block(sc, par_num++, "TSDM");
7306 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7308 bxe_print_next_block(sc, par_num++, "SEARCHER");
7310 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7312 bxe_print_next_block(sc, par_num++, "TCM");
7314 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7316 bxe_print_next_block(sc, par_num++, "TSEMI");
7318 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7320 bxe_print_next_block(sc, par_num++, "XPB");
7333 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7340 uint32_t cur_bit = 0;
7341 for (i = 0; sig; i++) {
7342 cur_bit = ((uint32_t)0x1 << i);
7343 if (sig & cur_bit) {
7345 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7347 bxe_print_next_block(sc, par_num++, "PBF");
7349 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7351 bxe_print_next_block(sc, par_num++, "QM");
7353 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7355 bxe_print_next_block(sc, par_num++, "TM");
7357 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7359 bxe_print_next_block(sc, par_num++, "XSDM");
7361 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7363 bxe_print_next_block(sc, par_num++, "XCM");
7365 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7367 bxe_print_next_block(sc, par_num++, "XSEMI");
7369 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7371 bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7373 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7375 bxe_print_next_block(sc, par_num++, "NIG");
7377 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7379 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7382 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7384 bxe_print_next_block(sc, par_num++, "DEBUG");
7386 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7388 bxe_print_next_block(sc, par_num++, "USDM");
7390 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7392 bxe_print_next_block(sc, par_num++, "UCM");
7394 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7396 bxe_print_next_block(sc, par_num++, "USEMI");
7398 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7400 bxe_print_next_block(sc, par_num++, "UPB");
7402 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7404 bxe_print_next_block(sc, par_num++, "CSDM");
7406 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7408 bxe_print_next_block(sc, par_num++, "CCM");
7421 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7426 uint32_t cur_bit = 0;
7429 for (i = 0; sig; i++) {
7430 cur_bit = ((uint32_t)0x1 << i);
7431 if (sig & cur_bit) {
7433 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7435 bxe_print_next_block(sc, par_num++, "CSEMI");
7437 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7439 bxe_print_next_block(sc, par_num++, "PXP");
7441 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7443 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7445 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7447 bxe_print_next_block(sc, par_num++, "CFC");
7449 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7451 bxe_print_next_block(sc, par_num++, "CDU");
7453 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7455 bxe_print_next_block(sc, par_num++, "DMAE");
7457 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7459 bxe_print_next_block(sc, par_num++, "IGU");
7461 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7463 bxe_print_next_block(sc, par_num++, "MISC");
7476 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7482 uint32_t cur_bit = 0;
7485 for (i = 0; sig; i++) {
7486 cur_bit = ((uint32_t)0x1 << i);
7487 if (sig & cur_bit) {
7489 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7491 bxe_print_next_block(sc, par_num++, "MCP ROM");
7494 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7496 bxe_print_next_block(sc, par_num++,
7500 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7502 bxe_print_next_block(sc, par_num++,
7506 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7508 bxe_print_next_block(sc, par_num++,
7523 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7528 uint32_t cur_bit = 0;
7531 for (i = 0; sig; i++) {
7532 cur_bit = ((uint32_t)0x1 << i);
7533 if (sig & cur_bit) {
7535 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7537 bxe_print_next_block(sc, par_num++, "PGLUE_B");
7539 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7541 bxe_print_next_block(sc, par_num++, "ATC");
7554 bxe_parity_attn(struct bxe_softc *sc,
7561 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7562 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7563 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7564 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7565 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7566 BLOGE(sc, "Parity error: HW block parity attention:\n"
7567 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7568 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7569 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7570 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7571 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7572 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7575 BLOGI(sc, "Parity errors detected in blocks: ");
7578 bxe_check_blocks_with_parity0(sc, sig[0] &
7579 HW_PRTY_ASSERT_SET_0,
7582 bxe_check_blocks_with_parity1(sc, sig[1] &
7583 HW_PRTY_ASSERT_SET_1,
7584 par_num, global, print);
7586 bxe_check_blocks_with_parity2(sc, sig[2] &
7587 HW_PRTY_ASSERT_SET_2,
7590 bxe_check_blocks_with_parity3(sc, sig[3] &
7591 HW_PRTY_ASSERT_SET_3,
7592 par_num, global, print);
7594 bxe_check_blocks_with_parity4(sc, sig[4] &
7595 HW_PRTY_ASSERT_SET_4,
7608 bxe_chk_parity_attn(struct bxe_softc *sc,
7612 struct attn_route attn = { {0} };
7613 int port = SC_PORT(sc);
7615 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7616 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7617 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7618 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7621 * Since MCP attentions can't be disabled inside the block, we need to
7622 * read AEU registers to see whether they're currently disabled
7624 attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7625 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7626 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7627 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7630 if (!CHIP_IS_E1x(sc))
7631 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7633 return (bxe_parity_attn(sc, global, print, attn.sig));
7637 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7642 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7643 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7644 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7645 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7646 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7647 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7648 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7649 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7650 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7651 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7652 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7653 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7654 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7655 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7656 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7657 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7658 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7659 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7660 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7661 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7662 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7665 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7666 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7667 BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7668 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7669 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7670 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7671 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7672 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7673 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7674 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7675 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7676 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7677 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7678 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7679 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7682 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7683 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7684 BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7685 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7686 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7691 bxe_e1h_disable(struct bxe_softc *sc)
7693 int port = SC_PORT(sc);
7697 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7701 bxe_e1h_enable(struct bxe_softc *sc)
7703 int port = SC_PORT(sc);
7705 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7707 // XXX bxe_tx_enable(sc);
7711 * called due to MCP event (on pmf):
7712 * reread new bandwidth configuration
7714 * notify other functions about the change
7717 bxe_config_mf_bw(struct bxe_softc *sc)
7719 if (sc->link_vars.link_up) {
7720 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7721 // XXX bxe_link_sync_notify(sc);
7724 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7728 bxe_set_mf_bw(struct bxe_softc *sc)
7730 bxe_config_mf_bw(sc);
7731 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7735 bxe_handle_eee_event(struct bxe_softc *sc)
7737 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7738 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7741 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7744 bxe_drv_info_ether_stat(struct bxe_softc *sc)
7746 struct eth_stats_info *ether_stat =
7747 &sc->sp->drv_info_to_mcp.ether_stat;
7749 strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7750 ETH_STAT_INFO_VERSION_LEN);
7752 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7753 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7754 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7755 ether_stat->mac_local + MAC_PAD,
7758 ether_stat->mtu_size = sc->mtu;
7760 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7761 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
7762 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7765 // XXX ether_stat->feature_flags |= ???;
7767 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7769 ether_stat->txq_size = sc->tx_ring_size;
7770 ether_stat->rxq_size = sc->rx_ring_size;
7774 bxe_handle_drv_info_req(struct bxe_softc *sc)
7776 enum drv_info_opcode op_code;
7777 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7779 /* if drv_info version supported by MFW doesn't match - send NACK */
7780 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7781 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7785 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7786 DRV_INFO_CONTROL_OP_CODE_SHIFT);
7788 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7791 case ETH_STATS_OPCODE:
7792 bxe_drv_info_ether_stat(sc);
7794 case FCOE_STATS_OPCODE:
7795 case ISCSI_STATS_OPCODE:
7797 /* if op code isn't supported - send NACK */
7798 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7803 * If we got a drv_info attn from the MFW then these fields are defined in shmem2.
7806 SHMEM2_WR(sc, drv_info_host_addr_lo,
7807 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7808 SHMEM2_WR(sc, drv_info_host_addr_hi,
7809 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
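/*
 * Worked example (illustrative): U64_LO()/U64_HI() split the 64-bit bus
 * address of the drv_info_to_mcp buffer into two 32-bit shmem2 words,
 * e.g. 0x0000000123456780 stores lo = 0x23456780 and hi = 0x00000001.
 */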
7811 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7815 bxe_dcc_event(struct bxe_softc *sc,
7818 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7820 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7822 * This is the only place besides the function initialization
7823 * where the sc->flags can change so it is done without any
7826 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7827 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7828 sc->flags |= BXE_MF_FUNC_DIS;
7829 bxe_e1h_disable(sc);
7831 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7832 sc->flags &= ~BXE_MF_FUNC_DIS;
7835 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7838 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7839 bxe_config_mf_bw(sc);
7840 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7843 /* Report results to MCP */
7845 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7847 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7851 bxe_pmf_update(struct bxe_softc *sc)
7853 int port = SC_PORT(sc);
7857 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7860 * We need the mb() to ensure ordering between writing sc->port.pmf
7861 * here and reading it from bxe_periodic_task().
7865 /* queue a periodic task */
7866 // XXX schedule task...
7868 // XXX bxe_dcbx_pmf_update(sc);
7870 /* enable nig attention */
7871 val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7872 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7873 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7874 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7875 } else if (!CHIP_IS_E1x(sc)) {
7876 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7877 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
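/*
 * Worked example (illustrative): for SC_VN(sc) == 1 the value above is
 * 0xff0f | (1 << 5) = 0xff2f, i.e. the fixed edge bits plus this VN's
 * attention bit.
 */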
7880 bxe_stats_handle(sc, STATS_EVENT_PMF);
7884 bxe_mc_assert(struct bxe_softc *sc)
7888 uint32_t row0, row1, row2, row3;
7891 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7893 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7895 /* print the asserts */
7896 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7898 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7899 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7900 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7901 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7903 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7904 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7905 i, row3, row2, row1, row0);
7913 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7915 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7918 /* print the asserts */
7919 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7921 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7922 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7923 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7924 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7926 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7927 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7928 i, row3, row2, row1, row0);
7936 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7938 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7941 /* print the asserts */
7942 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7944 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7945 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7946 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7947 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7949 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7950 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7951 i, row3, row2, row1, row0);
7959 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7961 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7964 /* print the asserts */
7965 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7967 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7968 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7969 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7970 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7972 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7973 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7974 i, row3, row2, row1, row0);
7985 bxe_attn_int_deasserted3(struct bxe_softc *sc,
7988 int func = SC_FUNC(sc);
7991 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7993 if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7995 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7996 bxe_read_mf_cfg(sc);
7997 sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7998 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7999 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
8001 if (val & DRV_STATUS_DCC_EVENT_MASK)
8002 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
8004 if (val & DRV_STATUS_SET_MF_BW)
8007 if (val & DRV_STATUS_DRV_INFO_REQ)
8008 bxe_handle_drv_info_req(sc);
8010 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
8013 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
8014 bxe_handle_eee_event(sc);
8016 if (sc->link_vars.periodic_flags &
8017 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
8018 /* sync with link */
8019 bxe_acquire_phy_lock(sc);
8020 sc->link_vars.periodic_flags &=
8021 ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
8022 bxe_release_phy_lock(sc);
8024 ; // XXX bxe_link_sync_notify(sc);
8025 bxe_link_report(sc);
8029 * Always call it here: bxe_link_report() will
8030 * prevent duplicate link indications.
8032 bxe_link_status_update(sc);
8034 } else if (attn & BXE_MC_ASSERT_BITS) {
8036 BLOGE(sc, "MC assert!\n");
8038 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
8039 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
8040 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
8041 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
8042 bxe_panic(sc, ("MC assert!\n"));
8044 } else if (attn & BXE_MCP_ASSERT) {
8046 BLOGE(sc, "MCP assert!\n");
8047 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
8048 // XXX bxe_fw_dump(sc);
8051 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
8055 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8056 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8057 if (attn & BXE_GRC_TIMEOUT) {
8058 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8059 BLOGE(sc, "GRC time-out 0x%08x\n", val);
8061 if (attn & BXE_GRC_RSV) {
8062 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8063 BLOGE(sc, "GRC reserved 0x%08x\n", val);
8065 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8070 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8073 int port = SC_PORT(sc);
8075 uint32_t val0, mask0, val1, mask1;
8078 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8079 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8080 BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8081 /* CFC error attention */
8083 BLOGE(sc, "FATAL error from CFC\n");
8087 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8088 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8089 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8090 /* RQ_USDMDP_FIFO_OVERFLOW */
8091 if (val & 0x18000) {
8092 BLOGE(sc, "FATAL error from PXP\n");
8095 if (!CHIP_IS_E1x(sc)) {
8096 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8097 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8101 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8102 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8104 if (attn & AEU_PXP2_HW_INT_BIT) {
8105 /* CQ47854 workaround: do not panic on
8106 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8108 if (!CHIP_IS_E1x(sc)) {
8109 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8110 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8111 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8112 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8114 * If only the PXP2_EOP_ERROR_BIT is set in
8115 * STS0 and STS1 - clear it.
8117 * We may lose additional attentions between
8118 * STS0 and STS_CLR0; in that case the user will
8119 * not be notified about them.
8121 if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8123 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8125 /* print the register, since no one can restore it */
8126 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8129 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8132 if (val0 & PXP2_EOP_ERROR_BIT) {
8133 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8136 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8137 * set then clear attention from PXP2 block without panic
8139 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8140 ((val1 & mask1) == 0))
8141 attn &= ~AEU_PXP2_HW_INT_BIT;
8146 if (attn & HW_INTERRUT_ASSERT_SET_2) {
8147 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8148 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8150 val = REG_RD(sc, reg_offset);
8151 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8152 REG_WR(sc, reg_offset, val);
8154 BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8155 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8156 bxe_panic(sc, ("HW block attention set2\n"));
8161 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8164 int port = SC_PORT(sc);
8168 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8169 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8170 BLOGE(sc, "DB hw attention 0x%08x\n", val);
8171 /* DORQ discard attention */
8173 BLOGE(sc, "FATAL error from DORQ\n");
8177 if (attn & HW_INTERRUT_ASSERT_SET_1) {
8178 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8179 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8181 val = REG_RD(sc, reg_offset);
8182 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8183 REG_WR(sc, reg_offset, val);
8185 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8186 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8187 bxe_panic(sc, ("HW block attention set1\n"));
8192 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8195 int port = SC_PORT(sc);
8199 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8202 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8203 val = REG_RD(sc, reg_offset);
8204 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8205 REG_WR(sc, reg_offset, val);
8207 BLOGW(sc, "SPIO5 hw attention\n");
8209 /* Fan failure attention */
8210 elink_hw_reset_phy(&sc->link_params);
8211 bxe_fan_failure(sc);
8214 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8215 bxe_acquire_phy_lock(sc);
8216 elink_handle_module_detect_int(&sc->link_params);
8217 bxe_release_phy_lock(sc);
8220 if (attn & HW_INTERRUT_ASSERT_SET_0) {
8221 val = REG_RD(sc, reg_offset);
8222 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8223 REG_WR(sc, reg_offset, val);
8225 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8226 (attn & HW_INTERRUT_ASSERT_SET_0)));
8231 bxe_attn_int_deasserted(struct bxe_softc *sc,
8232 uint32_t deasserted)
8234 struct attn_route attn;
8235 struct attn_route *group_mask;
8236 int port = SC_PORT(sc);
8241 uint8_t global = FALSE;
8244 * Need to take HW lock because MCP or other port might also
8245 * try to handle this event.
8247 bxe_acquire_alr(sc);
8249 if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8251 * In case of parity errors don't handle attentions so that
8252 * other functions would "see" the parity errors.
8254 sc->recovery_state = BXE_RECOVERY_INIT;
8255 // XXX schedule a recovery task...
8256 /* disable HW interrupts */
8257 bxe_int_disable(sc);
8258 bxe_release_alr(sc);
8262 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8263 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8264 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8265 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8266 if (!CHIP_IS_E1x(sc)) {
8267 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8272 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8273 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8275 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8276 if (deasserted & (1 << index)) {
8277 group_mask = &sc->attn_group[index];
8280 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8281 group_mask->sig[0], group_mask->sig[1],
8282 group_mask->sig[2], group_mask->sig[3],
8283 group_mask->sig[4]);
8285 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8286 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8287 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8288 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8289 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8293 bxe_release_alr(sc);
8295 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8296 reg_addr = (HC_REG_COMMAND_REG + port*32 +
8297 COMMAND_REG_ATTN_BITS_CLR);
8299 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8304 "about to mask 0x%08x at %s addr 0x%08x\n", val,
8305 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8306 REG_WR(sc, reg_addr, val);
8308 if (~sc->attn_state & deasserted) {
8309 BLOGE(sc, "IGU error\n");
8312 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8313 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8315 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8317 aeu_mask = REG_RD(sc, reg_addr);
8319 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8320 aeu_mask, deasserted);
8321 aeu_mask |= (deasserted & 0x3ff);
8322 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8324 REG_WR(sc, reg_addr, aeu_mask);
8325 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8327 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8328 sc->attn_state &= ~deasserted;
8329 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8333 bxe_attn_int(struct bxe_softc *sc)
8335 /* read local copy of bits */
8336 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8337 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8338 uint32_t attn_state = sc->attn_state;
8340 /* look for changed bits */
8341 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
8342 uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
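/*
 * Worked example (illustrative): attn_bits = 0x6, attn_ack = 0x4 and
 * attn_state = 0x4 yield asserted = 0x6 & ~0x4 & ~0x4 = 0x2 (newly
 * raised) and deasserted = ~0x6 & 0x4 & 0x4 = 0x0 (nothing dropped).
 */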
8345 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8346 attn_bits, attn_ack, asserted, deasserted);
8348 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8349 BLOGE(sc, "BAD attention state\n");
8352 /* handle bits that were raised */
8354 bxe_attn_int_asserted(sc, asserted);
8358 bxe_attn_int_deasserted(sc, deasserted);
8363 bxe_update_dsb_idx(struct bxe_softc *sc)
8365 struct host_sp_status_block *def_sb = sc->def_sb;
8368 mb(); /* status block is written to by the chip */
8370 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8371 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8372 rc |= BXE_DEF_SB_ATT_IDX;
8375 if (sc->def_idx != def_sb->sp_sb.running_index) {
8376 sc->def_idx = def_sb->sp_sb.running_index;
8377 rc |= BXE_DEF_SB_IDX;
8385 static inline struct ecore_queue_sp_obj *
8386 bxe_cid_to_q_obj(struct bxe_softc *sc,
8389 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8390 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8394 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8396 struct ecore_mcast_ramrod_params rparam;
8399 memset(&rparam, 0, sizeof(rparam));
8401 rparam.mcast_obj = &sc->mcast_obj;
8405 /* clear pending state for the last command */
8406 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8408 /* if there are pending mcast commands - send them */
8409 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8410 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8413 "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8417 BXE_MCAST_UNLOCK(sc);
8421 bxe_handle_classification_eqe(struct bxe_softc *sc,
8422 union event_ring_elem *elem)
8424 unsigned long ramrod_flags = 0;
8426 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8427 struct ecore_vlan_mac_obj *vlan_mac_obj;
8429 /* always push next commands out, don't wait here */
8430 bit_set(&ramrod_flags, RAMROD_CONT);
8432 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8433 case ECORE_FILTER_MAC_PENDING:
8434 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8435 vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8438 case ECORE_FILTER_MCAST_PENDING:
8439 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8441 * This is only relevant for 57710 where multicast MACs are
8442 * configured as unicast MACs using the same ramrod.
8444 bxe_handle_mcast_eqe(sc);
8448 BLOGE(sc, "Unsupported classification command: %d\n",
8449 elem->message.data.eth_event.echo);
8453 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8456 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8457 } else if (rc > 0) {
8458 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8463 bxe_handle_rx_mode_eqe(struct bxe_softc *sc,
8464 union event_ring_elem *elem)
8466 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8468 /* send the rx_mode command again if it was requested */
8469 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8471 bxe_set_storm_rx_mode(sc);
8476 bxe_update_eq_prod(struct bxe_softc *sc,
8479 storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8480 wmb(); /* keep prod updates ordered */
8484 bxe_eq_int(struct bxe_softc *sc)
8486 uint16_t hw_cons, sw_cons, sw_prod;
8487 union event_ring_elem *elem;
8492 struct ecore_queue_sp_obj *q_obj;
8493 struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8494 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8496 hw_cons = le16toh(*sc->eq_cons_sb);
8499 * The hw_cons range is 1-255 and 257, while the sw_cons range is
8500 * 0-254 and 256. When we get to the next page we need to adjust so
8501 * the loop condition below is met. The next element is the size of a
8502 * regular element, hence we increment by 1.
8504 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
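/*
 * Illustrative note: EQ_DESC_MAX_PAGE marks the last slot of a page,
 * which holds the next-page pointer rather than a real element, so a
 * hw_cons landing on it is bumped past the link entry to keep the
 * hw_cons/sw_cons ranges described above in step.
 */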
8509 * This function never runs in parallel with itself for a specific
8510 * sc, so there is no need for a read memory barrier here.
8512 sw_cons = sc->eq_cons;
8513 sw_prod = sc->eq_prod;
8515 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8516 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8520 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8522 elem = &sc->eq[EQ_DESC(sw_cons)];
8524 /* elem CID originates from FW, actually LE */
8525 cid = SW_CID(elem->message.data.cfc_del_event.cid);
8526 opcode = elem->message.opcode;
8528 /* handle eq element */
8531 case EVENT_RING_OPCODE_STAT_QUERY:
8532 BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8534 /* nothing to do with stats comp */
8537 case EVENT_RING_OPCODE_CFC_DEL:
8538 /* handle according to cid range */
8539 /* we may want to verify here that the sc state is HALTING */
8540 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8541 q_obj = bxe_cid_to_q_obj(sc, cid);
8542 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8547 case EVENT_RING_OPCODE_STOP_TRAFFIC:
8548 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8549 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8552 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8555 case EVENT_RING_OPCODE_START_TRAFFIC:
8556 BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8557 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8560 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8563 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8564 echo = elem->message.data.function_update_event.echo;
8565 if (echo == SWITCH_UPDATE) {
8566 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8567 if (f_obj->complete_cmd(sc, f_obj,
8568 ECORE_F_CMD_SWITCH_UPDATE)) {
8574 "AFEX: ramrod completed FUNCTION_UPDATE\n");
8578 case EVENT_RING_OPCODE_FORWARD_SETUP:
8579 q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8580 if (q_obj->complete_cmd(sc, q_obj,
8581 ECORE_Q_CMD_SETUP_TX_ONLY)) {
8586 case EVENT_RING_OPCODE_FUNCTION_START:
8587 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8588 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8593 case EVENT_RING_OPCODE_FUNCTION_STOP:
8594 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8595 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8601 switch (opcode | sc->state) {
8602 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8603 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8604 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8605 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8606 rss_raw->clear_pending(rss_raw);
8609 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8610 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8611 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8612 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8613 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8614 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8615 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8616 bxe_handle_classification_eqe(sc, elem);
8619 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8620 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8621 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8622 BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8623 bxe_handle_mcast_eqe(sc);
8626 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8627 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8628 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8629 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8630 bxe_handle_rx_mode_eqe(sc, elem);
8634 /* unknown event - log an error and continue */
8635 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8636 elem->message.opcode, sc->state);
8644 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8646 sc->eq_cons = sw_cons;
8647 sc->eq_prod = sw_prod;
8649 /* make sure the above memory writes have been issued */
8652 /* update producer */
8653 bxe_update_eq_prod(sc, sc->eq_prod);
8657 bxe_handle_sp_tq(void *context,
8660 struct bxe_softc *sc = (struct bxe_softc *)context;
8663 BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8665 /* what work needs to be performed? */
8666 status = bxe_update_dsb_idx(sc);
8668 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8671 if (status & BXE_DEF_SB_ATT_IDX) {
8672 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8674 status &= ~BXE_DEF_SB_ATT_IDX;
8677 /* SP events: STAT_QUERY and others */
8678 if (status & BXE_DEF_SB_IDX) {
8679 /* handle EQ completions */
8680 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8682 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8683 le16toh(sc->def_idx), IGU_INT_NOP, 1);
8684 status &= ~BXE_DEF_SB_IDX;
8687 /* if status is non zero then something went wrong */
8688 if (__predict_false(status)) {
8689 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8692 /* ack status block only if something was actually handled */
8693 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8694 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8697 * Must be called after the EQ processing (since eq leads to sriov
8698 * ramrod completion flows).
8699 * This flow may have been scheduled by the arrival of a ramrod
8700 * completion, or by the sriov code rescheduling itself.
8702 // XXX bxe_iov_sp_task(sc);
8707 bxe_handle_fp_tq(void *context,
8710 struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8711 struct bxe_softc *sc = fp->sc;
8712 uint8_t more_tx = FALSE;
8713 uint8_t more_rx = FALSE;
8715 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8718 * The IFF_DRV_RUNNING state can't be checked here since we process
8719 * slowpath events on a client queue during setup. Instead we need
8720 * to add a "process/continue" flag that the driver can use to tell
8721 * this task not to do anything.
8724 if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
8729 /* update the fastpath index */
8730 bxe_update_fp_sb_idx(fp);
8732 /* XXX add loop here if ever support multiple tx CoS */
8733 /* fp->txdata[cos] */
8734 if (bxe_has_tx_work(fp)) {
8736 more_tx = bxe_txeof(sc, fp);
8737 BXE_FP_TX_UNLOCK(fp);
8740 if (bxe_has_rx_work(fp)) {
8741 more_rx = bxe_rxeof(sc, fp);
8744 if (more_rx /*|| more_tx*/) {
8745 /* still more work to do */
8746 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8750 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8751 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8755 bxe_task_fp(struct bxe_fastpath *fp)
8757 struct bxe_softc *sc = fp->sc;
8758 uint8_t more_tx = FALSE;
8759 uint8_t more_rx = FALSE;
8761 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8763 /* update the fastpath index */
8764 bxe_update_fp_sb_idx(fp);
8766 /* XXX add loop here if ever support multiple tx CoS */
8767 /* fp->txdata[cos] */
8768 if (bxe_has_tx_work(fp)) {
8770 more_tx = bxe_txeof(sc, fp);
8771 BXE_FP_TX_UNLOCK(fp);
8774 if (bxe_has_rx_work(fp)) {
8775 more_rx = bxe_rxeof(sc, fp);
8778 if (more_rx /*|| more_tx*/) {
8779 /* still more work to do, bail out of this ISR and process later */
8780 taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8785 * Here we write the fastpath index taken before doing any tx or rx work.
8786 * It is entirely possible that other hw events occurred up to this point
8787 * and were already processed accordingly above. Since we're about to
8788 * write an older fastpath index, another interrupt may arrive in which
8789 * we do no work.
8791 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8792 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8796 * Legacy interrupt entry point.
8798 * Verifies that the controller generated the interrupt and
8799 * then calls a separate routine to handle the various
8800 * interrupt causes: link, RX, and TX.
8803 bxe_intr_legacy(void *xsc)
8805 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8806 struct bxe_fastpath *fp;
8807 uint16_t status, mask;
8810 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8813 * 0 for ustorm, 1 for cstorm
8814 * the bits returned from ack_int() are 0-15
8815 * bit 0 = attention status block
8816 * bit 1 = fast path status block
8817 * a mask of 0x2 or more = tx/rx event
8818 * a mask of 1 = slow path event
8821 status = bxe_ack_int(sc);
8823 /* the interrupt is not for us */
8824 if (__predict_false(status == 0)) {
8825 BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8829 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8831 FOR_EACH_ETH_QUEUE(sc, i) {
8833 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8834 if (status & mask) {
8835 /* acknowledge and disable further fastpath interrupts */
8836 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8842 if (__predict_false(status & 0x1)) {
8843 /* acknowledge and disable further slowpath interrupts */
8844 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8846 /* schedule slowpath handler */
8847 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8852 if (__predict_false(status)) {
8853 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8857 /* slowpath interrupt entry point */
8859 bxe_intr_sp(void *xsc)
8861 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8863 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8865 /* acknowledge and disable further slowpath interrupts */
8866 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8868 /* schedule slowpath handler */
8869 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8872 /* fastpath interrupt entry point */
8874 bxe_intr_fp(void *xfp)
8876 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8877 struct bxe_softc *sc = fp->sc;
8879 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8882 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8883 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8885 /* acknowledge and disable further fastpath interrupts */
8886 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8891 /* Release all interrupts allocated by the driver. */
8893 bxe_interrupt_free(struct bxe_softc *sc)
8897 switch (sc->interrupt_mode) {
8898 case INTR_MODE_INTX:
8899 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8900 if (sc->intr[0].resource != NULL) {
8901 bus_release_resource(sc->dev,
8904 sc->intr[0].resource);
8908 for (i = 0; i < sc->intr_count; i++) {
8909 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8910 if (sc->intr[i].resource && sc->intr[i].rid) {
8911 bus_release_resource(sc->dev,
8914 sc->intr[i].resource);
8917 pci_release_msi(sc->dev);
8919 case INTR_MODE_MSIX:
8920 for (i = 0; i < sc->intr_count; i++) {
8921 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8922 if (sc->intr[i].resource && sc->intr[i].rid) {
8923 bus_release_resource(sc->dev,
8926 sc->intr[i].resource);
8929 pci_release_msi(sc->dev);
8932 /* nothing to do as initial allocation failed */
8938 * This function determines and allocates the appropriate
8939 * interrupt based on system capabilities and user request.
8941 * The user may force a particular interrupt mode, specify
8942 * the number of receive queues, specify the method for
8943 * distributing received frames to receive queues, or use
8944 * the default settings which will automatically select the
8945 * best supported combination. In addition, the OS may or
8946 * may not support certain combinations of these settings.
8947 * This routine attempts to reconcile the settings requested
8948 * by the user with the capabilities available from the system
8949 * to select the optimal combination of features.
8952 * 0 = Success, !0 = Failure.
8955 bxe_interrupt_alloc(struct bxe_softc *sc)
8959 int num_requested = 0;
8960 int num_allocated = 0;
8964 /* get the number of available MSI/MSI-X interrupts from the OS */
8965 if (sc->interrupt_mode > 0) {
8966 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8967 msix_count = pci_msix_count(sc->dev);
8970 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8971 msi_count = pci_msi_count(sc->dev);
8974 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8975 msi_count, msix_count);
8978 do { /* try allocating MSI-X interrupt resources (at least 2) */
8979 if (sc->interrupt_mode != INTR_MODE_MSIX) {
8983 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8985 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8989 /* ask for the necessary number of MSI-X vectors */
8990 num_requested = min((sc->num_queues + 1), msix_count);
8992 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8994 num_allocated = num_requested;
8995 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8996 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8997 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9001 if (num_allocated < 2) { /* possible? */
9002 BLOGE(sc, "MSI-X allocation less than 2!\n");
9003 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9004 pci_release_msi(sc->dev);
9008 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
9009 num_requested, num_allocated);
9011 /* best effort so use the number of vectors allocated to us */
9012 sc->intr_count = num_allocated;
9013 sc->num_queues = num_allocated - 1;
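/*
 * Example (illustrative): with 4 queues configured, min(4 + 1,
 * msix_count) = 5 vectors are requested; if all 5 are granted, vector 0
 * serves the slowpath and vectors 1-4 serve one fastpath queue each.
 */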
9015 rid = 1; /* initial resource identifier */
9017 /* allocate the MSI-X vectors */
9018 for (i = 0; i < num_allocated; i++) {
9019 sc->intr[i].rid = (rid + i);
9021 if ((sc->intr[i].resource =
9022 bus_alloc_resource_any(sc->dev,
9025 RF_ACTIVE)) == NULL) {
9026 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9029 for (j = (i - 1); j >= 0; j--) {
9030 bus_release_resource(sc->dev,
9033 sc->intr[j].resource);
9038 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9039 pci_release_msi(sc->dev);
9043 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9047 do { /* try allocating MSI vector resources (at least 2) */
9048 if (sc->interrupt_mode != INTR_MODE_MSI) {
9052 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9054 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9058 /* ask for a single MSI vector */
9061 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9063 num_allocated = num_requested;
9064 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9065 BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9066 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9070 if (num_allocated != 1) { /* possible? */
9071 BLOGE(sc, "MSI allocation is not 1!\n");
9072 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9073 pci_release_msi(sc->dev);
9077 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9078 num_requested, num_allocated);
9080 /* best effort so use the number of vectors allocated to us */
9081 sc->intr_count = num_allocated;
9082 sc->num_queues = num_allocated;
9084 rid = 1; /* initial resource identifier */
9086 sc->intr[0].rid = rid;
9088 if ((sc->intr[0].resource =
9089 bus_alloc_resource_any(sc->dev,
9092 RF_ACTIVE)) == NULL) {
9093 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9096 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9097 pci_release_msi(sc->dev);
9101 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9104 do { /* try allocating INTx vector resources */
9105 if (sc->interrupt_mode != INTR_MODE_INTX) {
9109 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9111 /* only one vector for INTx */
9115 rid = 0; /* initial resource identifier */
9117 sc->intr[0].rid = rid;
9119 if ((sc->intr[0].resource =
9120 bus_alloc_resource_any(sc->dev,
9123 (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9124 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9127 sc->interrupt_mode = -1; /* Failed! */
9131 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9134 if (sc->interrupt_mode == -1) {
9135 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9139 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9140 sc->interrupt_mode, sc->num_queues);
9148 bxe_interrupt_detach(struct bxe_softc *sc)
9150 struct bxe_fastpath *fp;
9153 /* release interrupt resources */
9154 for (i = 0; i < sc->intr_count; i++) {
9155 if (sc->intr[i].resource && sc->intr[i].tag) {
9156 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9157 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9161 for (i = 0; i < sc->num_queues; i++) {
9164 taskqueue_drain(fp->tq, &fp->tq_task);
9165 taskqueue_free(fp->tq);
9172 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9173 taskqueue_free(sc->sp_tq);
9179 * Enables interrupts and attach to the ISR.
9181 * When using multiple MSI/MSI-X vectors the first vector
9182 * is used for slowpath operations while all remaining
9183 * vectors are used for fastpath operations. If only a
9184 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9185 * ISR must look for both slowpath and fastpath completions.
9188 bxe_interrupt_attach(struct bxe_softc *sc)
9190 struct bxe_fastpath *fp;
9194 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9195 "bxe%d_sp_tq", sc->unit);
9196 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9197 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
9198 taskqueue_thread_enqueue,
9200 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9201 "%s", sc->sp_tq_name);
9204 for (i = 0; i < sc->num_queues; i++) {
9206 snprintf(fp->tq_name, sizeof(fp->tq_name),
9207 "bxe%d_fp%d_tq", sc->unit, i);
9208 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9209 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
9210 taskqueue_thread_enqueue,
9212 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9216 /* setup interrupt handlers */
9217 if (sc->interrupt_mode == INTR_MODE_MSIX) {
9218 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9221 * Setup the interrupt handler. Note that we pass the driver instance
9222 * to the interrupt handler for the slowpath.
9224 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9225 (INTR_TYPE_NET | INTR_MPSAFE),
9226 NULL, bxe_intr_sp, sc,
9227 &sc->intr[0].tag)) != 0) {
9228 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9229 goto bxe_interrupt_attach_exit;
9232 bus_describe_intr(sc->dev, sc->intr[0].resource,
9233 sc->intr[0].tag, "sp");
9235 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9237 /* initialize the fastpath vectors (note the first was used for sp) */
9238 for (i = 0; i < sc->num_queues; i++) {
9240 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9243 * Setup the interrupt handler. Note that we pass the
9244 * fastpath context to the interrupt handler in this
9247 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9248 (INTR_TYPE_NET | INTR_MPSAFE),
9249 NULL, bxe_intr_fp, fp,
9250 &sc->intr[i + 1].tag)) != 0) {
9251 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9253 goto bxe_interrupt_attach_exit;
9256 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9257 sc->intr[i + 1].tag, "fp%02d", i);
9259 /* bind the fastpath instance to a cpu */
9260 if (sc->num_queues > 1) {
9261 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9264 fp->state = BXE_FP_STATE_IRQ;
9266 } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9267 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9270 * Setup the interrupt handler. Note that we pass the
9271 * driver instance to the interrupt handler which
9272 * will handle both the slowpath and fastpath.
9274 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9275 (INTR_TYPE_NET | INTR_MPSAFE),
9276 NULL, bxe_intr_legacy, sc,
9277 &sc->intr[0].tag)) != 0) {
9278 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9279 goto bxe_interrupt_attach_exit;
9282 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9283 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9286 * Setup the interrupt handler. Note that we pass the
9287 * driver instance to the interrupt handler which
9288 * will handle both the slowpath and fastpath.
9290 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9291 (INTR_TYPE_NET | INTR_MPSAFE),
9292 NULL, bxe_intr_legacy, sc,
9293 &sc->intr[0].tag)) != 0) {
9294 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9295 goto bxe_interrupt_attach_exit;
9299 bxe_interrupt_attach_exit:
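/*
 * Illustrative sketch (hypothetical helper, not part of the driver): in
 * multi-vector MSI-X mode the attach logic above reserves vector 0 for
 * the slowpath and services fastpath queue i with vector (i + 1).
 */
static inline int
bxe_example_fp_vector(int queue_index)
{
    return (queue_index + 1); /* vector 0 is reserved for the slowpath */
}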
9304 static int bxe_init_hw_common_chip(struct bxe_softc *sc);
9305 static int bxe_init_hw_common(struct bxe_softc *sc);
9306 static int bxe_init_hw_port(struct bxe_softc *sc);
9307 static int bxe_init_hw_func(struct bxe_softc *sc);
9308 static void bxe_reset_common(struct bxe_softc *sc);
9309 static void bxe_reset_port(struct bxe_softc *sc);
9310 static void bxe_reset_func(struct bxe_softc *sc);
9311 static int bxe_gunzip_init(struct bxe_softc *sc);
9312 static void bxe_gunzip_end(struct bxe_softc *sc);
9313 static int bxe_init_firmware(struct bxe_softc *sc);
9314 static void bxe_release_firmware(struct bxe_softc *sc);
9317 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9318 .init_hw_cmn_chip = bxe_init_hw_common_chip,
9319 .init_hw_cmn = bxe_init_hw_common,
9320 .init_hw_port = bxe_init_hw_port,
9321 .init_hw_func = bxe_init_hw_func,
9323 .reset_hw_cmn = bxe_reset_common,
9324 .reset_hw_port = bxe_reset_port,
9325 .reset_hw_func = bxe_reset_func,
9327 .gunzip_init = bxe_gunzip_init,
9328 .gunzip_end = bxe_gunzip_end,
9330 .init_fw = bxe_init_firmware,
9331 .release_fw = bxe_release_firmware,
9335 bxe_init_func_obj(struct bxe_softc *sc)
9339 ecore_init_func_obj(sc,
9341 BXE_SP(sc, func_rdata),
9342 BXE_SP_MAPPING(sc, func_rdata),
9343 BXE_SP(sc, func_afex_rdata),
9344 BXE_SP_MAPPING(sc, func_afex_rdata),
9349 bxe_init_hw(struct bxe_softc *sc,
9352 struct ecore_func_state_params func_params = { NULL };
9355 /* prepare the parameters for function state transitions */
9356 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9358 func_params.f_obj = &sc->func_obj;
9359 func_params.cmd = ECORE_F_CMD_HW_INIT;
9361 func_params.params.hw_init.load_phase = load_code;
9364 * Via a plethora of function pointers, we will eventually reach
9365 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9367 rc = ecore_func_state_change(sc, &func_params);
9373 bxe_fill(struct bxe_softc *sc,
9380 if (!(len % 4) && !(addr % 4)) {
9381 for (i = 0; i < len; i += 4) {
9382 REG_WR(sc, (addr + i), fill);
9385 for (i = 0; i < len; i++) {
9386 REG_WR8(sc, (addr + i), fill);
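/*
 * Illustrative usage of bxe_fill() above (hypothetical addresses): the
 * dword fast path is taken only when both the length and the address are
 * 4-byte aligned; otherwise the region is filled with byte-wide writes.
 *
 *   bxe_fill(sc, base, 0, 64);     // 16 dword writes
 *   bxe_fill(sc, base + 2, 0, 64); // 64 byte writes (unaligned address)
 */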
9391 /* writes FP SP data to FW - data_size in dwords */
9393 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9395 uint32_t *sb_data_p,
9400 for (index = 0; index < data_size; index++) {
9402 (BAR_CSTRORM_INTMEM +
9403 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9404 (sizeof(uint32_t) * index)),
9405 *(sb_data_p + index));
9410 bxe_zero_fp_sb(struct bxe_softc *sc,
9413 struct hc_status_block_data_e2 sb_data_e2;
9414 struct hc_status_block_data_e1x sb_data_e1x;
9415 uint32_t *sb_data_p;
9416 uint32_t data_size = 0;
9418 if (!CHIP_IS_E1x(sc)) {
9419 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9420 sb_data_e2.common.state = SB_DISABLED;
9421 sb_data_e2.common.p_func.vf_valid = FALSE;
9422 sb_data_p = (uint32_t *)&sb_data_e2;
9423 data_size = (sizeof(struct hc_status_block_data_e2) /
9426 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9427 sb_data_e1x.common.state = SB_DISABLED;
9428 sb_data_e1x.common.p_func.vf_valid = FALSE;
9429 sb_data_p = (uint32_t *)&sb_data_e1x;
9430 data_size = (sizeof(struct hc_status_block_data_e1x) /
9434 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9436 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9437 0, CSTORM_STATUS_BLOCK_SIZE);
9438 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9439 0, CSTORM_SYNC_BLOCK_SIZE);
9443 bxe_wr_sp_sb_data(struct bxe_softc *sc,
9444 struct hc_sp_status_block_data *sp_sb_data)
9449 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9452 (BAR_CSTRORM_INTMEM +
9453 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9454 (i * sizeof(uint32_t))),
9455 *((uint32_t *)sp_sb_data + i));
9460 bxe_zero_sp_sb(struct bxe_softc *sc)
9462 struct hc_sp_status_block_data sp_sb_data;
9464 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9466 sp_sb_data.state = SB_DISABLED;
9467 sp_sb_data.p_func.vf_valid = FALSE;
9469 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9472 (BAR_CSTRORM_INTMEM +
9473 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9474 0, CSTORM_SP_STATUS_BLOCK_SIZE);
9476 (BAR_CSTRORM_INTMEM +
9477 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9478 0, CSTORM_SP_SYNC_BLOCK_SIZE);
9482 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9486 hc_sm->igu_sb_id = igu_sb_id;
9487 hc_sm->igu_seg_id = igu_seg_id;
9488 hc_sm->timer_value = 0xFF;
9489 hc_sm->time_to_expire = 0xFFFFFFFF;
9493 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9495 /* zero out state machine indices */
9498 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9501 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9502 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9503 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9504 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9509 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9510 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9513 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9514 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9515 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9516 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9517 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9518 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9519 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9520 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9524 bxe_init_sb(struct bxe_softc *sc,
9531 struct hc_status_block_data_e2 sb_data_e2;
9532 struct hc_status_block_data_e1x sb_data_e1x;
9533 struct hc_status_block_sm *hc_sm_p;
9534 uint32_t *sb_data_p;
9538 if (CHIP_INT_MODE_IS_BC(sc)) {
9539 igu_seg_id = HC_SEG_ACCESS_NORM;
9541 igu_seg_id = IGU_SEG_ACCESS_NORM;
9544 bxe_zero_fp_sb(sc, fw_sb_id);
9546 if (!CHIP_IS_E1x(sc)) {
9547 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9548 sb_data_e2.common.state = SB_ENABLED;
9549 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9550 sb_data_e2.common.p_func.vf_id = vfid;
9551 sb_data_e2.common.p_func.vf_valid = vf_valid;
9552 sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9553 sb_data_e2.common.same_igu_sb_1b = TRUE;
9554 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9555 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9556 hc_sm_p = sb_data_e2.common.state_machine;
9557 sb_data_p = (uint32_t *)&sb_data_e2;
9558 data_size = (sizeof(struct hc_status_block_data_e2) /
9560 bxe_map_sb_state_machines(sb_data_e2.index_data);
9562 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9563 sb_data_e1x.common.state = SB_ENABLED;
9564 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9565 sb_data_e1x.common.p_func.vf_id = 0xff;
9566 sb_data_e1x.common.p_func.vf_valid = FALSE;
9567 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9568 sb_data_e1x.common.same_igu_sb_1b = TRUE;
9569 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9570 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9571 hc_sm_p = sb_data_e1x.common.state_machine;
9572 sb_data_p = (uint32_t *)&sb_data_e1x;
9573 data_size = (sizeof(struct hc_status_block_data_e1x) /
9575 bxe_map_sb_state_machines(sb_data_e1x.index_data);
9578 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9579 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9581 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9583 /* write indices to HW - PCI guarantees endianness of regpairs */
9584 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9587 static inline uint8_t
9588 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9590 if (CHIP_IS_E1x(fp->sc)) {
9591 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9597 static inline uint32_t
9598 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
9599 struct bxe_fastpath *fp)
9601 uint32_t offset = BAR_USTRORM_INTMEM;
9603 if (!CHIP_IS_E1x(sc)) {
9604 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9606 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9613 bxe_init_eth_fp(struct bxe_softc *sc,
9616 struct bxe_fastpath *fp = &sc->fp[idx];
9617 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9618 unsigned long q_type = 0;
9624 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
9625 "bxe%d_fp%d_tx_lock", sc->unit, idx);
9626 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
9628 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
9629 "bxe%d_fp%d_rx_lock", sc->unit, idx);
9630 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
9632 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9633 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9635 fp->cl_id = (CHIP_IS_E1x(sc)) ?
9636 (SC_L_ID(sc) + idx) :
9637 /* want client ID same as IGU SB ID for non-E1 */
9639 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9641 /* setup sb indices */
9642 if (!CHIP_IS_E1x(sc)) {
9643 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
9644 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9646 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
9647 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9651 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9653 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9656 * XXX If multiple CoS is ever supported then each fastpath structure
9657 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9659 for (cos = 0; cos < sc->max_cos; cos++) {
9662 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9664 /* nothing more for a VF to do */
9669 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9670 fp->fw_sb_id, fp->igu_sb_id);
9672 bxe_update_fp_sb_idx(fp);
9674 /* Configure Queue State object */
9675 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9676 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9678 ecore_init_queue_obj(sc,
9679 &sc->sp_objs[idx].q_obj,
9684 BXE_SP(sc, q_rdata),
9685 BXE_SP_MAPPING(sc, q_rdata),
9688 /* configure classification DBs */
9689 ecore_init_mac_obj(sc,
9690 &sc->sp_objs[idx].mac_obj,
9694 BXE_SP(sc, mac_rdata),
9695 BXE_SP_MAPPING(sc, mac_rdata),
9696 ECORE_FILTER_MAC_PENDING,
9698 ECORE_OBJ_TYPE_RX_TX,
9701 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9702 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9706 bxe_update_rx_prod(struct bxe_softc *sc,
9707 struct bxe_fastpath *fp,
9708 uint16_t rx_bd_prod,
9709 uint16_t rx_cq_prod,
9710 uint16_t rx_sge_prod)
9712 struct ustorm_eth_rx_producers rx_prods = { 0 };
9715 /* update producers */
9716 rx_prods.bd_prod = rx_bd_prod;
9717 rx_prods.cqe_prod = rx_cq_prod;
9718 rx_prods.sge_prod = rx_sge_prod;
9721 * Make sure that the BD and SGE data is updated before updating the
9722 * producers since FW might read the BD/SGE right after the producer
9724 * This is only applicable for weak-ordered memory model archs such
9725 * as IA-64. The following barrier is also mandatory since the FW
9726 * assumes BDs must have buffers.
9730 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9732 (fp->ustorm_rx_prods_offset + (i * 4)),
9733 ((uint32_t *)&rx_prods)[i]);
9736 wmb(); /* keep prod updates ordered */
9739 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9740 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
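/*
 * Illustrative sketch of the ordering contract above (pseudo-steps, not
 * new driver logic): on a weakly-ordered CPU the producer update must not
 * become visible before the BD/SGE contents it advertises.
 *
 *   bd->addr_lo = htole32(U64_LO(paddr)); // 1) publish buffer address
 *   wmb();                                // 2) order BD write vs. producer
 *   REG_WR(sc, prod_offset, prod);        // 3) FW may now fetch the BD
 */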
9744 bxe_init_rx_rings(struct bxe_softc *sc)
9746 struct bxe_fastpath *fp;
9749 for (i = 0; i < sc->num_queues; i++) {
9755 * Activate the BD ring...
9756 * Warning, this will generate an interrupt (to the TSTORM)
9757 * so this can only be done after the chip is initialized
9759 bxe_update_rx_prod(sc, fp,
9768 if (CHIP_IS_E1(sc)) {
9770 (BAR_USTRORM_INTMEM +
9771 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9772 U64_LO(fp->rcq_dma.paddr));
9774 (BAR_USTRORM_INTMEM +
9775 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9776 U64_HI(fp->rcq_dma.paddr));
9782 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9784 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
9785 fp->tx_db.data.zero_fill1 = 0;
9786 fp->tx_db.data.prod = 0;
9788 fp->tx_pkt_prod = 0;
9789 fp->tx_pkt_cons = 0;
9792 fp->eth_q_stats.tx_pkts = 0;
9796 bxe_init_tx_rings(struct bxe_softc *sc)
9800 for (i = 0; i < sc->num_queues; i++) {
9801 bxe_init_tx_ring_one(&sc->fp[i]);
9806 bxe_init_def_sb(struct bxe_softc *sc)
9808 struct host_sp_status_block *def_sb = sc->def_sb;
9809 bus_addr_t mapping = sc->def_sb_dma.paddr;
9810 int igu_sp_sb_index;
9812 int port = SC_PORT(sc);
9813 int func = SC_FUNC(sc);
9814 int reg_offset, reg_offset_en5;
9817 struct hc_sp_status_block_data sp_sb_data;
9819 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9821 if (CHIP_INT_MODE_IS_BC(sc)) {
9822 igu_sp_sb_index = DEF_SB_IGU_ID;
9823 igu_seg_id = HC_SEG_ACCESS_DEF;
9825 igu_sp_sb_index = sc->igu_dsb_id;
9826 igu_seg_id = IGU_SEG_ACCESS_DEF;
9830 section = ((uint64_t)mapping +
9831 offsetof(struct host_sp_status_block, atten_status_block));
9832 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9835 reg_offset = (port) ?
9836 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9837 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9838 reg_offset_en5 = (port) ?
9839 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9840 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9842 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9843 /* take care of sig[0]..sig[4] */
9844 for (sindex = 0; sindex < 4; sindex++) {
9845 sc->attn_group[index].sig[sindex] =
9846 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9849 if (!CHIP_IS_E1x(sc)) {
9851 * enable5 is separate from the rest of the registers,
9852 * and the address skip is 4 and not 16 between the different groups.
9855 sc->attn_group[index].sig[4] =
9856 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9858 sc->attn_group[index].sig[4] = 0;
9862 if (sc->devinfo.int_block == INT_BLOCK_HC) {
9863 reg_offset = (port) ?
9864 HC_REG_ATTN_MSG1_ADDR_L :
9865 HC_REG_ATTN_MSG0_ADDR_L;
9866 REG_WR(sc, reg_offset, U64_LO(section));
9867 REG_WR(sc, (reg_offset + 4), U64_HI(section));
9868 } else if (!CHIP_IS_E1x(sc)) {
9869 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9870 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9873 section = ((uint64_t)mapping +
9874 offsetof(struct host_sp_status_block, sp_sb));
9878 /* PCI guarantees endianness of regpairs */
9879 sp_sb_data.state = SB_ENABLED;
9880 sp_sb_data.host_sb_addr.lo = U64_LO(section);
9881 sp_sb_data.host_sb_addr.hi = U64_HI(section);
9882 sp_sb_data.igu_sb_id = igu_sp_sb_index;
9883 sp_sb_data.igu_seg_id = igu_seg_id;
9884 sp_sb_data.p_func.pf_id = func;
9885 sp_sb_data.p_func.vnic_id = SC_VN(sc);
9886 sp_sb_data.p_func.vf_id = 0xff;
9888 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9890 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9894 bxe_init_sp_ring(struct bxe_softc *sc)
9896 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9897 sc->spq_prod_idx = 0;
9898 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9899 sc->spq_prod_bd = sc->spq;
9900 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9904 bxe_init_eq_ring(struct bxe_softc *sc)
9906 union event_ring_elem *elem;
9909 for (i = 1; i <= NUM_EQ_PAGES; i++) {
9910 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9912 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9914 (i % NUM_EQ_PAGES)));
9915 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9917 (i % NUM_EQ_PAGES)));
9921 sc->eq_prod = NUM_EQ_DESC;
9922 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9924 atomic_store_rel_long(&sc->eq_spq_left,
9925 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9930 bxe_init_internal_common(struct bxe_softc *sc)
9936 * In switch independent mode, the TSTORM needs to accept
9937 * packets that failed classification, since approximate match
9938 * mac addresses aren't written to NIG LLH.
9941 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
9943 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
9945 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
9950 * Zero this manually as its initialization is currently missing
9953 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9955 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9959 if (!CHIP_IS_E1x(sc)) {
9960 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9961 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9966 bxe_init_internal(struct bxe_softc *sc,
9969 switch (load_code) {
9970 case FW_MSG_CODE_DRV_LOAD_COMMON:
9971 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9972 bxe_init_internal_common(sc);
9975 case FW_MSG_CODE_DRV_LOAD_PORT:
9979 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9980 /* internal memory per function is initialized inside bxe_pf_init */
9984 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9990 storm_memset_func_cfg(struct bxe_softc *sc,
9991 struct tstorm_eth_function_common_config *tcfg,
9997 addr = (BAR_TSTRORM_INTMEM +
9998 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9999 size = sizeof(struct tstorm_eth_function_common_config);
10000 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
10004 bxe_func_init(struct bxe_softc *sc,
10005 struct bxe_func_init_params *p)
10007 struct tstorm_eth_function_common_config tcfg = { 0 };
10009 if (CHIP_IS_E1x(sc)) {
10010 storm_memset_func_cfg(sc, &tcfg, p->func_id);
10013 /* Enable the function in the FW */
10014 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
10015 storm_memset_func_en(sc, p->func_id, 1);
10018 if (p->func_flgs & FUNC_FLG_SPQ) {
10019 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
10021 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
10027 * Calculates the sum of vn_min_rates.
10028 * It's needed for further normalizing of the min_rates.
10030 * sum of vn_min_rates.
10032 * 0 - if all the min_rates are 0.
10033 * In the latter case the fairness algorithm should be deactivated.
10034 * If all min rates are not zero then those that are zeroes will be set to 1.
10037 bxe_calc_vn_min(struct bxe_softc *sc,
10038 struct cmng_init_input *input)
10041 uint32_t vn_min_rate;
10045 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10046 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10047 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10048 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10050 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10051 /* skip hidden VNs */
10053 } else if (!vn_min_rate) {
10054 /* If min rate is zero - set it to 100 */
10055 vn_min_rate = DEF_MIN_RATE;
10060 input->vnic_min_rate[vn] = vn_min_rate;
10063 /* if ETS or all min rates are zeros - disable fairness */
10064 if (BXE_IS_ETS_ENABLED(sc)) {
10065 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10066 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10067 } else if (all_zero) {
10068 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10069 BLOGD(sc, DBG_LOAD,
10070 "Fariness disabled (all MIN values are zeroes)\n");
10072 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
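/*
 * Worked example for bxe_calc_vn_min() (hypothetical MF configuration):
 * with four VNs whose configured min BW values are { 0, 25, 0, 75 }, the
 * two zero entries are raised to DEF_MIN_RATE so the fairness algorithm
 * still schedules them, and fairness stays enabled because not all
 * entries were zero. With { 0, 0, 0, 0 } all_zero is set and fairness is
 * disabled instead.
 */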
10076 static inline uint16_t
10077 bxe_extract_max_cfg(struct bxe_softc *sc,
10080 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10081 FUNC_MF_CFG_MAX_BW_SHIFT);
10084 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10092 bxe_calc_vn_max(struct bxe_softc *sc,
10094 struct cmng_init_input *input)
10096 uint16_t vn_max_rate;
10097 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10100 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10103 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10105 if (IS_MF_SI(sc)) {
10106 /* max_cfg is in percent of link speed */
10107 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10108 } else { /* SD modes */
10109 /* max_cfg is absolute in 100Mb units */
10110 vn_max_rate = (max_cfg * 100);
10114 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10116 input->vnic_max_rate[vn] = vn_max_rate;
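/*
 * Worked example for bxe_calc_vn_max() (hypothetical values): with
 * max_cfg = 50 and a 10000 Mbps link, SI mode treats max_cfg as a
 * percentage of link speed, giving (10000 * 50) / 100 = 5000 Mbps, while
 * the SD modes treat max_cfg as an absolute value in 100 Mb units,
 * giving 50 * 100 = 5000 Mbps as well.
 */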
10120 bxe_cmng_fns_init(struct bxe_softc *sc,
10124 struct cmng_init_input input;
10127 memset(&input, 0, sizeof(struct cmng_init_input));
10129 input.port_rate = sc->link_vars.line_speed;
10131 if (cmng_type == CMNG_FNS_MINMAX) {
10132 /* read mf conf from shmem */
10134 bxe_read_mf_cfg(sc);
10137 /* get VN min rate and enable fairness if not 0 */
10138 bxe_calc_vn_min(sc, &input);
10140 /* get VN max rate */
10141 if (sc->port.pmf) {
10142 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10143 bxe_calc_vn_max(sc, vn, &input);
10147 /* always enable rate shaping and fairness */
10148 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10150 ecore_init_cmng(&input, &sc->cmng);
10154 /* rate shaping and fairness are disabled */
10155 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10159 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10161 if (CHIP_REV_IS_SLOW(sc)) {
10162 return (CMNG_FNS_NONE);
10166 return (CMNG_FNS_MINMAX);
10169 return (CMNG_FNS_NONE);
10173 storm_memset_cmng(struct bxe_softc *sc,
10174 struct cmng_init *cmng,
10182 addr = (BAR_XSTRORM_INTMEM +
10183 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10184 size = sizeof(struct cmng_struct_per_port);
10185 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10187 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10188 func = func_by_vn(sc, vn);
10190 addr = (BAR_XSTRORM_INTMEM +
10191 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10192 size = sizeof(struct rate_shaping_vars_per_vn);
10193 ecore_storm_memset_struct(sc, addr, size,
10194 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10196 addr = (BAR_XSTRORM_INTMEM +
10197 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10198 size = sizeof(struct fairness_vars_per_vn);
10199 ecore_storm_memset_struct(sc, addr, size,
10200 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10205 bxe_pf_init(struct bxe_softc *sc)
10207 struct bxe_func_init_params func_init = { 0 };
10208 struct event_ring_data eq_data = { { 0 } };
10211 if (!CHIP_IS_E1x(sc)) {
10212 /* reset IGU PF statistics: MSIX + ATTN */
10215 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10216 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10217 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10221 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10222 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10223 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10224 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10228 /* function setup flags */
10229 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10232 * This flag is relevant for E1x only.
10233 * E2 doesn't have a TPA configuration in a function level.
10235 flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10237 func_init.func_flgs = flags;
10238 func_init.pf_id = SC_FUNC(sc);
10239 func_init.func_id = SC_FUNC(sc);
10240 func_init.spq_map = sc->spq_dma.paddr;
10241 func_init.spq_prod = sc->spq_prod_idx;
10243 bxe_func_init(sc, &func_init);
10245 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10248 * Congestion management values depend on the link rate.
10249 * There is no active link so initial link rate is set to 10Gbps.
10250 * When the link comes up the congestion management values are
10251 * re-calculated according to the actual link rate.
10253 sc->link_vars.line_speed = SPEED_10000;
10254 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10256 /* Only the PMF sets the HW */
10257 if (sc->port.pmf) {
10258 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10261 /* init Event Queue - PCI bus guarantees correct endianness */
10262 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10263 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10264 eq_data.producer = sc->eq_prod;
10265 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10266 eq_data.sb_id = DEF_SB_ID;
10267 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10271 bxe_hc_int_enable(struct bxe_softc *sc)
10273 int port = SC_PORT(sc);
10274 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10275 uint32_t val = REG_RD(sc, addr);
10276 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10277 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10278 (sc->intr_count == 1)) ? TRUE : FALSE;
10279 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10282 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10283 HC_CONFIG_0_REG_INT_LINE_EN_0);
10284 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10285 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10287 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10290 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10291 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10292 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10293 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10295 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10296 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10297 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10298 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
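/*
 * For reference, a summary of the HC_CONFIG_0 bits selected by the
 * branches above:
 *
 *   MSI-X (multi vector) : MSI_MSIX_INT_EN | ATTN_BIT_EN
 *   MSI-X (single vector): MSI_MSIX_INT_EN | ATTN_BIT_EN | SINGLE_ISR_EN
 *   MSI                  : MSI_MSIX_INT_EN | ATTN_BIT_EN | SINGLE_ISR_EN
 *   INTx                 : MSI_MSIX_INT_EN | ATTN_BIT_EN | SINGLE_ISR_EN |
 *                          INT_LINE_EN
 */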
10300 if (!CHIP_IS_E1(sc)) {
10301 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10304 REG_WR(sc, addr, val);
10306 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10310 if (CHIP_IS_E1(sc)) {
10311 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10314 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10315 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10317 REG_WR(sc, addr, val);
10319 /* ensure that HC_CONFIG is written before leading/trailing edge config */
10322 if (!CHIP_IS_E1(sc)) {
10323 /* init leading/trailing edge */
10325 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10326 if (sc->port.pmf) {
10327 /* enable nig and gpio3 attention */
10334 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10335 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10338 /* make sure that interrupts are indeed enabled from here on */
10343 bxe_igu_int_enable(struct bxe_softc *sc)
10346 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10347 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10348 (sc->intr_count == 1)) ? TRUE : FALSE;
10349 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10351 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10354 val &= ~(IGU_PF_CONF_INT_LINE_EN |
10355 IGU_PF_CONF_SINGLE_ISR_EN);
10356 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10357 IGU_PF_CONF_ATTN_BIT_EN);
10359 val |= IGU_PF_CONF_SINGLE_ISR_EN;
10362 val &= ~IGU_PF_CONF_INT_LINE_EN;
10363 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10364 IGU_PF_CONF_ATTN_BIT_EN |
10365 IGU_PF_CONF_SINGLE_ISR_EN);
10367 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10368 val |= (IGU_PF_CONF_INT_LINE_EN |
10369 IGU_PF_CONF_ATTN_BIT_EN |
10370 IGU_PF_CONF_SINGLE_ISR_EN);
10373 /* clean the previous status - need to configure the IGU prior to ack */
10374 if ((!msix) || single_msix) {
10375 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10379 val |= IGU_PF_CONF_FUNC_EN;
10381 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10382 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10384 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10388 /* init leading/trailing edge */
10390 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10391 if (sc->port.pmf) {
10392 /* enable nig and gpio3 attention */
10399 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10400 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10402 /* make sure that interrupts are indeed enabled from here on */
10407 bxe_int_enable(struct bxe_softc *sc)
10409 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10410 bxe_hc_int_enable(sc);
10412 bxe_igu_int_enable(sc);
10417 bxe_hc_int_disable(struct bxe_softc *sc)
10419 int port = SC_PORT(sc);
10420 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10421 uint32_t val = REG_RD(sc, addr);
10424 * In E1 we must use only PCI configuration space to disable MSI/MSIX
10425 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10428 if (CHIP_IS_E1(sc)) {
10430 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10431 * to prevent the HC from sending interrupts after we exit the function
10433 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10435 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10436 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10437 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10439 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10440 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10441 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10442 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10445 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10447 /* flush all outstanding writes */
10450 REG_WR(sc, addr, val);
10451 if (REG_RD(sc, addr) != val) {
10452 BLOGE(sc, "proper val not read from HC IGU!\n");
10457 bxe_igu_int_disable(struct bxe_softc *sc)
10459 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10461 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10462 IGU_PF_CONF_INT_LINE_EN |
10463 IGU_PF_CONF_ATTN_BIT_EN);
10465 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10467 /* flush all outstanding writes */
10470 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10471 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10472 BLOGE(sc, "proper val not read from IGU!\n");
10477 bxe_int_disable(struct bxe_softc *sc)
10479 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10480 bxe_hc_int_disable(sc);
10482 bxe_igu_int_disable(sc);
10487 bxe_nic_init(struct bxe_softc *sc,
10492 for (i = 0; i < sc->num_queues; i++) {
10493 bxe_init_eth_fp(sc, i);
10496 rmb(); /* ensure status block indices were read */
10498 bxe_init_rx_rings(sc);
10499 bxe_init_tx_rings(sc);
10505 /* initialize MOD_ABS interrupts */
10506 elink_init_mod_abs_int(sc, &sc->link_vars,
10507 sc->devinfo.chip_id,
10508 sc->devinfo.shmem_base,
10509 sc->devinfo.shmem2_base,
10512 bxe_init_def_sb(sc);
10513 bxe_update_dsb_idx(sc);
10514 bxe_init_sp_ring(sc);
10515 bxe_init_eq_ring(sc);
10516 bxe_init_internal(sc, load_code);
10518 bxe_stats_init(sc);
10520 /* flush all before enabling interrupts */
10523 bxe_int_enable(sc);
10525 /* check for SPIO5 */
10526 bxe_attn_int_deasserted0(sc,
10528 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10530 AEU_INPUTS_ATTN_BITS_SPIO5);
10534 bxe_init_objs(struct bxe_softc *sc)
10536 /* mcast rules must be added to tx if tx switching is enabled */
10537 ecore_obj_type o_type =
10538 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10541 /* RX_MODE controlling object */
10542 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10544 /* multicast configuration controlling object */
10545 ecore_init_mcast_obj(sc,
10551 BXE_SP(sc, mcast_rdata),
10552 BXE_SP_MAPPING(sc, mcast_rdata),
10553 ECORE_FILTER_MCAST_PENDING,
10557 /* Setup CAM credit pools */
10558 ecore_init_mac_credit_pool(sc,
10561 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10562 VNICS_PER_PATH(sc));
10564 ecore_init_vlan_credit_pool(sc,
10566 SC_ABS_FUNC(sc) >> 1,
10567 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10568 VNICS_PER_PATH(sc));
10570 /* RSS configuration object */
10571 ecore_init_rss_config_obj(sc,
10577 BXE_SP(sc, rss_rdata),
10578 BXE_SP_MAPPING(sc, rss_rdata),
10579 ECORE_FILTER_RSS_CONF_PENDING,
10580 &sc->sp_state, ECORE_OBJ_TYPE_RX);
10584 * Initialize the function. This must be called before sending CLIENT_SETUP
10585 * for the first client.
10588 bxe_func_start(struct bxe_softc *sc)
10590 struct ecore_func_state_params func_params = { NULL };
10591 struct ecore_func_start_params *start_params = &func_params.params.start;
10593 /* Prepare parameters for function state transitions */
10594 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10596 func_params.f_obj = &sc->func_obj;
10597 func_params.cmd = ECORE_F_CMD_START;
10599 /* Function parameters */
10600 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
10601 start_params->sd_vlan_tag = OVLAN(sc);
10603 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10604 start_params->network_cos_mode = STATIC_COS;
10605 } else { /* CHIP_IS_E1X */
10606 start_params->network_cos_mode = FW_WRR;
10609 start_params->gre_tunnel_mode = 0;
10610 start_params->gre_tunnel_rss = 0;
10612 return (ecore_func_state_change(sc, &func_params));
10616 bxe_set_power_state(struct bxe_softc *sc,
10621 /* If there is no power capability, silently succeed */
10622 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10623 BLOGW(sc, "No power capability\n");
10627 pmcsr = pci_read_config(sc->dev,
10628 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10633 pci_write_config(sc->dev,
10634 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10635 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10637 if (pmcsr & PCIM_PSTAT_DMASK) {
10638 /* delay required during transition out of D3hot */
10645 /* XXX if there are other clients above don't shut down the power */
10647 /* don't shut down the power for emulation and FPGA */
10648 if (CHIP_REV_IS_SLOW(sc)) {
10652 pmcsr &= ~PCIM_PSTAT_DMASK;
10653 pmcsr |= PCIM_PSTAT_D3;
10656 pmcsr |= PCIM_PSTAT_PMEENABLE;
10659 pci_write_config(sc->dev,
10660 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10664 * No more memory access after this point until the device is brought back to D0.
10670 BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10679 /* returns TRUE if the lock was acquired successfully */
10681 bxe_trylock_hw_lock(struct bxe_softc *sc,
10684 uint32_t lock_status;
10685 uint32_t resource_bit = (1 << resource);
10686 int func = SC_FUNC(sc);
10687 uint32_t hw_lock_control_reg;
10689 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10691 /* Validating that the resource is within range */
10692 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10693 BLOGD(sc, DBG_LOAD,
10694 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10695 resource, HW_LOCK_MAX_RESOURCE_VALUE);
10700 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10702 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10705 /* try to acquire the lock */
10706 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10707 lock_status = REG_RD(sc, hw_lock_control_reg);
10708 if (lock_status & resource_bit) {
10712 BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10713 "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10714 lock_status, resource_bit);
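/*
 * Illustrative usage of bxe_trylock_hw_lock() (hypothetical caller): the
 * lock is taken by writing the resource bit to the SET register and then
 * reading the lock status back to confirm ownership.
 *
 *   if (bxe_trylock_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_LEADER_0)) {
 *       // we are the recovery leader for engine 0
 *   }
 */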
10720 * Get the recovery leader resource id according to the engine this function
10721 * belongs to. Currently only 2 engines are supported.
10724 bxe_get_leader_lock_resource(struct bxe_softc *sc)
10727 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10729 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10733 /* try to acquire a leader lock for current engine */
10735 bxe_trylock_leader_lock(struct bxe_softc *sc)
10737 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10741 bxe_release_leader_lock(struct bxe_softc *sc)
10743 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10746 /* close gates #2, #3 and #4 */
10748 bxe_set_234_gates(struct bxe_softc *sc,
10753 /* gates #2 and #4a are closed/opened for "not E1" only */
10754 if (!CHIP_IS_E1(sc)) {
10756 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10758 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10762 if (CHIP_IS_E1x(sc)) {
10763 /* prevent interrupts from HC on both ports */
10764 val = REG_RD(sc, HC_REG_CONFIG_1);
10765 REG_WR(sc, HC_REG_CONFIG_1,
10766 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10767 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10769 val = REG_RD(sc, HC_REG_CONFIG_0);
10770 REG_WR(sc, HC_REG_CONFIG_0,
10771 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10772 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10774 /* Prevent incoming interrupts in the IGU */
10775 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10777 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10779 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10780 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10783 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10784 close ? "closing" : "opening");
10789 /* poll for the pending writes bit; it should clear in no more than 1s */
10791 bxe_er_poll_igu_vq(struct bxe_softc *sc)
10793 uint32_t cnt = 1000;
10794 uint32_t pend_bits = 0;
10797 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10799 if (pend_bits == 0) {
10804 } while (--cnt > 0);
10807 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10814 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
10817 bxe_clp_reset_prep(struct bxe_softc *sc,
10818 uint32_t *magic_val)
10820 /* Do some magic... */
10821 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10822 *magic_val = val & SHARED_MF_CLP_MAGIC;
10823 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10826 /* restore the value of the 'magic' bit */
10828 bxe_clp_reset_done(struct bxe_softc *sc,
10829 uint32_t magic_val)
10831 /* Restore the 'magic' bit value... */
10832 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10833 MFCFG_WR(sc, shared_mf_config.clp_mb,
10834 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10837 /* prepare for MCP reset, takes care of CLP configurations */
10839 bxe_reset_mcp_prep(struct bxe_softc *sc,
10840 uint32_t *magic_val)
10843 uint32_t validity_offset;
10845 /* set `magic' bit in order to save MF config */
10846 if (!CHIP_IS_E1(sc)) {
10847 bxe_clp_reset_prep(sc, magic_val);
10850 /* get shmem offset */
10851 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10853 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10855 /* Clear validity map flags */
10857 REG_WR(sc, shmem + validity_offset, 0);
10861 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
10862 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
10865 bxe_mcp_wait_one(struct bxe_softc *sc)
10867 /* special handling for emulation and FPGA (10 times longer) */
10868 if (CHIP_REV_IS_SLOW(sc)) {
10869 DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10871 DELAY((MCP_ONE_TIMEOUT) * 1000);
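/*
 * Timing sketch derived from the constants above: bxe_init_shmem() below
 * retries up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 times, so a missing
 * validity signature is declared after roughly 5 seconds on real silicon
 * and roughly 50 seconds on emulation/FPGA (10x delay per wait).
 */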
10875 /* initializes shmem_base and waits for the validity signature to appear */
10877 bxe_init_shmem(struct bxe_softc *sc)
10883 sc->devinfo.shmem_base =
10884 sc->link_params.shmem_base =
10885 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10887 if (sc->devinfo.shmem_base) {
10888 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10889 if (val & SHR_MEM_VALIDITY_MB)
10893 bxe_mcp_wait_one(sc);
10895 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10897 BLOGE(sc, "BAD MCP validity signature\n");
10903 bxe_reset_mcp_comp(struct bxe_softc *sc,
10904 uint32_t magic_val)
10906 int rc = bxe_init_shmem(sc);
10908 /* Restore the `magic' bit value */
10909 if (!CHIP_IS_E1(sc)) {
10910 bxe_clp_reset_done(sc, magic_val);
10917 bxe_pxp_prep(struct bxe_softc *sc)
10919 if (!CHIP_IS_E1(sc)) {
10920 REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10921 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10927 * Reset the whole chip except for:
10929 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10931 * - MISC (including AEU)
10936 bxe_process_kill_chip_reset(struct bxe_softc *sc,
10939 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10940 uint32_t global_bits2, stay_reset2;
10943 * Bits that have to be set in reset_mask2 if we want to reset 'global'
10944 * (per chip) blocks.
10947 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10948 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10951 * Don't reset the following blocks.
10952 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10953 * reset, as in a 4-port device they might still be owned
10954 * by the MCP (there is only one leader per path).
10957 MISC_REGISTERS_RESET_REG_1_RST_HC |
10958 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10959 MISC_REGISTERS_RESET_REG_1_RST_PXP;
10962 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10963 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10964 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10965 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10966 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10967 MISC_REGISTERS_RESET_REG_2_RST_GRC |
10968 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10969 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10970 MISC_REGISTERS_RESET_REG_2_RST_ATC |
10971 MISC_REGISTERS_RESET_REG_2_PGLC |
10972 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10973 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10974 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10975 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10976 MISC_REGISTERS_RESET_REG_2_UMAC0 |
10977 MISC_REGISTERS_RESET_REG_2_UMAC1;
10980 * Keep the following blocks in reset:
10981 * - all xxMACs are handled by the elink code.
10984 MISC_REGISTERS_RESET_REG_2_XMAC |
10985 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10987 /* Full reset masks according to the chip */
10988 reset_mask1 = 0xffffffff;
10990 if (CHIP_IS_E1(sc))
10991 reset_mask2 = 0xffff;
10992 else if (CHIP_IS_E1H(sc))
10993 reset_mask2 = 0x1ffff;
10994 else if (CHIP_IS_E2(sc))
10995 reset_mask2 = 0xfffff;
10996 else /* CHIP_IS_E3 */
10997 reset_mask2 = 0x3ffffff;
10999 /* Don't reset global blocks unless we need to */
11001 reset_mask2 &= ~global_bits2;
11004 * In case of attention in the QM, we need to reset PXP
11005 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
11006 * because otherwise QM reset would release 'close the gates' shortly
11007 * before resetting the PXP, then the PSWRQ would send a write
11008 * request to PGLUE. Then when PXP is reset, PGLUE would try to
11009 * read the payload data from PSWWR, but PSWWR would not
11010 * respond. The write queue in PGLUE would get stuck, and DMAE commands
11011 * would not return. Therefore it's important to reset the second
11012 * reset register (containing the
11013 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
11014 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
11017 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
11018 reset_mask2 & (~not_reset_mask2));
11020 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
11021 reset_mask1 & (~not_reset_mask1));
11026 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
11027 reset_mask2 & (~stay_reset2));
11032 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
11037 bxe_process_kill(struct bxe_softc *sc,
11042 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11043 uint32_t tags_63_32 = 0;
11045 /* Empty the Tetris buffer, wait for 1s */
11047 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11048 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11049 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11050 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11051 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11052 if (CHIP_IS_E3(sc)) {
11053 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11056 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11057 ((port_is_idle_0 & 0x1) == 0x1) &&
11058 ((port_is_idle_1 & 0x1) == 0x1) &&
11059 (pgl_exp_rom2 == 0xffffffff) &&
11060 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11063 } while (cnt-- > 0);
11066 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11067 "are still outstanding read requests after 1s! "
11068 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11069 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11070 sr_cnt, blk_cnt, port_is_idle_0,
11071 port_is_idle_1, pgl_exp_rom2);
11077 /* Close gates #2, #3 and #4 */
11078 bxe_set_234_gates(sc, TRUE);
11080 /* Poll for IGU VQs for 57712 and newer chips */
11081 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11085 /* XXX indicate that "process kill" is in progress to MCP */
11087 /* clear "unprepared" bit */
11088 REG_WR(sc, MISC_REG_UNPREPARED, 0);
11091 /* Make sure all is written to the chip before the reset */
11095 * Wait for 1ms to empty GLUE and PCI-E core queues,
11096 * PSWHST, GRC and PSWRD Tetris buffer.
11100 /* Prepare for chip reset: */
11103 bxe_reset_mcp_prep(sc, &val);
11110 /* reset the chip */
11111 bxe_process_kill_chip_reset(sc, global);
11114 /* clear errors in PGB */
11115 if (!CHIP_IS_E1(sc))
11116 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11118 /* Recover after reset: */
11120 if (global && bxe_reset_mcp_comp(sc, val)) {
11124 /* XXX add resetting the NO_MCP mode DB here */
11126 /* Open the gates #2, #3 and #4 */
11127 bxe_set_234_gates(sc, FALSE);
11130 * IGU/AEU preparation: bring back the AEU/IGU to a reset state,
11131 * re-enable attentions
11138 bxe_leader_reset(struct bxe_softc *sc)
11141 uint8_t global = bxe_reset_is_global(sc);
11142 uint32_t load_code;
11145 * If we are not going to reset the MCP, load a "fake" driver to reset
11146 * the HW while this driver is the owner of the HW.
11148 if (!global && !BXE_NOMCP(sc)) {
11149 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11150 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11152 BLOGE(sc, "MCP response failure, aborting\n");
11154 goto exit_leader_reset;
11157 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11158 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11159 BLOGE(sc, "MCP unexpected response, aborting\n");
11161 goto exit_leader_reset2;
11164 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11166 BLOGE(sc, "MCP response failure, aborting\n");
11168 goto exit_leader_reset2;
11172 /* try to recover after the failure */
11173 if (bxe_process_kill(sc, global)) {
11174 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11176 goto exit_leader_reset2;
11180 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11183 bxe_set_reset_done(sc);
11185 bxe_clear_reset_global(sc);
11188 exit_leader_reset2:
11190 /* unload "fake driver" if it was loaded */
11191 if (!global && !BXE_NOMCP(sc)) {
11192 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11193 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11199 bxe_release_leader_lock(sc);
11206 * prepare INIT transition, parameters configured:
11207 * - HC configuration
11208 * - Queue's CDU context
11211 bxe_pf_q_prep_init(struct bxe_softc *sc,
11212 struct bxe_fastpath *fp,
11213 struct ecore_queue_init_params *init_params)
11216 int cxt_index, cxt_offset;
11218 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11219 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11221 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11222 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11225 init_params->rx.hc_rate =
11226 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11227 init_params->tx.hc_rate =
11228 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11231 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11233 /* CQ index among the SB indices */
11234 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11235 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11237 /* set maximum number of COSs supported by this queue */
11238 init_params->max_cos = sc->max_cos;
11240 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11241 fp->index, init_params->max_cos);
11243 /* set the context pointers queue object */
11244 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11245 /* XXX change index/cid here if multiple tx CoS is ever supported */
11246 /* fp->txdata[cos]->cid */
11247 cxt_index = fp->index / ILT_PAGE_CIDS;
11248 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11249 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11253 /* set flags that are common for the Tx-only and not normal connections */
11254 static unsigned long
11255 bxe_get_common_flags(struct bxe_softc *sc,
11256 struct bxe_fastpath *fp,
11257 uint8_t zero_stats)
11259 unsigned long flags = 0;
11261 /* PF driver will always initialize the Queue to an ACTIVE state */
11262 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11265 * tx only connections collect statistics (on the same index as the
11266 * parent connection). The statistics are zeroed when the parent
11267 * connection is initialized.
11270 bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11272 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11276 * tx only connections can support tx-switching, though their
11277 * CoS-ness doesn't survive the loopback
11279 if (sc->flags & BXE_TX_SWITCHING) {
11280 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11283 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11288 static unsigned long
11289 bxe_get_q_flags(struct bxe_softc *sc,
11290 struct bxe_fastpath *fp,
11293 unsigned long flags = 0;
11295 if (IS_MF_SD(sc)) {
11296 bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11299 if (sc->ifnet->if_capenable & IFCAP_LRO) {
11300 bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11301 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11305 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11306 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11309 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11311 /* merge with common flags */
11312 return (flags | bxe_get_common_flags(sc, fp, TRUE));
11316 bxe_pf_q_prep_general(struct bxe_softc *sc,
11317 struct bxe_fastpath *fp,
11318 struct ecore_general_setup_params *gen_init,
11321 gen_init->stat_id = bxe_stats_id(fp);
11322 gen_init->spcl_id = fp->cl_id;
11323 gen_init->mtu = sc->mtu;
11324 gen_init->cos = cos;
11328 bxe_pf_rx_q_prep(struct bxe_softc *sc,
11329 struct bxe_fastpath *fp,
11330 struct rxq_pause_params *pause,
11331 struct ecore_rxq_setup_params *rxq_init)
11333 uint8_t max_sge = 0;
11334 uint16_t sge_sz = 0;
11335 uint16_t tpa_agg_size = 0;
11337 pause->sge_th_lo = SGE_TH_LO(sc);
11338 pause->sge_th_hi = SGE_TH_HI(sc);
11340 /* validate the SGE ring has enough entries to cross the high threshold */
11341 if (sc->dropless_fc &&
11342 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11343 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11344 BLOGW(sc, "sge ring threshold limit\n");
11347 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11348 tpa_agg_size = (2 * sc->mtu);
11349 if (tpa_agg_size < sc->max_aggregation_size) {
11350 tpa_agg_size = sc->max_aggregation_size;
11353 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11354 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11355 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11356 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11358 /* pause - not for e1 */
11359 if (!CHIP_IS_E1(sc)) {
11360 pause->bd_th_lo = BD_TH_LO(sc);
11361 pause->bd_th_hi = BD_TH_HI(sc);
11363 pause->rcq_th_lo = RCQ_TH_LO(sc);
11364 pause->rcq_th_hi = RCQ_TH_HI(sc);
11366 /* validate rings have enough entries to cross high thresholds */
11367 if (sc->dropless_fc &&
11368 pause->bd_th_hi + FW_PREFETCH_CNT >
11369 sc->rx_ring_size) {
11370 BLOGW(sc, "rx bd ring threshold limit\n");
11373 if (sc->dropless_fc &&
11374 pause->rcq_th_hi + FW_PREFETCH_CNT >
11375 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11376 BLOGW(sc, "rcq ring threshold limit\n");
11379 pause->pri_map = 1;
11383 rxq_init->dscr_map = fp->rx_dma.paddr;
11384 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11385 rxq_init->rcq_map = fp->rcq_dma.paddr;
11386 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11389 * This should be the maximum number of data bytes that may be
11390 * placed on the BD (not including padding).
11392 rxq_init->buf_sz = (fp->rx_buf_size -
11393 IP_HEADER_ALIGNMENT_PADDING);
11395 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11396 rxq_init->tpa_agg_sz = tpa_agg_size;
11397 rxq_init->sge_buf_sz = sge_sz;
11398 rxq_init->max_sges_pkt = max_sge;
11399 rxq_init->rss_engine_id = SC_FUNC(sc);
11400 rxq_init->mcast_engine_id = SC_FUNC(sc);
11403 * Maximum number of simultaneous TPA aggregations for this Queue.
11404 * For PF Clients it should be the maximum available number.
11405 * VF driver(s) may want to define it to a smaller value.
11407 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11409 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11410 rxq_init->fw_sb_id = fp->fw_sb_id;
11412 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11415 * configure silent vlan removal
11416 * if multi function mode is afex, then mask default vlan
11418 if (IS_MF_AFEX(sc)) {
11419 rxq_init->silent_removal_value =
11420 sc->devinfo.mf_info.afex_def_vlan_tag;
11421 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11426 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11427 struct bxe_fastpath *fp,
11428 struct ecore_txq_setup_params *txq_init,
11432 * XXX If multiple CoS is ever supported then each fastpath structure
11433 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11434 * fp->txdata[cos]->tx_dma.paddr;
11436 txq_init->dscr_map = fp->tx_dma.paddr;
11437 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11438 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11439 txq_init->fw_sb_id = fp->fw_sb_id;
11442 * set the TSS leading client id for TX classification to the
11443 * leading RSS client id
11445 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11449 * This function performs 2 steps in a queue state machine: RESET->INIT, then INIT->SETUP.
11454 bxe_setup_queue(struct bxe_softc *sc,
11455 struct bxe_fastpath *fp,
11458 struct ecore_queue_state_params q_params = { NULL };
11459 struct ecore_queue_setup_params *setup_params =
11460 &q_params.params.setup;
11463 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11465 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11467 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11469 /* we want to wait for completion in this context */
11470 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11472 /* prepare the INIT parameters */
11473 bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11475 /* Set the command */
11476 q_params.cmd = ECORE_Q_CMD_INIT;
11478 /* Change the state to INIT */
11479 rc = ecore_queue_state_change(sc, &q_params);
11481 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11485 BLOGD(sc, DBG_LOAD, "init complete\n");
11487 /* now move the Queue to the SETUP state */
11488 memset(setup_params, 0, sizeof(*setup_params));
11490 /* set Queue flags */
11491 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11493 /* set general SETUP parameters */
11494 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11495 FIRST_TX_COS_INDEX);
11497 bxe_pf_rx_q_prep(sc, fp,
11498 &setup_params->pause_params,
11499 &setup_params->rxq_params);
11501 bxe_pf_tx_q_prep(sc, fp,
11502 &setup_params->txq_params,
11503 FIRST_TX_COS_INDEX);
11505 /* Set the command */
11506 q_params.cmd = ECORE_Q_CMD_SETUP;
11508 /* change the state to SETUP */
11509 rc = ecore_queue_state_change(sc, &q_params);
11511 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11519 bxe_setup_leading(struct bxe_softc *sc)
11521 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11525 bxe_config_rss_pf(struct bxe_softc *sc,
11526 struct ecore_rss_config_obj *rss_obj,
11527 uint8_t config_hash)
11529 struct ecore_config_rss_params params = { NULL };
11533 * Although RSS is meaningless when there is a single HW queue, we
11534 * still need it enabled in order to have the HW Rx hash generated.
11537 params.rss_obj = rss_obj;
11539 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11541 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11543 /* RSS configuration */
11544 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11545 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11546 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11547 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11548 if (rss_obj->udp_rss_v4) {
11549 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11551 if (rss_obj->udp_rss_v6) {
11552 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11556 params.rss_result_mask = MULTI_MASK;
11558 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11562 for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11563 params.rss_key[i] = arc4random();
11566 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11569 return (ecore_config_rss(sc, &params));
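/*
 * Illustrative sketch (hypothetical helper, not driver code): once the
 * parameters above are applied, the HW maps a packet's RSS hash to a
 * client through the indirection table, conceptually:
 */
static inline uint8_t
bxe_example_rss_client(const uint8_t *ind_table, uint32_t table_size,
                       uint32_t rx_hash)
{
    /* the low-order hash bits select an indirection table entry */
    return (ind_table[rx_hash % table_size]);
}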
11573 bxe_config_rss_eth(struct bxe_softc *sc,
11574 uint8_t config_hash)
11576 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11580 bxe_init_rss_pf(struct bxe_softc *sc)
11582 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11586 * Prepare the initial contents of the indirection table if RSS is enabled.
11589 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11590 sc->rss_conf_obj.ind_table[i] =
11591 (sc->fp->cl_id + (i % num_eth_queues));
11595 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11599 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11600 * per-port, so if explicit configuration is needed, do it only
11603 * For 57712 and newer it's a per-function configuration.
11605 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
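/*
 * Illustrative sketch (hypothetical helper, not driver code) of the
 * round-robin indirection-table fill in bxe_init_rss_pf() above. With
 * num_queues = 4 and base_cl_id = 16 it yields 16,17,18,19,16,17,...
 */
static void
bxe_example_rss_ind_fill(uint8_t  *tbl,
                         size_t   tbl_len,
                         uint8_t  base_cl_id,
                         unsigned num_queues)
{
    size_t i;

    for (i = 0; i < tbl_len; i++) {
        tbl[i] = (base_cl_id + (i % num_queues));
    }
}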
11609 bxe_set_mac_one(struct bxe_softc *sc,
11611 struct ecore_vlan_mac_obj *obj,
11614 unsigned long *ramrod_flags)
11616 struct ecore_vlan_mac_ramrod_params ramrod_param;
11619 memset(&ramrod_param, 0, sizeof(ramrod_param));
11621 /* fill in general parameters */
11622 ramrod_param.vlan_mac_obj = obj;
11623 ramrod_param.ramrod_flags = *ramrod_flags;
11625 /* fill a user request section if needed */
11626 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11627 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11629 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11631 /* Set the command: ADD or DEL */
11632 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11633 ECORE_VLAN_MAC_DEL;
11636 rc = ecore_config_vlan_mac(sc, &ramrod_param);
11638 if (rc == ECORE_EXISTS) {
11639 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11640 /* do not treat adding the same MAC as an error */
11642 } else if (rc < 0) {
11643 BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11650 bxe_set_eth_mac(struct bxe_softc *sc,
11653 unsigned long ramrod_flags = 0;
11655 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11657 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11659 /* Eth MAC is set on RSS leading client (fp[0]) */
11660 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11661 &sc->sp_objs->mac_obj,
11662 set, ECORE_ETH_MAC, &ramrod_flags));
11666 bxe_get_cur_phy_idx(struct bxe_softc *sc)
11668 uint32_t sel_phy_idx = 0;
11670 if (sc->link_params.num_phys <= 1) {
11671 return (ELINK_INT_PHY);
11674 if (sc->link_vars.link_up) {
11675 sel_phy_idx = ELINK_EXT_PHY1;
11676 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11677 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11678 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11679 ELINK_SUPPORTED_FIBRE))
11680 sel_phy_idx = ELINK_EXT_PHY2;
11682 switch (elink_phy_selection(&sc->link_params)) {
11683 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11684 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11685 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11686 sel_phy_idx = ELINK_EXT_PHY1;
11688 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11689 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11690 sel_phy_idx = ELINK_EXT_PHY2;
11695 return (sel_phy_idx);
11699 bxe_get_link_cfg_idx(struct bxe_softc *sc)
11701 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11704 * The selected activated PHY is always after swapping (in case PHY
11705 * swapping is enabled). So when swapping is enabled, we need to reverse
11706 * the configuration
11709 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11710 if (sel_phy_idx == ELINK_EXT_PHY1)
11711 sel_phy_idx = ELINK_EXT_PHY2;
11712 else if (sel_phy_idx == ELINK_EXT_PHY2)
11713 sel_phy_idx = ELINK_EXT_PHY1;
11716 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11720 bxe_set_requested_fc(struct bxe_softc *sc)
11723 * Initialize link parameters structure variables.
11724 * It is recommended to turn off RX FC for jumbo frames
11725 * for better performance.
11727 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11728 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11730 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11735 bxe_calc_fc_adv(struct bxe_softc *sc)
11737 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11738 switch (sc->link_vars.ieee_fc &
11739 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11740 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
11742 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11746 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11747 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11751 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11752 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11758 bxe_get_mf_speed(struct bxe_softc *sc)
11760 uint16_t line_speed = sc->link_vars.line_speed;
11763 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11765 /* calculate the current MAX line speed limit for the MF devices */
11766 if (IS_MF_SI(sc)) {
11767 line_speed = (line_speed * maxCfg) / 100;
11768 } else { /* SD mode */
11769 uint16_t vn_max_rate = maxCfg * 100;
11771 if (vn_max_rate < line_speed) {
11772 line_speed = vn_max_rate;
11777 return (line_speed);
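/*
 * Worked example (illustrative helper, not driver code) of the MF speed math
 * above: with line_speed = 10000 Mbps and max_cfg = 25, SI mode gives
 * (10000 * 25) / 100 = 2500 Mbps, and SD mode caps the speed at
 * vn_max_rate = 25 * 100 = 2500 Mbps.
 */
static uint16_t
bxe_example_mf_speed(uint16_t line_speed,
                     uint16_t max_cfg,
                     int      is_si)
{
    uint16_t vn_max_rate;

    if (is_si) {
        return ((line_speed * max_cfg) / 100);
    }

    /* SD mode: clamp to the per-VN maximum rate */
    vn_max_rate = (max_cfg * 100);
    return ((vn_max_rate < line_speed) ? vn_max_rate : line_speed);
}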
11781 bxe_fill_report_data(struct bxe_softc *sc,
11782 struct bxe_link_report_data *data)
11784 uint16_t line_speed = bxe_get_mf_speed(sc);
11786 memset(data, 0, sizeof(*data));
11788 /* fill the report data with the effective line speed */
11789 data->line_speed = line_speed;
11792 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11793 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11797 if (sc->link_vars.duplex == DUPLEX_FULL) {
11798 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11801 /* Rx Flow Control is ON */
11802 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11803 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11806 /* Tx Flow Control is ON */
11807 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11808 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11812 /* report link status to OS, should be called under phy_lock */
11814 bxe_link_report_locked(struct bxe_softc *sc)
11816 struct bxe_link_report_data cur_data;
11818 /* reread mf_cfg */
11819 if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11820 bxe_read_mf_cfg(sc);
11823 /* Read the current link report info */
11824 bxe_fill_report_data(sc, &cur_data);
11826 /* Don't report link down or exactly the same link status twice */
11827 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11828 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11829 &sc->last_reported_link.link_report_flags) &&
11830 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11831 &cur_data.link_report_flags))) {
11837 /* report new link params and remember the state for the next time */
11838 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11840 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11841 &cur_data.link_report_flags)) {
11842 if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
11843 BLOGI(sc, "NIC Link is Down\n");
11845 const char *duplex;
11848 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11849 &cur_data.link_report_flags)) {
11856 * Handle the FC at the end so that only these flags would be
11857 * possibly set. This way we may easily check if there is no FC enabled.
11860 if (cur_data.link_report_flags) {
11861 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11862 &cur_data.link_report_flags) &&
11863 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11864 &cur_data.link_report_flags)) {
11865 flow = "ON - receive & transmit";
11866 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11867 &cur_data.link_report_flags) &&
11868 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11869 &cur_data.link_report_flags)) {
11870 flow = "ON - receive";
11871 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11872 &cur_data.link_report_flags) &&
11873 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11874 &cur_data.link_report_flags)) {
11875 flow = "ON - transmit";
11877 flow = "none"; /* possible? */
11883 if_link_state_change(sc->ifnet, LINK_STATE_UP);
11884 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11885 cur_data.line_speed, duplex, flow);
11890 bxe_link_report(struct bxe_softc *sc)
11892 bxe_acquire_phy_lock(sc);
11893 bxe_link_report_locked(sc);
11894 bxe_release_phy_lock(sc);
11898 bxe_link_status_update(struct bxe_softc *sc)
11900 if (sc->state != BXE_STATE_OPEN) {
11904 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11905 elink_link_status_update(&sc->link_params, &sc->link_vars);
11907 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11908 ELINK_SUPPORTED_10baseT_Full |
11909 ELINK_SUPPORTED_100baseT_Half |
11910 ELINK_SUPPORTED_100baseT_Full |
11911 ELINK_SUPPORTED_1000baseT_Full |
11912 ELINK_SUPPORTED_2500baseX_Full |
11913 ELINK_SUPPORTED_10000baseT_Full |
11914 ELINK_SUPPORTED_TP |
11915 ELINK_SUPPORTED_FIBRE |
11916 ELINK_SUPPORTED_Autoneg |
11917 ELINK_SUPPORTED_Pause |
11918 ELINK_SUPPORTED_Asym_Pause);
11919 sc->port.advertising[0] = sc->port.supported[0];
11921 sc->link_params.sc = sc;
11922 sc->link_params.port = SC_PORT(sc);
11923 sc->link_params.req_duplex[0] = DUPLEX_FULL;
11924 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
11925 sc->link_params.req_line_speed[0] = SPEED_10000;
11926 sc->link_params.speed_cap_mask[0] = 0x7f0000;
11927 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
11929 if (CHIP_REV_IS_FPGA(sc)) {
11930 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
11931 sc->link_vars.line_speed = ELINK_SPEED_1000;
11932 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11933 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11935 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
11936 sc->link_vars.line_speed = ELINK_SPEED_10000;
11937 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11938 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11941 sc->link_vars.link_up = 1;
11943 sc->link_vars.duplex = DUPLEX_FULL;
11944 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11947 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11948 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11949 bxe_link_report(sc);
11954 if (sc->link_vars.link_up) {
11955 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11957 bxe_stats_handle(sc, STATS_EVENT_STOP);
11959 bxe_link_report(sc);
11961 bxe_link_report(sc);
11962 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11967 bxe_initial_phy_init(struct bxe_softc *sc,
11970 int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11971 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11972 struct elink_params *lp = &sc->link_params;
11974 bxe_set_requested_fc(sc);
11976 if (CHIP_REV_IS_SLOW(sc)) {
11977 uint32_t bond = CHIP_BOND_ID(sc);
11980 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11981 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11982 } else if (bond & 0x4) {
11983 if (CHIP_IS_E3(sc)) {
11984 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11986 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11988 } else if (bond & 0x8) {
11989 if (CHIP_IS_E3(sc)) {
11990 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11992 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11996 /* disable EMAC for E3 and above */
11998 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
12001 sc->link_params.feature_config_flags |= feat;
12004 bxe_acquire_phy_lock(sc);
12006 if (load_mode == LOAD_DIAG) {
12007 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
12008 /* Prefer doing PHY loopback at 10G speed, if possible */
12009 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
12010 if (lp->speed_cap_mask[cfg_idx] &
12011 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
12012 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
12014 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
12019 if (load_mode == LOAD_LOOPBACK_EXT) {
12020 lp->loopback_mode = ELINK_LOOPBACK_EXT;
12023 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
12025 bxe_release_phy_lock(sc);
12027 bxe_calc_fc_adv(sc);
12029 if (sc->link_vars.link_up) {
12030 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12031 bxe_link_report(sc);
12034 if (!CHIP_REV_IS_SLOW(sc)) {
12035 bxe_periodic_start(sc);
12038 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12042 /* must be called under IF_ADDR_LOCK */
12044 bxe_init_mcast_macs_list(struct bxe_softc *sc,
12045 struct ecore_mcast_ramrod_params *p)
12047 struct ifnet *ifp = sc->ifnet;
12049 struct ifmultiaddr *ifma;
12050 struct ecore_mcast_list_elem *mc_mac;
12052 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12053 if (ifma->ifma_addr->sa_family != AF_LINK) {
12060 ECORE_LIST_INIT(&p->mcast_list);
12061 p->mcast_list_len = 0;
12067 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12068 (M_NOWAIT | M_ZERO));
12070 BLOGE(sc, "Failed to allocate temp mcast list\n");
12073 bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12075 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
12076 if (ifma->ifma_addr->sa_family != AF_LINK) {
12080 mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
12081 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
12083 BLOGD(sc, DBG_LOAD,
12084 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
12085 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
12086 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
12091 p->mcast_list_len = mc_count;
12097 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12099 struct ecore_mcast_list_elem *mc_mac =
12100 ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12101 struct ecore_mcast_list_elem,
12105 /* only a single free as all mc_macs are in the same heap array */
12106 free(mc_mac, M_DEVBUF);
12111 bxe_set_mc_list(struct bxe_softc *sc)
12113 struct ecore_mcast_ramrod_params rparam = { NULL };
12116 rparam.mcast_obj = &sc->mcast_obj;
12118 BXE_MCAST_LOCK(sc);
12120 /* first, clear all configured multicast MACs */
12121 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12123 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12124 BXE_MCAST_UNLOCK(sc);
12128 /* configure a new MACs list */
12129 rc = bxe_init_mcast_macs_list(sc, &rparam);
12131 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12132 BXE_MCAST_UNLOCK(sc);
12136 /* Now add the new MACs */
12137 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12139 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12142 bxe_free_mcast_macs_list(&rparam);
12144 BXE_MCAST_UNLOCK(sc);
12150 bxe_set_uc_list(struct bxe_softc *sc)
12152 struct ifnet *ifp = sc->ifnet;
12153 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12154 struct ifaddr *ifa;
12155 unsigned long ramrod_flags = 0;
12158 #if __FreeBSD_version < 800000
12161 if_addr_rlock(ifp);
12164 /* first schedule a cleanup of the old configuration */
12165 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12167 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12168 #if __FreeBSD_version < 800000
12169 IF_ADDR_UNLOCK(ifp);
12171 if_addr_runlock(ifp);
12176 ifa = ifp->if_addr;
12178 if (ifa->ifa_addr->sa_family != AF_LINK) {
12179 ifa = TAILQ_NEXT(ifa, ifa_link);
12183 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12184 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12185 if (rc == -EEXIST) {
12186 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12187 /* do not treat adding same MAC as an error */
12189 } else if (rc < 0) {
12190 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12191 #if __FreeBSD_version < 800000
12192 IF_ADDR_UNLOCK(ifp);
12194 if_addr_runlock(ifp);
12199 ifa = TAILQ_NEXT(ifa, ifa_link);
12202 #if __FreeBSD_version < 800000
12203 IF_ADDR_UNLOCK(ifp);
12205 if_addr_runlock(ifp);
12208 /* Execute the pending commands */
12209 bxe_set_bit(RAMROD_CONT, &ramrod_flags);
12210 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12211 ECORE_UC_LIST_MAC, &ramrod_flags));
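/*
 * Note on the pattern above (illustrative): while RAMROD_CONT is clear,
 * bxe_set_mac_one() only schedules ADD/DEL requests; the final call with
 * RAMROD_CONT set and a NULL mac tells ecore to execute whatever is still
 * pending, e.g.:
 *
 *     bxe_set_bit(RAMROD_CONT, &ramrod_flags);
 *     bxe_set_mac_one(sc, NULL, mac_obj, FALSE, ECORE_UC_LIST_MAC,
 *                     &ramrod_flags);
 */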
12215 bxe_set_rx_mode(struct bxe_softc *sc)
12217 struct ifnet *ifp = sc->ifnet;
12218 uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12220 if (sc->state != BXE_STATE_OPEN) {
12221 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12225 BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags);
12227 if (ifp->if_flags & IFF_PROMISC) {
12228 rx_mode = BXE_RX_MODE_PROMISC;
12229 } else if ((ifp->if_flags & IFF_ALLMULTI) ||
12230 ((ifp->if_amcount > BXE_MAX_MULTICAST) &&
12232 rx_mode = BXE_RX_MODE_ALLMULTI;
12235 /* some multicasts */
12236 if (bxe_set_mc_list(sc) < 0) {
12237 rx_mode = BXE_RX_MODE_ALLMULTI;
12239 if (bxe_set_uc_list(sc) < 0) {
12240 rx_mode = BXE_RX_MODE_PROMISC;
12245 sc->rx_mode = rx_mode;
12247 /* schedule the rx_mode command */
12248 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12249 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12250 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12255 bxe_set_storm_rx_mode(sc);
12260 /* update flags in shmem */
12262 bxe_update_drv_flags(struct bxe_softc *sc,
12266 uint32_t drv_flags;
12268 if (SHMEM2_HAS(sc, drv_flags)) {
12269 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12270 drv_flags = SHMEM2_RD(sc, drv_flags);
12273 SET_FLAGS(drv_flags, flags);
12275 RESET_FLAGS(drv_flags, flags);
12278 SHMEM2_WR(sc, drv_flags, drv_flags);
12279 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12281 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12285 /* periodic timer callout routine, only runs when the interface is up */
12288 bxe_periodic_callout_func(void *xsc)
12290 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12293 if (!BXE_CORE_TRYLOCK(sc)) {
12294 /* just bail and try again next time */
12296 if ((sc->state == BXE_STATE_OPEN) &&
12297 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12298 /* schedule the next periodic callout */
12299 callout_reset(&sc->periodic_callout, hz,
12300 bxe_periodic_callout_func, sc);
12306 if ((sc->state != BXE_STATE_OPEN) ||
12307 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12308 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12309 BXE_CORE_UNLOCK(sc);
12313 /* Check for TX timeouts on any fastpath. */
12314 FOR_EACH_QUEUE(sc, i) {
12315 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12316 /* Ruh-Roh, chip was reset! */
12321 if (!CHIP_REV_IS_SLOW(sc)) {
12323 * This barrier is needed to ensure the ordering between the writing
12324 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12325 * the reading here.
12328 if (sc->port.pmf) {
12329 bxe_acquire_phy_lock(sc);
12330 elink_period_func(&sc->link_params, &sc->link_vars);
12331 bxe_release_phy_lock(sc);
12335 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12336 int mb_idx = SC_FW_MB_IDX(sc);
12337 uint32_t drv_pulse;
12338 uint32_t mcp_pulse;
12340 ++sc->fw_drv_pulse_wr_seq;
12341 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12343 drv_pulse = sc->fw_drv_pulse_wr_seq;
12346 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12347 MCP_PULSE_SEQ_MASK);
12350 * The delta between driver pulse and mcp response should
12351 * be 1 (before mcp response) or 0 (after mcp response).
12353 if ((drv_pulse != mcp_pulse) &&
12354 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12355 /* someone lost a heartbeat... */
12356 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12357 drv_pulse, mcp_pulse);
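/*
 * Worked example (illustrative, assuming MCP_PULSE_SEQ_MASK == 0x7fff):
 * drv_pulse = 0x0000 and mcp_pulse = 0x7fff are still in sync, because
 * ((0x7fff + 1) & 0x7fff) == 0x0000 after the sequence wraps.
 */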
12361 /* state is BXE_STATE_OPEN */
12362 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12364 BXE_CORE_UNLOCK(sc);
12366 if ((sc->state == BXE_STATE_OPEN) &&
12367 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12368 /* schedule the next periodic callout */
12369 callout_reset(&sc->periodic_callout, hz,
12370 bxe_periodic_callout_func, sc);
12375 bxe_periodic_start(struct bxe_softc *sc)
12377 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12378 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12382 bxe_periodic_stop(struct bxe_softc *sc)
12384 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12385 callout_drain(&sc->periodic_callout);
12388 /* start the controller */
12389 static __noinline int
12390 bxe_nic_load(struct bxe_softc *sc,
12397 BXE_CORE_LOCK_ASSERT(sc);
12399 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12401 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12404 /* must be called before memory allocation and HW init */
12405 bxe_ilt_set_info(sc);
12408 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12410 bxe_set_fp_rx_buf_size(sc);
12412 if (bxe_alloc_fp_buffers(sc) != 0) {
12413 BLOGE(sc, "Failed to allocate fastpath memory\n");
12414 sc->state = BXE_STATE_CLOSED;
12416 goto bxe_nic_load_error0;
12419 if (bxe_alloc_mem(sc) != 0) {
12420 sc->state = BXE_STATE_CLOSED;
12422 goto bxe_nic_load_error0;
12425 if (bxe_alloc_fw_stats_mem(sc) != 0) {
12426 sc->state = BXE_STATE_CLOSED;
12428 goto bxe_nic_load_error0;
12432 /* set pf load just before approaching the MCP */
12433 bxe_set_pf_load(sc);
12435 /* if MCP exists send load request and analyze response */
12436 if (!BXE_NOMCP(sc)) {
12437 /* attempt to load pf */
12438 if (bxe_nic_load_request(sc, &load_code) != 0) {
12439 sc->state = BXE_STATE_CLOSED;
12441 goto bxe_nic_load_error1;
12444 /* what did the MCP say? */
12445 if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12446 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12447 sc->state = BXE_STATE_CLOSED;
12449 goto bxe_nic_load_error2;
12452 BLOGI(sc, "Device has no MCP!\n");
12453 load_code = bxe_nic_load_no_mcp(sc);
12456 /* mark PMF if applicable */
12457 bxe_nic_load_pmf(sc, load_code);
12459 /* Init Function state controlling object */
12460 bxe_init_func_obj(sc);
12462 /* Initialize HW */
12463 if (bxe_init_hw(sc, load_code) != 0) {
12464 BLOGE(sc, "HW init failed\n");
12465 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12466 sc->state = BXE_STATE_CLOSED;
12468 goto bxe_nic_load_error2;
12472 /* set ALWAYS_ALIVE bit in shmem */
12473 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12475 sc->flags |= BXE_NO_PULSE;
12477 /* attach interrupts */
12478 if (bxe_interrupt_attach(sc) != 0) {
12479 sc->state = BXE_STATE_CLOSED;
12481 goto bxe_nic_load_error2;
12484 bxe_nic_init(sc, load_code);
12486 /* Init per-function objects */
12489 // XXX bxe_iov_nic_init(sc);
12491 /* set AFEX default VLAN tag to an invalid value */
12492 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12493 // XXX bxe_nic_load_afex_dcc(sc, load_code);
12495 sc->state = BXE_STATE_OPENING_WAITING_PORT;
12496 rc = bxe_func_start(sc);
12498 BLOGE(sc, "Function start failed! rc = %d\n", rc);
12499 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12500 sc->state = BXE_STATE_ERROR;
12501 goto bxe_nic_load_error3;
12504 /* send LOAD_DONE command to MCP */
12505 if (!BXE_NOMCP(sc)) {
12506 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12508 BLOGE(sc, "MCP response failure, aborting\n");
12509 sc->state = BXE_STATE_ERROR;
12511 goto bxe_nic_load_error3;
12515 rc = bxe_setup_leading(sc);
12517 BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12518 sc->state = BXE_STATE_ERROR;
12519 goto bxe_nic_load_error3;
12522 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12523 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12525 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12526 sc->state = BXE_STATE_ERROR;
12527 goto bxe_nic_load_error3;
12531 rc = bxe_init_rss_pf(sc);
12533 BLOGE(sc, "PF RSS init failed\n");
12534 sc->state = BXE_STATE_ERROR;
12535 goto bxe_nic_load_error3;
12540 /* now that clients are configured we are ready to work */
12541 sc->state = BXE_STATE_OPEN;
12543 /* Configure a ucast MAC */
12545 rc = bxe_set_eth_mac(sc, TRUE);
12548 BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12549 sc->state = BXE_STATE_ERROR;
12550 goto bxe_nic_load_error3;
12553 if (sc->port.pmf) {
12554 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12556 sc->state = BXE_STATE_ERROR;
12557 goto bxe_nic_load_error3;
12561 sc->link_params.feature_config_flags &=
12562 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12564 /* start fast path */
12566 /* Initialize Rx filter */
12567 bxe_set_rx_mode(sc);
12570 switch (/* XXX load_mode */LOAD_OPEN) {
12576 case LOAD_LOOPBACK_EXT:
12577 sc->state = BXE_STATE_DIAG;
12584 if (sc->port.pmf) {
12585 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12587 bxe_link_status_update(sc);
12590 /* start the periodic timer callout */
12591 bxe_periodic_start(sc);
12593 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12594 /* mark driver is loaded in shmem2 */
12595 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12596 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12598 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12599 DRV_FLAGS_CAPABILITIES_LOADED_L2));
12602 /* wait for all pending SP commands to complete */
12603 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12604 BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12605 bxe_periodic_stop(sc);
12606 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12610 /* Tell the stack the driver is running! */
12611 sc->ifnet->if_drv_flags = IFF_DRV_RUNNING;
12613 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12617 bxe_nic_load_error3:
12620 bxe_int_disable_sync(sc, 1);
12622 /* clean out queued objects */
12623 bxe_squeeze_objects(sc);
12626 bxe_interrupt_detach(sc);
12628 bxe_nic_load_error2:
12630 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12631 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12632 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12637 bxe_nic_load_error1:
12639 /* clear pf_load status, as it was already set */
12641 bxe_clear_pf_load(sc);
12644 bxe_nic_load_error0:
12646 bxe_free_fw_stats_mem(sc);
12647 bxe_free_fp_buffers(sc);
12654 bxe_init_locked(struct bxe_softc *sc)
12656 int other_engine = SC_PATH(sc) ? 0 : 1;
12657 uint8_t other_load_status, load_status;
12658 uint8_t global = FALSE;
12661 BXE_CORE_LOCK_ASSERT(sc);
12663 /* check if the driver is already running */
12664 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
12665 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12669 bxe_set_power_state(sc, PCI_PM_D0);
12672 * If parity occurred during the unload, then attentions and/or
12673 * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function
12674 * loaded on the current engine to complete the recovery. Parity recovery
12675 * is only relevant for the PF driver.
12678 other_load_status = bxe_get_load_status(sc, other_engine);
12679 load_status = bxe_get_load_status(sc, SC_PATH(sc));
12681 if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12682 bxe_chk_parity_attn(sc, &global, TRUE)) {
12685 * If there are attentions and they are in global blocks, set
12686 * the GLOBAL_RESET bit regardless whether it will be this
12687 * function that will complete the recovery or not.
12690 bxe_set_reset_global(sc);
12694 * Only the first function on the current engine should try
12695 * to recover in open. In case of attentions in global blocks
12696 * only the first in the chip should try to recover.
12698 if ((!load_status && (!global || !other_load_status)) &&
12699 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12700 BLOGI(sc, "Recovered during init\n");
12704 /* recovery has failed... */
12705 bxe_set_power_state(sc, PCI_PM_D3hot);
12706 sc->recovery_state = BXE_RECOVERY_FAILED;
12708 BLOGE(sc, "Recovery flow hasn't properly "
12709 "completed yet, try again later. "
12710 "If you still see this message after a "
12711 "few retries then power cycle is required.\n");
12714 goto bxe_init_locked_done;
12719 sc->recovery_state = BXE_RECOVERY_DONE;
12721 rc = bxe_nic_load(sc, LOAD_OPEN);
12723 bxe_init_locked_done:
12726 /* Tell the stack the driver is NOT running! */
12727 BLOGE(sc, "Initialization failed, "
12728 "stack notified driver is NOT running!\n");
12729 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
12736 bxe_stop_locked(struct bxe_softc *sc)
12738 BXE_CORE_LOCK_ASSERT(sc);
12739 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12743 * Handles controller initialization when called from an unlocked routine.
12744 * ifconfig calls this function.
12750 bxe_init(void *xsc)
12752 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12755 bxe_init_locked(sc);
12756 BXE_CORE_UNLOCK(sc);
12760 bxe_init_ifnet(struct bxe_softc *sc)
12764 /* ifconfig entrypoint for media type/status reporting */
12765 ifmedia_init(&sc->ifmedia, IFM_IMASK,
12766 bxe_ifmedia_update,
12767 bxe_ifmedia_status);
12769 /* set the default interface values */
12770 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12771 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12772 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12774 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12776 /* allocate the ifnet structure */
12777 if ((ifp = if_alloc(IFT_ETHER)) == NULL) {
12778 BLOGE(sc, "Interface allocation failed!\n");
12782 ifp->if_softc = sc;
12783 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12784 ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
12785 ifp->if_ioctl = bxe_ioctl;
12786 ifp->if_start = bxe_tx_start;
12787 #if __FreeBSD_version >= 800000
12788 ifp->if_transmit = bxe_tx_mq_start;
12789 ifp->if_qflush = bxe_mq_flush;
12794 ifp->if_init = bxe_init;
12795 ifp->if_mtu = sc->mtu;
12796 ifp->if_hwassist = (CSUM_IP |
12802 ifp->if_capabilities =
12803 #if __FreeBSD_version < 700000
12805 IFCAP_VLAN_HWTAGGING |
12811 IFCAP_VLAN_HWTAGGING |
12813 IFCAP_VLAN_HWFILTER |
12814 IFCAP_VLAN_HWCSUM |
12822 ifp->if_capenable = ifp->if_capabilities;
12823 ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... */
12824 #if __FreeBSD_version < 1000025
12825 ifp->if_baudrate = 1000000000;
12827 if_initbaudrate(ifp, IF_Gbps(10));
12829 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
12831 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
12832 IFQ_SET_READY(&ifp->if_snd);
12836 /* attach to the Ethernet interface list */
12837 ether_ifattach(ifp, sc->link_params.mac_addr);
12843 bxe_deallocate_bars(struct bxe_softc *sc)
12847 for (i = 0; i < MAX_BARS; i++) {
12848 if (sc->bar[i].resource != NULL) {
12849 bus_release_resource(sc->dev,
12852 sc->bar[i].resource);
12853 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12860 bxe_allocate_bars(struct bxe_softc *sc)
12865 memset(sc->bar, 0, sizeof(sc->bar));
12867 for (i = 0; i < MAX_BARS; i++) {
12869 /* memory resources reside at BARs 0, 2, 4 */
12870 /* Run `pciconf -lb` to see mappings */
12871 if ((i != 0) && (i != 2) && (i != 4)) {
12875 sc->bar[i].rid = PCIR_BAR(i);
12879 flags |= RF_SHAREABLE;
12882 if ((sc->bar[i].resource =
12883 bus_alloc_resource_any(sc->dev,
12890 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
12891 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12892 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12894 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n",
12896 (void *)rman_get_start(sc->bar[i].resource),
12897 (void *)rman_get_end(sc->bar[i].resource),
12898 rman_get_size(sc->bar[i].resource),
12899 (void *)sc->bar[i].kva);
12906 bxe_get_function_num(struct bxe_softc *sc)
12911 * Read the ME register to get the function number. The ME register
12912 * holds the relative-function number and absolute-function number. The
12913 * absolute-function number appears only in E2 and above. Before that
12914 * these bits always contained zero, therefore we cannot blindly use them.
12917 val = REG_RD(sc, BAR_ME_REGISTER);
12920 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12922 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12924 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12925 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12927 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12930 BLOGD(sc, DBG_LOAD,
12931 "Relative function %d, Absolute function %d, Path %d\n",
12932 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
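/*
 * Worked example (illustrative): with pfunc_rel = 1 and path_id = 1, the
 * absolute function is ((1 << 1) | 1) = 3 in 4-port mode, but (1 | 1) = 1
 * in 2-port mode, matching the two branches above.
 */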
12936 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12938 uint32_t shmem2_size;
12940 uint32_t mf_cfg_offset_value;
12943 offset = (SHMEM_RD(sc, func_mb) +
12944 (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12947 if (sc->devinfo.shmem2_base != 0) {
12948 shmem2_size = SHMEM2_RD(sc, size);
12949 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12950 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12951 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12952 offset = mf_cfg_offset_value;
12961 bxe_pcie_capability_read(struct bxe_softc *sc,
12967 /* ensure PCIe capability is enabled */
12968 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12969 if (pcie_reg != 0) {
12970 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12971 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12975 BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12981 bxe_is_pcie_pending(struct bxe_softc *sc)
12983 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12984 PCIM_EXP_STA_TRANSACTION_PND);
12988 * Walk the PCI capabilities list for the device to find what features are
12989 * supported. These capabilities may be enabled/disabled by firmware so it's
12990 * best to walk the list rather than make assumptions.
12993 bxe_probe_pci_caps(struct bxe_softc *sc)
12995 uint16_t link_status;
12998 /* check if PCI Power Management is enabled */
12999 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13001 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13003 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13004 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13008 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
13010 /* handle PCIe 2.0 workarounds for 57710 */
13011 if (CHIP_IS_E1(sc)) {
13012 /* workaround for 57710 errata E4_57710_27462 */
13013 sc->devinfo.pcie_link_speed =
13014 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13016 /* workaround for 57710 errata E4_57710_27488 */
13017 sc->devinfo.pcie_link_width =
13018 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13019 if (sc->devinfo.pcie_link_speed > 1) {
13020 sc->devinfo.pcie_link_width =
13021 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
13024 sc->devinfo.pcie_link_speed =
13025 (link_status & PCIM_LINK_STA_SPEED);
13026 sc->devinfo.pcie_link_width =
13027 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
13030 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13031 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
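/*
 * Worked example (illustrative, assuming the standard PCIM_LINK_STA_*
 * masks): on non-57710 parts a link_status of 0x0042 decodes to speed 2
 * (5.0 GT/s) and width ((0x0042 & 0x03f0) >> 4) = 4 lanes.
 */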
13033 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13034 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13036 /* check if MSI capability is enabled */
13037 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13039 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13041 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13042 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13046 /* check if MSI-X capability is enabled */
13047 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13049 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13051 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13052 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13058 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13060 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13063 /* get the outer vlan if we're in switch-dependent mode */
13065 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13066 mf_info->ext_id = (uint16_t)val;
13068 mf_info->multi_vnics_mode = 1;
13070 if (!VALID_OVLAN(mf_info->ext_id)) {
13071 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13075 /* get the capabilities */
13076 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13077 FUNC_MF_CFG_PROTOCOL_ISCSI) {
13078 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13079 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13080 FUNC_MF_CFG_PROTOCOL_FCOE) {
13081 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13083 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13086 mf_info->vnics_per_port =
13087 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13093 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13095 uint32_t retval = 0;
13098 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13100 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13101 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13102 retval |= MF_PROTO_SUPPORT_ETHERNET;
13104 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13105 retval |= MF_PROTO_SUPPORT_ISCSI;
13107 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13108 retval |= MF_PROTO_SUPPORT_FCOE;
13116 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13118 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13122 * There is no outer vlan if we're in switch-independent mode.
13123 * If the MAC is valid, assume multi-function.
13126 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13128 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13130 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13132 mf_info->vnics_per_port =
13133 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13139 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13141 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13142 uint32_t e1hov_tag;
13143 uint32_t func_config;
13144 uint32_t niv_config;
13146 mf_info->multi_vnics_mode = 1;
13148 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13149 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13150 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13153 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13154 FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13156 mf_info->default_vlan =
13157 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13158 FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13160 mf_info->niv_allowed_priorities =
13161 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13162 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13164 mf_info->niv_default_cos =
13165 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13166 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13168 mf_info->afex_vlan_mode =
13169 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13170 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13172 mf_info->niv_mba_enabled =
13173 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13174 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13176 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13178 mf_info->vnics_per_port =
13179 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13185 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13187 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13194 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13196 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13197 mf_info->mf_config[SC_VN(sc)]);
13198 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13199 mf_info->multi_vnics_mode);
13200 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13201 mf_info->vnics_per_port);
13202 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13204 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13205 mf_info->min_bw[0], mf_info->min_bw[1],
13206 mf_info->min_bw[2], mf_info->min_bw[3]);
13207 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13208 mf_info->max_bw[0], mf_info->max_bw[1],
13209 mf_info->max_bw[2], mf_info->max_bw[3]);
13210 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13213 /* various MF mode sanity checks... */
13215 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13216 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13221 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13222 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13223 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13227 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13228 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13229 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13230 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13231 SC_VN(sc), OVLAN(sc));
13235 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13236 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13237 mf_info->multi_vnics_mode, OVLAN(sc));
13242 * Verify all functions are either in MF or SF mode. If MF, make
13243 * sure that all non-hidden functions have a valid ovlan. If SF,
13244 * make sure that all non-hidden functions have an invalid ovlan.
13246 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13247 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13248 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13249 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13250 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13251 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13252 BLOGE(sc, "mf_mode=SD function %d MF config "
13253 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13254 i, mf_info->multi_vnics_mode, ovlan1);
13259 /* Verify all funcs on the same port each have a different ovlan. */
13260 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13261 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13262 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13263 /* iterate from the next function on the port to the max func */
13264 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13265 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13266 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13267 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13268 VALID_OVLAN(ovlan1) &&
13269 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13270 VALID_OVLAN(ovlan2) &&
13271 (ovlan1 == ovlan2)) {
13272 BLOGE(sc, "mf_mode=SD functions %d and %d "
13273 "have the same ovlan (%d)\n",
13279 } /* MULTI_FUNCTION_SD */
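/*
 * Note on the stride in the loop above (illustrative): absolute function
 * IDs interleave across the two ports (port 0 owns 0,2,4,6 and port 1 owns
 * 1,3,5,7), so the inner loop visits the remaining functions on the same
 * port via j = i + 2.
 */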
13285 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13287 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13288 uint32_t val, mac_upper;
13291 /* initialize mf_info defaults */
13292 mf_info->vnics_per_port = 1;
13293 mf_info->multi_vnics_mode = FALSE;
13294 mf_info->path_has_ovlan = FALSE;
13295 mf_info->mf_mode = SINGLE_FUNCTION;
13297 if (!CHIP_IS_MF_CAP(sc)) {
13301 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13302 BLOGE(sc, "Invalid mf_cfg_base!\n");
13306 /* get the MF mode (switch dependent / independent / single-function) */
13308 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13310 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13312 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13314 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13316 /* check for legal upper mac bytes */
13317 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13318 mf_info->mf_mode = MULTI_FUNCTION_SI;
13320 BLOGE(sc, "Invalid config for Switch Independent mode\n");
13325 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13326 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13328 /* get outer vlan configuration */
13329 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13331 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13332 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13333 mf_info->mf_mode = MULTI_FUNCTION_SD;
13335 BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13340 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13342 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13345 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13348 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13349 * and the MAC address is valid.
13351 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13353 if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13354 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13355 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13357 BLOGE(sc, "Invalid config for AFEX mode\n");
13364 BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13365 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13370 /* set path mf_mode (which could be different than function mf_mode) */
13371 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13372 mf_info->path_has_ovlan = TRUE;
13373 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13375 * Decide on path multi vnics mode. If we're not in MF mode and in
13376 * 4-port mode, it is enough to check vnic-0 of the other port
13379 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13380 uint8_t other_port = !(PORT_ID(sc) & 1);
13381 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13383 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13385 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13389 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13390 /* invalid MF config */
13391 if (SC_VN(sc) >= 1) {
13392 BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13399 /* get the MF configuration */
13400 mf_info->mf_config[SC_VN(sc)] =
13401 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13403 switch(mf_info->mf_mode)
13405 case MULTI_FUNCTION_SD:
13407 bxe_get_shmem_mf_cfg_info_sd(sc);
13410 case MULTI_FUNCTION_SI:
13412 bxe_get_shmem_mf_cfg_info_si(sc);
13415 case MULTI_FUNCTION_AFEX:
13417 bxe_get_shmem_mf_cfg_info_niv(sc);
13422 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13427 /* get the congestion management parameters */
13430 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13431 /* get min/max bw */
13432 val = MFCFG_RD(sc, func_mf_config[i].config);
13433 mf_info->min_bw[vnic] =
13434 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13435 mf_info->max_bw[vnic] =
13436 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13440 return (bxe_check_valid_mf_cfg(sc));
13444 bxe_get_shmem_info(struct bxe_softc *sc)
13447 uint32_t mac_hi, mac_lo, val;
13449 port = SC_PORT(sc);
13450 mac_hi = mac_lo = 0;
13452 sc->link_params.sc = sc;
13453 sc->link_params.port = port;
13455 /* get the hardware config info */
13456 sc->devinfo.hw_config =
13457 SHMEM_RD(sc, dev_info.shared_hw_config.config);
13458 sc->devinfo.hw_config2 =
13459 SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13461 sc->link_params.hw_led_mode =
13462 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13463 SHARED_HW_CFG_LED_MODE_SHIFT);
13465 /* get the port feature config */
13467 SHMEM_RD(sc, dev_info.port_feature_config[port].config),
13469 /* get the link params */
13470 sc->link_params.speed_cap_mask[0] =
13471 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13472 sc->link_params.speed_cap_mask[1] =
13473 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13475 /* get the lane config */
13476 sc->link_params.lane_config =
13477 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13479 /* get the link config */
13480 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13481 sc->port.link_config[ELINK_INT_PHY] = val;
13482 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13483 sc->port.link_config[ELINK_EXT_PHY1] =
13484 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13486 /* get the override preemphasis flag and enable it or turn it off */
13487 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13488 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13489 sc->link_params.feature_config_flags |=
13490 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13492 sc->link_params.feature_config_flags &=
13493 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13496 /* get the initial value of the link params */
13497 sc->link_params.multi_phy_config =
13498 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13500 /* get external phy info */
13501 sc->port.ext_phy_config =
13502 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13504 /* get the multifunction configuration */
13505 bxe_get_mf_cfg_info(sc);
13507 /* get the mac address */
13509 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13510 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13512 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13513 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13516 if ((mac_lo == 0) && (mac_hi == 0)) {
13517 *sc->mac_addr_str = 0;
13518 BLOGE(sc, "No Ethernet address programmed!\n");
13520 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13521 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13522 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13523 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13524 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13525 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13526 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13527 "%02x:%02x:%02x:%02x:%02x:%02x",
13528 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13529 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13530 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13531 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13538 bxe_get_tunable_params(struct bxe_softc *sc)
13540 /* sanity checks */
13542 if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13543 (bxe_interrupt_mode != INTR_MODE_MSI) &&
13544 (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13545 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13546 bxe_interrupt_mode = INTR_MODE_MSIX;
13549 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13550 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13551 bxe_queue_count = 0;
13554 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13555 if (bxe_max_rx_bufs == 0) {
13556 bxe_max_rx_bufs = RX_BD_USABLE;
13558 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13559 bxe_max_rx_bufs = 2048;
13563 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13564 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13565 bxe_hc_rx_ticks = 25;
13568 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13569 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13570 bxe_hc_tx_ticks = 50;
13573 if (bxe_max_aggregation_size == 0) {
13574 bxe_max_aggregation_size = TPA_AGG_SIZE;
13577 if (bxe_max_aggregation_size > 0xffff) {
13578 BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13579 bxe_max_aggregation_size);
13580 bxe_max_aggregation_size = TPA_AGG_SIZE;
13583 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13584 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13588 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13589 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13590 bxe_autogreeen = 0;
13593 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13594 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13598 /* pull in user settings */
13600 sc->interrupt_mode = bxe_interrupt_mode;
13601 sc->max_rx_bufs = bxe_max_rx_bufs;
13602 sc->hc_rx_ticks = bxe_hc_rx_ticks;
13603 sc->hc_tx_ticks = bxe_hc_tx_ticks;
13604 sc->max_aggregation_size = bxe_max_aggregation_size;
13605 sc->mrrs = bxe_mrrs;
13606 sc->autogreeen = bxe_autogreeen;
13607 sc->udp_rss = bxe_udp_rss;
13609 if (bxe_interrupt_mode == INTR_MODE_INTX) {
13610 sc->num_queues = 1;
13611 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13613 min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13615 if (sc->num_queues > mp_ncpus) {
13616 sc->num_queues = mp_ncpus;
13620 BLOGD(sc, DBG_LOAD,
13623 "interrupt_mode=%d "
13628 "max_aggregation_size=%d "
13633 sc->interrupt_mode,
13638 sc->max_aggregation_size,
13645 bxe_media_detect(struct bxe_softc *sc)
13647 uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13648 switch (sc->link_params.phy[phy_idx].media_type) {
13649 case ELINK_ETH_PHY_SFPP_10G_FIBER:
13650 case ELINK_ETH_PHY_XFP_FIBER:
13651 BLOGI(sc, "Found 10Gb Fiber media.\n");
13652 sc->media = IFM_10G_SR;
13654 case ELINK_ETH_PHY_SFP_1G_FIBER:
13655 BLOGI(sc, "Found 1Gb Fiber media.\n");
13656 sc->media = IFM_1000_SX;
13658 case ELINK_ETH_PHY_KR:
13659 case ELINK_ETH_PHY_CX4:
13660 BLOGI(sc, "Found 10GBase-CX4 media.\n");
13661 sc->media = IFM_10G_CX4;
13663 case ELINK_ETH_PHY_DA_TWINAX:
13664 BLOGI(sc, "Found 10Gb Twinax media.\n");
13665 sc->media = IFM_10G_TWINAX;
13667 case ELINK_ETH_PHY_BASE_T:
13668 if (sc->link_params.speed_cap_mask[0] &
13669 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13670 BLOGI(sc, "Found 10GBase-T media.\n");
13671 sc->media = IFM_10G_T;
13673 BLOGI(sc, "Found 1000Base-T media.\n");
13674 sc->media = IFM_1000_T;
13677 case ELINK_ETH_PHY_NOT_PRESENT:
13678 BLOGI(sc, "Media not present.\n");
13681 case ELINK_ETH_PHY_UNSPECIFIED:
13683 BLOGI(sc, "Unknown media!\n");
13689 #define GET_FIELD(value, fname) \
13690 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13691 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13692 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
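/*
 * Usage sketch for the helpers above (illustrative): given a raw IGU CAM
 * entry, IGU_FID(val) yields the owning function id and IGU_VEC(val) the
 * vector number, as in the CAM scan below:
 *
 *     val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + (igu_sb_id * 4));
 *     fid = IGU_FID(val);
 *     vec = IGU_VEC(val);
 */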
13695 bxe_get_igu_cam_info(struct bxe_softc *sc)
13697 int pfid = SC_FUNC(sc);
13700 uint8_t fid, igu_sb_cnt = 0;
13702 sc->igu_base_sb = 0xff;
13704 if (CHIP_INT_MODE_IS_BC(sc)) {
13705 int vn = SC_VN(sc);
13706 igu_sb_cnt = sc->igu_sb_cnt;
13707 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13709 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13710 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13714 /* IGU in normal mode - read CAM */
13715 for (igu_sb_id = 0;
13716 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13718 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13719 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13722 fid = IGU_FID(val);
13723 if ((fid & IGU_FID_ENCODE_IS_PF)) {
13724 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13727 if (IGU_VEC(val) == 0) {
13728 /* default status block */
13729 sc->igu_dsb_id = igu_sb_id;
13731 if (sc->igu_base_sb == 0xff) {
13732 sc->igu_base_sb = igu_sb_id;
13740 * Due to the new PF resource allocation by MFW T7.4 and above, the number
13741 * of CAM entries may not be equal to the value advertised in PCI. The
13742 * driver should use the minimal value of both as the actual status block count.
13745 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13747 if (igu_sb_cnt == 0) {
13748 BLOGE(sc, "CAM configuration error\n");
13756 * Gather various information from the device config space, the device itself,
13757 * shmem, and the user input.
13760 bxe_get_device_info(struct bxe_softc *sc)
13765 /* Get the data for the device */
13766 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
13767 sc->devinfo.device_id = pci_get_device(sc->dev);
13768 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13769 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13771 /* get the chip revision (chip metal comes from pci config space) */
13772 sc->devinfo.chip_id =
13773 sc->link_params.chip_id =
13774 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
13775 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
13776 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
13777 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
13779 /* force 57811 according to MISC register */
13780 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13781 if (CHIP_IS_57810(sc)) {
13782 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13783 (sc->devinfo.chip_id & 0x0000ffff));
13784 } else if (CHIP_IS_57810_MF(sc)) {
13785 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13786 (sc->devinfo.chip_id & 0x0000ffff));
13788 sc->devinfo.chip_id |= 0x1;
13791 BLOGD(sc, DBG_LOAD,
13792 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13793 sc->devinfo.chip_id,
13794 ((sc->devinfo.chip_id >> 16) & 0xffff),
13795 ((sc->devinfo.chip_id >> 12) & 0xf),
13796 ((sc->devinfo.chip_id >> 4) & 0xff),
13797 ((sc->devinfo.chip_id >> 0) & 0xf));
13799 val = (REG_RD(sc, 0x2874) & 0x55);
13800 if ((sc->devinfo.chip_id & 0x1) ||
13801 (CHIP_IS_E1(sc) && val) ||
13802 (CHIP_IS_E1H(sc) && (val == 0x55))) {
13803 sc->flags |= BXE_ONE_PORT_FLAG;
13804 BLOGD(sc, DBG_LOAD, "single port device\n");
13807 /* set the doorbell size */
13808 sc->doorbell_size = (1 << BXE_DB_SHIFT);
13810 /* determine whether the device is in 2 port or 4 port mode */
13811 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13812 if (CHIP_IS_E2E3(sc)) {
13814 * Read port4mode_en_ovwr[0]:
13815 * If 1, four port mode is in port4mode_en_ovwr[1].
13816 * If 0, four port mode is in port4mode_en[0].
13818 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13820 val = ((val >> 1) & 1);
13822 val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13825 sc->devinfo.chip_port_mode =
13826 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13828 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13831 /* get the function and path info for the device */
13832 bxe_get_function_num(sc);
13834 /* get the shared memory base address */
13835 sc->devinfo.shmem_base =
13836 sc->link_params.shmem_base =
13837 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13838 sc->devinfo.shmem2_base =
13839 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13840 MISC_REG_GENERIC_CR_0));
13842 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13843 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13845 if (!sc->devinfo.shmem_base) {
13846 /* this should ONLY prevent upcoming shmem reads */
13847 BLOGI(sc, "MCP not active\n");
13848 sc->flags |= BXE_NO_MCP_FLAG;
13852 /* make sure the shared memory contents are valid */
13853 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13854 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13855 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13856 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13859 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13861 /* get the bootcode version */
13862 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13863 snprintf(sc->devinfo.bc_ver_str,
13864 sizeof(sc->devinfo.bc_ver_str),
13866 ((sc->devinfo.bc_ver >> 24) & 0xff),
13867 ((sc->devinfo.bc_ver >> 16) & 0xff),
13868 ((sc->devinfo.bc_ver >> 8) & 0xff));
13869 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13871 /* get the bootcode shmem address */
13872 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13873 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
13875 /* clean indirect addresses as they're not used */
13876 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13878 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13879 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13880 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13881 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13882 if (CHIP_IS_E1x(sc)) {
13883 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13884 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13885 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13886 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13890 * Enable internal target-read (in case we are probed after PF
13891 * FLR). Must be done prior to any BAR read access. Only for 57712 and up.
13894 if (!CHIP_IS_E1x(sc)) {
13895 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13899 /* get the nvram size */
13900 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13901 sc->devinfo.flash_size =
13902 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13903 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13905 /* get PCI capabilities */
13906 bxe_probe_pci_caps(sc);
13908 bxe_set_power_state(sc, PCI_PM_D0);
13910 /* get various configuration parameters from shmem */
13911 bxe_get_shmem_info(sc);
13913 if (sc->devinfo.pcie_msix_cap_reg != 0) {
13914 val = pci_read_config(sc->dev,
13915 (sc->devinfo.pcie_msix_cap_reg +
13918 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13920 sc->igu_sb_cnt = 1;
13923 sc->igu_base_addr = BAR_IGU_INTMEM;
13925 /* initialize IGU parameters */
13926 if (CHIP_IS_E1x(sc)) {
13927 sc->devinfo.int_block = INT_BLOCK_HC;
13928 sc->igu_dsb_id = DEF_SB_IGU_ID;
13929 sc->igu_base_sb = 0;
13931 sc->devinfo.int_block = INT_BLOCK_IGU;
13933 /* do not allow device reset during IGU info processing */
13934 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13936 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13938 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13939 int tout = 5000;
13941 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13943 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13944 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13945 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13947 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13948 tout--;
13949 DELAY(1000);
13950 }
13952 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13953 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13954 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13959 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13960 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13961 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13963 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13966 rc = bxe_get_igu_cam_info(sc);
13968 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13976 * Get base FW non-default (fast path) status block ID. This value is
13977 * used to initialize the fw_sb_id saved on the fp/queue structure to
13978 * determine the id used by the FW.
13980 if (CHIP_IS_E1x(sc)) {
13981 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13984 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13985 * the same queue are indicated on the same IGU SB). So we prefer
13986 * FW and IGU SBs to be the same value.
13988 sc->base_fw_ndsb = sc->igu_base_sb;
13991 BLOGD(sc, DBG_LOAD,
13992 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13993 sc->igu_dsb_id, sc->igu_base_sb,
13994 sc->igu_sb_cnt, sc->base_fw_ndsb);
13996 elink_phy_probe(&sc->link_params);
14002 bxe_link_settings_supported(struct bxe_softc *sc,
14003 uint32_t switch_cfg)
14005 uint32_t cfg_size = 0;
14007 uint8_t port = SC_PORT(sc);
14009 /* aggregation of supported attributes of all external phys */
14010 sc->port.supported[0] = 0;
14011 sc->port.supported[1] = 0;
14013 switch (sc->link_params.num_phys) {
14015 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14019 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14023 if (sc->link_params.multi_phy_config &
14024 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14025 sc->port.supported[1] =
14026 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14027 sc->port.supported[0] =
14028 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14030 sc->port.supported[0] =
14031 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14032 sc->port.supported[1] =
14033 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14039 if (!(sc->port.supported[0] || sc->port.supported[1])) {
14040 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14042 dev_info.port_hw_config[port].external_phy_config),
14044 dev_info.port_hw_config[port].external_phy_config2));
14048 if (CHIP_IS_E3(sc))
14049 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14051 switch (switch_cfg) {
14052 case ELINK_SWITCH_CFG_1G:
14053 sc->port.phy_addr =
14054 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14056 case ELINK_SWITCH_CFG_10G:
14057 sc->port.phy_addr =
14058 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14061 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14062 sc->port.link_config[0]);
14067 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14069 /* mask what we support according to speed_cap_mask per configuration */
14070 for (idx = 0; idx < cfg_size; idx++) {
14071 if (!(sc->link_params.speed_cap_mask[idx] &
14072 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14073 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14076 if (!(sc->link_params.speed_cap_mask[idx] &
14077 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14078 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14081 if (!(sc->link_params.speed_cap_mask[idx] &
14082 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14083 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14086 if (!(sc->link_params.speed_cap_mask[idx] &
14087 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14088 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14091 if (!(sc->link_params.speed_cap_mask[idx] &
14092 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14093 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14096 if (!(sc->link_params.speed_cap_mask[idx] &
14097 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14098 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14101 if (!(sc->link_params.speed_cap_mask[idx] &
14102 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14103 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14106 if (!(sc->link_params.speed_cap_mask[idx] &
14107 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14108 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14112 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14113 sc->port.supported[0], sc->port.supported[1]);
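/*
 * Editorial example (illustrative only): the loop above clears each
 * ELINK_SUPPORTED_* speed bit whose PORT_HW_CFG_SPEED_CAPABILITY_D0_*
 * counterpart is absent from the NVRAM mask. For a 10G-only configuration,
 *
 *   speed_cap_mask[idx] = PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
 *
 * supported[idx] keeps ELINK_SUPPORTED_10000baseT_Full but loses the
 * 10M/100M/1G/2.5G/20G bits; autoneg and pause bits are never masked here.
 */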
14117 bxe_link_settings_requested(struct bxe_softc *sc)
14119 uint32_t link_config;
14121 uint32_t cfg_size = 0;
14123 sc->port.advertising[0] = 0;
14124 sc->port.advertising[1] = 0;
14126 switch (sc->link_params.num_phys) {
14136 for (idx = 0; idx < cfg_size; idx++) {
14137 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14138 link_config = sc->port.link_config[idx];
14140 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14141 case PORT_FEATURE_LINK_SPEED_AUTO:
14142 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14143 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14144 sc->port.advertising[idx] |= sc->port.supported[idx];
14145 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14146 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14147 sc->port.advertising[idx] |=
14148 (ELINK_SUPPORTED_100baseT_Half |
14149 ELINK_SUPPORTED_100baseT_Full);
14151 /* force 10G, no AN */
14152 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14153 sc->port.advertising[idx] |=
14154 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14159 case PORT_FEATURE_LINK_SPEED_10M_FULL:
14160 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14161 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14162 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14165 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14166 "speed_cap_mask=0x%08x\n",
14167 link_config, sc->link_params.speed_cap_mask[idx]);
14172 case PORT_FEATURE_LINK_SPEED_10M_HALF:
14173 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14174 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14175 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14176 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14179 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14180 "speed_cap_mask=0x%08x\n",
14181 link_config, sc->link_params.speed_cap_mask[idx]);
14186 case PORT_FEATURE_LINK_SPEED_100M_FULL:
14187 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14188 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14189 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14192 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14193 "speed_cap_mask=0x%08x\n",
14194 link_config, sc->link_params.speed_cap_mask[idx]);
14199 case PORT_FEATURE_LINK_SPEED_100M_HALF:
14200 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14201 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14202 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14203 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14206 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14207 "speed_cap_mask=0x%08x\n",
14208 link_config, sc->link_params.speed_cap_mask[idx]);
14213 case PORT_FEATURE_LINK_SPEED_1G:
14214 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14215 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14216 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14219 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14220 "speed_cap_mask=0x%08x\n",
14221 link_config, sc->link_params.speed_cap_mask[idx]);
14226 case PORT_FEATURE_LINK_SPEED_2_5G:
14227 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14228 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14229 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14232 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14233 "speed_cap_mask=0x%08x\n",
14234 link_config, sc->link_params.speed_cap_mask[idx]);
14239 case PORT_FEATURE_LINK_SPEED_10G_CX4:
14240 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14241 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14242 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14245 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14246 "speed_cap_mask=0x%08x\n",
14247 link_config, sc->link_params.speed_cap_mask[idx]);
14252 case PORT_FEATURE_LINK_SPEED_20G:
14253 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14257 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14258 "speed_cap_mask=0x%08x\n",
14259 link_config, sc->link_params.speed_cap_mask[idx]);
14260 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14261 sc->port.advertising[idx] = sc->port.supported[idx];
14265 sc->link_params.req_flow_ctrl[idx] =
14266 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14268 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14269 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14270 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14272 bxe_set_requested_fc(sc);
14276 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14277 "req_flow_ctrl=0x%x advertising=0x%x\n",
14278 sc->link_params.req_line_speed[idx],
14279 sc->link_params.req_duplex[idx],
14280 sc->link_params.req_flow_ctrl[idx],
14281 sc->port.advertising[idx]);
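/*
 * Editorial sketch (illustrative only) of the decode performed by the
 * loop above: each link_config word carries a requested-speed field and
 * a flow-control field, extracted with the masks used in this function:
 *
 *   speed = (link_config & PORT_FEATURE_LINK_SPEED_MASK);
 *   fc    = (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
 *
 * PORT_FEATURE_LINK_SPEED_AUTO selects autoneg when the phy supports it;
 * otherwise the code falls back to forced 10G, as seen in the first case.
 */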
14286 bxe_get_phy_info(struct bxe_softc *sc)
14288 uint8_t port = SC_PORT(sc);
14289 uint32_t config = sc->port.config;
14290 uint32_t eee_mode;
14292 /* shmem data already read in bxe_get_shmem_info() */
14294 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14295 "link_config0=0x%08x\n",
14296 sc->link_params.lane_config,
14297 sc->link_params.speed_cap_mask[0],
14298 sc->port.link_config[0]);
14300 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14301 bxe_link_settings_requested(sc);
14303 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14304 sc->link_params.feature_config_flags |=
14305 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14306 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14307 sc->link_params.feature_config_flags &=
14308 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14309 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14310 sc->link_params.feature_config_flags |=
14311 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14314 /* configure link feature according to nvram value */
14316 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14317 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14318 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14319 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14320 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14321 ELINK_EEE_MODE_ENABLE_LPI |
14322 ELINK_EEE_MODE_OUTPUT_TIME);
14324 sc->link_params.eee_mode = 0;
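/*
 * Editorial sketch (illustrative only) of the mask/shift extraction above:
 *
 *   cfg = SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode);
 *   eee_mode = ((cfg & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
 *               PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
 *
 * Any value other than PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED enables the
 * three ELINK_EEE_MODE_* flags set above.
 */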
14327 /* get the media type */
14328 bxe_media_detect(sc);
14332 bxe_get_params(struct bxe_softc *sc)
14334 /* get user tunable params */
14335 bxe_get_tunable_params(sc);
14337 /* select the RX and TX ring sizes */
14338 sc->tx_ring_size = TX_BD_USABLE;
14339 sc->rx_ring_size = RX_BD_USABLE;
14341 /* XXX disable WoL */
14346 bxe_set_modes_bitmap(struct bxe_softc *sc)
14348 uint32_t flags = 0;
14350 if (CHIP_REV_IS_FPGA(sc)) {
14351 SET_FLAGS(flags, MODE_FPGA);
14352 } else if (CHIP_REV_IS_EMUL(sc)) {
14353 SET_FLAGS(flags, MODE_EMUL);
14355 SET_FLAGS(flags, MODE_ASIC);
14358 if (CHIP_IS_MODE_4_PORT(sc)) {
14359 SET_FLAGS(flags, MODE_PORT4);
14361 SET_FLAGS(flags, MODE_PORT2);
14364 if (CHIP_IS_E2(sc)) {
14365 SET_FLAGS(flags, MODE_E2);
14366 } else if (CHIP_IS_E3(sc)) {
14367 SET_FLAGS(flags, MODE_E3);
14368 if (CHIP_REV(sc) == CHIP_REV_Ax) {
14369 SET_FLAGS(flags, MODE_E3_A0);
14370 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14371 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14376 SET_FLAGS(flags, MODE_MF);
14377 switch (sc->devinfo.mf_info.mf_mode) {
14378 case MULTI_FUNCTION_SD:
14379 SET_FLAGS(flags, MODE_MF_SD);
14381 case MULTI_FUNCTION_SI:
14382 SET_FLAGS(flags, MODE_MF_SI);
14384 case MULTI_FUNCTION_AFEX:
14385 SET_FLAGS(flags, MODE_MF_AFEX);
14389 SET_FLAGS(flags, MODE_SF);
14392 #if defined(__LITTLE_ENDIAN)
14393 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14394 #else /* __BIG_ENDIAN */
14395 SET_FLAGS(flags, MODE_BIG_ENDIAN);
14398 INIT_MODE_FLAGS(sc) = flags;
14402 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14404 struct bxe_fastpath *fp;
14405 bus_addr_t busaddr;
14406 int max_agg_queues;
14408 bus_size_t max_size;
14409 bus_size_t max_seg_size;
14414 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14416 /* allocate the parent bus DMA tag */
14417 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14419 0, /* boundary limit */
14420 BUS_SPACE_MAXADDR, /* restricted low */
14421 BUS_SPACE_MAXADDR, /* restricted hi */
14422 NULL, /* addr filter() */
14423 NULL, /* addr filter() arg */
14424 BUS_SPACE_MAXSIZE_32BIT, /* max map size */
14425 BUS_SPACE_UNRESTRICTED, /* num discontinuous */
14426 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
14429 NULL, /* lock() arg */
14430 &sc->parent_dma_tag); /* returned dma tag */
14432 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14436 /************************/
14437 /* DEFAULT STATUS BLOCK */
14438 /************************/
14440 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14441 &sc->def_sb_dma, "default status block") != 0) {
14443 bus_dma_tag_destroy(sc->parent_dma_tag);
14447 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14453 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14454 &sc->eq_dma, "event queue") != 0) {
14456 bxe_dma_free(sc, &sc->def_sb_dma);
14458 bus_dma_tag_destroy(sc->parent_dma_tag);
14462 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14468 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14469 &sc->sp_dma, "slow path") != 0) {
14471 bxe_dma_free(sc, &sc->eq_dma);
14473 bxe_dma_free(sc, &sc->def_sb_dma);
14475 bus_dma_tag_destroy(sc->parent_dma_tag);
14479 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14481 /*******************/
14482 /* SLOW PATH QUEUE */
14483 /*******************/
14485 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14486 &sc->spq_dma, "slow path queue") != 0) {
14488 bxe_dma_free(sc, &sc->sp_dma);
14490 bxe_dma_free(sc, &sc->eq_dma);
14492 bxe_dma_free(sc, &sc->def_sb_dma);
14494 bus_dma_tag_destroy(sc->parent_dma_tag);
14498 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14500 /***************************/
14501 /* FW DECOMPRESSION BUFFER */
14502 /***************************/
14504 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14505 "fw decompression buffer") != 0) {
14507 bxe_dma_free(sc, &sc->spq_dma);
14509 bxe_dma_free(sc, &sc->sp_dma);
14511 bxe_dma_free(sc, &sc->eq_dma);
14513 bxe_dma_free(sc, &sc->def_sb_dma);
14515 bus_dma_tag_destroy(sc->parent_dma_tag);
14519 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14522 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14524 bxe_dma_free(sc, &sc->gz_buf_dma);
14526 bxe_dma_free(sc, &sc->spq_dma);
14528 bxe_dma_free(sc, &sc->sp_dma);
14530 bxe_dma_free(sc, &sc->eq_dma);
14532 bxe_dma_free(sc, &sc->def_sb_dma);
14534 bus_dma_tag_destroy(sc->parent_dma_tag);
14542 /* allocate DMA memory for each fastpath structure */
14543 for (i = 0; i < sc->num_queues; i++) {
14548 /*******************/
14549 /* FP STATUS BLOCK */
14550 /*******************/
14552 snprintf(buf, sizeof(buf), "fp %d status block", i);
14553 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14554 &fp->sb_dma, buf) != 0) {
14555 /* XXX unwind and free previous fastpath allocations */
14556 BLOGE(sc, "Failed to alloc %s\n", buf);
14559 if (CHIP_IS_E2E3(sc)) {
14560 fp->status_block.e2_sb =
14561 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14563 fp->status_block.e1x_sb =
14564 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14568 /******************/
14569 /* FP TX BD CHAIN */
14570 /******************/
14572 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14573 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14574 &fp->tx_dma, buf) != 0) {
14575 /* XXX unwind and free previous fastpath allocations */
14576 BLOGE(sc, "Failed to alloc %s\n", buf);
14579 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14582 /* link together the tx bd chain pages */
14583 for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14584 /* index into the tx bd chain array to last entry per page */
14585 struct eth_tx_next_bd *tx_next_bd =
14586 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14587 /* point to the next page and wrap from last page */
14588 busaddr = (fp->tx_dma.paddr +
14589 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14590 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14591 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
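/*
 * Editorial note on the ring arithmetic above (the rx, rcq, and sge
 * chains below follow the same pattern): the last descriptor of page j
 * points at page (j % NUM_PAGES), so the final page wraps back to page 0
 * and the chain forms a ring. Illustrative, assuming TX_BD_NUM_PAGES == 16:
 *
 *   j = 1   ->  last BD of page 0  points to paddr + BCM_PAGE_SIZE * 1
 *   j = 16  ->  last BD of page 15 points to paddr + BCM_PAGE_SIZE * 0
 *
 * The rx and sge chains index "... * j - 2" because their last two 8-byte
 * descriptors per page are reserved for the next-page pointer.
 */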
14594 /******************/
14595 /* FP RX BD CHAIN */
14596 /******************/
14598 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14599 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14600 &fp->rx_dma, buf) != 0) {
14601 /* XXX unwind and free previous fastpath allocations */
14602 BLOGE(sc, "Failed to alloc %s\n", buf);
14605 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14608 /* link together the rx bd chain pages */
14609 for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14610 /* index into the rx bd chain array to last entry per page */
14611 struct eth_rx_bd *rx_bd =
14612 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14613 /* point to the next page and wrap from last page */
14614 busaddr = (fp->rx_dma.paddr +
14615 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14616 rx_bd->addr_hi = htole32(U64_HI(busaddr));
14617 rx_bd->addr_lo = htole32(U64_LO(busaddr));
14620 /*******************/
14621 /* FP RX RCQ CHAIN */
14622 /*******************/
14624 snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14625 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14626 &fp->rcq_dma, buf) != 0) {
14627 /* XXX unwind and free previous fastpath allocations */
14628 BLOGE(sc, "Failed to alloc %s\n", buf);
14631 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14634 /* link together the rcq chain pages */
14635 for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14636 /* index into the rcq chain array to last entry per page */
14637 struct eth_rx_cqe_next_page *rx_cqe_next =
14638 (struct eth_rx_cqe_next_page *)
14639 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14640 /* point to the next page and wrap from last page */
14641 busaddr = (fp->rcq_dma.paddr +
14642 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14643 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14644 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14647 /*******************/
14648 /* FP RX SGE CHAIN */
14649 /*******************/
14651 snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14652 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14653 &fp->rx_sge_dma, buf) != 0) {
14654 /* XXX unwind and free previous fastpath allocations */
14655 BLOGE(sc, "Failed to alloc %s\n", buf);
14658 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14661 /* link together the sge chain pages */
14662 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14663 /* index into the rx sge chain array to last entry per page */
14664 struct eth_rx_sge *rx_sge =
14665 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14666 /* point to the next page and wrap from last page */
14667 busaddr = (fp->rx_sge_dma.paddr +
14668 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14669 rx_sge->addr_hi = htole32(U64_HI(busaddr));
14670 rx_sge->addr_lo = htole32(U64_LO(busaddr));
14673 /***********************/
14674 /* FP TX MBUF DMA MAPS */
14675 /***********************/
14677 /* set required sizes before mapping to conserve resources */
14678 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
14679 max_size = BXE_TSO_MAX_SIZE;
14680 max_segments = BXE_TSO_MAX_SEGMENTS;
14681 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14683 max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
14684 max_segments = BXE_MAX_SEGMENTS;
14685 max_seg_size = MCLBYTES;
14688 /* create a dma tag for the tx mbufs */
14689 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14691 0, /* boundary limit */
14692 BUS_SPACE_MAXADDR, /* restricted low */
14693 BUS_SPACE_MAXADDR, /* restricted hi */
14694 NULL, /* addr filter() */
14695 NULL, /* addr filter() arg */
14696 max_size, /* max map size */
14697 max_segments, /* num discontinuous */
14698 max_seg_size, /* max seg size */
14701 NULL, /* lock() arg */
14702 &fp->tx_mbuf_tag); /* returned dma tag */
14704 /* XXX unwind and free previous fastpath allocations */
14705 BLOGE(sc, "Failed to create dma tag for "
14706 "'fp %d tx mbufs' (%d)\n", i, rc);
14710 /* create dma maps for each of the tx mbuf clusters */
14711 for (j = 0; j < TX_BD_TOTAL; j++) {
14712 if (bus_dmamap_create(fp->tx_mbuf_tag,
14714 &fp->tx_mbuf_chain[j].m_map)) {
14715 /* XXX unwind and free previous fastpath allocations */
14716 BLOGE(sc, "Failed to create dma map for "
14717 "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14722 /***********************/
14723 /* FP RX MBUF DMA MAPS */
14724 /***********************/
14726 /* create a dma tag for the rx mbufs */
14727 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14729 0, /* boundary limit */
14730 BUS_SPACE_MAXADDR, /* restricted low */
14731 BUS_SPACE_MAXADDR, /* restricted hi */
14732 NULL, /* addr filter() */
14733 NULL, /* addr filter() arg */
14734 MJUM9BYTES, /* max map size */
14735 1, /* num discontinuous */
14736 MJUM9BYTES, /* max seg size */
14739 NULL, /* lock() arg */
14740 &fp->rx_mbuf_tag); /* returned dma tag */
14742 /* XXX unwind and free previous fastpath allocations */
14743 BLOGE(sc, "Failed to create dma tag for "
14744 "'fp %d rx mbufs' (%d)\n", i, rc);
14748 /* create dma maps for each of the rx mbuf clusters */
14749 for (j = 0; j < RX_BD_TOTAL; j++) {
14750 if (bus_dmamap_create(fp->rx_mbuf_tag,
14752 &fp->rx_mbuf_chain[j].m_map)) {
14753 /* XXX unwind and free previous fastpath allocations */
14754 BLOGE(sc, "Failed to create dma map for "
14755 "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14760 /* create dma map for the spare rx mbuf cluster */
14761 if (bus_dmamap_create(fp->rx_mbuf_tag,
14763 &fp->rx_mbuf_spare_map)) {
14764 /* XXX unwind and free previous fastpath allocations */
14765 BLOGE(sc, "Failed to create dma map for "
14766 "'fp %d spare rx mbuf' (%d)\n", i, rc);
14770 /***************************/
14771 /* FP RX SGE MBUF DMA MAPS */
14772 /***************************/
14774 /* create a dma tag for the rx sge mbufs */
14775 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14777 0, /* boundary limit */
14778 BUS_SPACE_MAXADDR, /* restricted low */
14779 BUS_SPACE_MAXADDR, /* restricted hi */
14780 NULL, /* addr filter() */
14781 NULL, /* addr filter() arg */
14782 BCM_PAGE_SIZE, /* max map size */
14783 1, /* num discontinuous */
14784 BCM_PAGE_SIZE, /* max seg size */
14787 NULL, /* lock() arg */
14788 &fp->rx_sge_mbuf_tag); /* returned dma tag */
14790 /* XXX unwind and free previous fastpath allocations */
14791 BLOGE(sc, "Failed to create dma tag for "
14792 "'fp %d rx sge mbufs' (%d)\n", i, rc);
14796 /* create dma maps for the rx sge mbuf clusters */
14797 for (j = 0; j < RX_SGE_TOTAL; j++) {
14798 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14800 &fp->rx_sge_mbuf_chain[j].m_map)) {
14801 /* XXX unwind and free previous fastpath allocations */
14802 BLOGE(sc, "Failed to create dma map for "
14803 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14808 /* create dma map for the spare rx sge mbuf cluster */
14809 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14811 &fp->rx_sge_mbuf_spare_map)) {
14812 /* XXX unwind and free previous fastpath allocations */
14813 BLOGE(sc, "Failed to create dma map for "
14814 "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14818 /***************************/
14819 /* FP RX TPA MBUF DMA MAPS */
14820 /***************************/
14822 /* create dma maps for the rx tpa mbuf clusters */
14823 max_agg_queues = MAX_AGG_QS(sc);
14825 for (j = 0; j < max_agg_queues; j++) {
14826 if (bus_dmamap_create(fp->rx_mbuf_tag,
14828 &fp->rx_tpa_info[j].bd.m_map)) {
14829 /* XXX unwind and free previous fastpath allocations */
14830 BLOGE(sc, "Failed to create dma map for "
14831 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14836 /* create dma map for the spare rx tpa mbuf cluster */
14837 if (bus_dmamap_create(fp->rx_mbuf_tag,
14839 &fp->rx_tpa_info_mbuf_spare_map)) {
14840 /* XXX unwind and free previous fastpath allocations */
14841 BLOGE(sc, "Failed to create dma map for "
14842 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14846 bxe_init_sge_ring_bit_mask(fp);
14853 bxe_free_hsi_mem(struct bxe_softc *sc)
14855 struct bxe_fastpath *fp;
14856 int max_agg_queues;
14859 if (sc->parent_dma_tag == NULL) {
14860 return; /* assume nothing was allocated */
14863 for (i = 0; i < sc->num_queues; i++) {
14866 /*******************/
14867 /* FP STATUS BLOCK */
14868 /*******************/
14870 bxe_dma_free(sc, &fp->sb_dma);
14871 memset(&fp->status_block, 0, sizeof(fp->status_block));
14873 /******************/
14874 /* FP TX BD CHAIN */
14875 /******************/
14877 bxe_dma_free(sc, &fp->tx_dma);
14878 fp->tx_chain = NULL;
14880 /******************/
14881 /* FP RX BD CHAIN */
14882 /******************/
14884 bxe_dma_free(sc, &fp->rx_dma);
14885 fp->rx_chain = NULL;
14887 /*******************/
14888 /* FP RX RCQ CHAIN */
14889 /*******************/
14891 bxe_dma_free(sc, &fp->rcq_dma);
14892 fp->rcq_chain = NULL;
14894 /*******************/
14895 /* FP RX SGE CHAIN */
14896 /*******************/
14898 bxe_dma_free(sc, &fp->rx_sge_dma);
14899 fp->rx_sge_chain = NULL;
14901 /***********************/
14902 /* FP TX MBUF DMA MAPS */
14903 /***********************/
14905 if (fp->tx_mbuf_tag != NULL) {
14906 for (j = 0; j < TX_BD_TOTAL; j++) {
14907 if (fp->tx_mbuf_chain[j].m_map != NULL) {
14908 bus_dmamap_unload(fp->tx_mbuf_tag,
14909 fp->tx_mbuf_chain[j].m_map);
14910 bus_dmamap_destroy(fp->tx_mbuf_tag,
14911 fp->tx_mbuf_chain[j].m_map);
14915 bus_dma_tag_destroy(fp->tx_mbuf_tag);
14916 fp->tx_mbuf_tag = NULL;
14919 /***********************/
14920 /* FP RX MBUF DMA MAPS */
14921 /***********************/
14923 if (fp->rx_mbuf_tag != NULL) {
14924 for (j = 0; j < RX_BD_TOTAL; j++) {
14925 if (fp->rx_mbuf_chain[j].m_map != NULL) {
14926 bus_dmamap_unload(fp->rx_mbuf_tag,
14927 fp->rx_mbuf_chain[j].m_map);
14928 bus_dmamap_destroy(fp->rx_mbuf_tag,
14929 fp->rx_mbuf_chain[j].m_map);
14933 if (fp->rx_mbuf_spare_map != NULL) {
14934 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14935 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14938 /***************************/
14939 /* FP RX TPA MBUF DMA MAPS */
14940 /***************************/
14942 max_agg_queues = MAX_AGG_QS(sc);
14944 for (j = 0; j < max_agg_queues; j++) {
14945 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14946 bus_dmamap_unload(fp->rx_mbuf_tag,
14947 fp->rx_tpa_info[j].bd.m_map);
14948 bus_dmamap_destroy(fp->rx_mbuf_tag,
14949 fp->rx_tpa_info[j].bd.m_map);
14953 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14954 bus_dmamap_unload(fp->rx_mbuf_tag,
14955 fp->rx_tpa_info_mbuf_spare_map);
14956 bus_dmamap_destroy(fp->rx_mbuf_tag,
14957 fp->rx_tpa_info_mbuf_spare_map);
14960 bus_dma_tag_destroy(fp->rx_mbuf_tag);
14961 fp->rx_mbuf_tag = NULL;
14964 /***************************/
14965 /* FP RX SGE MBUF DMA MAPS */
14966 /***************************/
14968 if (fp->rx_sge_mbuf_tag != NULL) {
14969 for (j = 0; j < RX_SGE_TOTAL; j++) {
14970 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14971 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14972 fp->rx_sge_mbuf_chain[j].m_map);
14973 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14974 fp->rx_sge_mbuf_chain[j].m_map);
14978 if (fp->rx_sge_mbuf_spare_map != NULL) {
14979 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14980 fp->rx_sge_mbuf_spare_map);
14981 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14982 fp->rx_sge_mbuf_spare_map);
14985 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14986 fp->rx_sge_mbuf_tag = NULL;
14990 /***************************/
14991 /* FW DECOMPRESSION BUFFER */
14992 /***************************/
14994 bxe_dma_free(sc, &sc->gz_buf_dma);
14996 free(sc->gz_strm, M_DEVBUF);
14997 sc->gz_strm = NULL;
14999 /*******************/
15000 /* SLOW PATH QUEUE */
15001 /*******************/
15003 bxe_dma_free(sc, &sc->spq_dma);
15010 bxe_dma_free(sc, &sc->sp_dma);
15017 bxe_dma_free(sc, &sc->eq_dma);
15020 /************************/
15021 /* DEFAULT STATUS BLOCK */
15022 /************************/
15024 bxe_dma_free(sc, &sc->def_sb_dma);
15027 bus_dma_tag_destroy(sc->parent_dma_tag);
15028 sc->parent_dma_tag = NULL;
15032 * A previous driver DMAE transaction may have occurred when the pre-boot
15033 * stage ended and boot began. This would invalidate the transaction's
15034 * addresses, resulting in the was-error bit being set in the PCI block and
15035 * causing all hw-to-host PCIe transactions to time out. If this happened,
15036 * we want to clear the interrupt which detected this from the pglueb and the was-error bit.
15039 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15043 if (!CHIP_IS_E1x(sc)) {
15044 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15045 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15046 BLOGD(sc, DBG_LOAD,
15047 "Clearing 'was-error' bit that was set in pglueb");
15048 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15054 bxe_prev_mcp_done(struct bxe_softc *sc)
15056 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15057 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15059 BLOGE(sc, "MCP response failure, aborting\n");
15066 static struct bxe_prev_list_node *
15067 bxe_prev_path_get_entry(struct bxe_softc *sc)
15069 struct bxe_prev_list_node *tmp;
15071 LIST_FOREACH(tmp, &bxe_prev_list, node) {
15072 if ((sc->pcie_bus == tmp->bus) &&
15073 (sc->pcie_device == tmp->slot) &&
15074 (SC_PATH(sc) == tmp->path)) {
15083 bxe_prev_is_path_marked(struct bxe_softc *sc)
15085 struct bxe_prev_list_node *tmp;
15088 mtx_lock(&bxe_prev_mtx);
15090 tmp = bxe_prev_path_get_entry(sc);
15093 BLOGD(sc, DBG_LOAD,
15094 "Path %d/%d/%d was marked by AER\n",
15095 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15098 BLOGD(sc, DBG_LOAD,
15099 "Path %d/%d/%d was already cleaned from previous drivers\n",
15100 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15104 mtx_unlock(&bxe_prev_mtx);
15110 bxe_prev_mark_path(struct bxe_softc *sc,
15111 uint8_t after_undi)
15113 struct bxe_prev_list_node *tmp;
15115 mtx_lock(&bxe_prev_mtx);
15117 /* Check whether the entry for this path already exists */
15118 tmp = bxe_prev_path_get_entry(sc);
15121 BLOGD(sc, DBG_LOAD,
15122 "Re-marking AER in path %d/%d/%d\n",
15123 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15125 BLOGD(sc, DBG_LOAD,
15126 "Removing AER indication from path %d/%d/%d\n",
15127 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15131 mtx_unlock(&bxe_prev_mtx);
15135 mtx_unlock(&bxe_prev_mtx);
15137 /* Create an entry for this path and add it */
15138 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15139 (M_NOWAIT | M_ZERO));
15141 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15145 tmp->bus = sc->pcie_bus;
15146 tmp->slot = sc->pcie_device;
15147 tmp->path = SC_PATH(sc);
15149 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15151 mtx_lock(&bxe_prev_mtx);
15153 BLOGD(sc, DBG_LOAD,
15154 "Marked path %d/%d/%d - finished previous unload\n",
15155 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15156 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15158 mtx_unlock(&bxe_prev_mtx);
15164 bxe_do_flr(struct bxe_softc *sc)
15168 /* only E2 and onwards support FLR */
15169 if (CHIP_IS_E1x(sc)) {
15170 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15174 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15175 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15176 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15177 sc->devinfo.bc_ver);
15181 /* Wait for Transaction Pending bit clean */
15182 for (i = 0; i < 4; i++) {
15183 if (i) {
15184 DELAY(((1 << (i - 1)) * 100) * 1000);
15185 }
15187 if (!bxe_is_pcie_pending(sc)) {
15192 BLOGE(sc, "PCIE transaction is not cleared, "
15193 "proceeding with reset anyway\n");
15197 BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15198 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15203 struct bxe_mac_vals {
15204 uint32_t xmac_addr;
15205 uint32_t xmac_val;
15206 uint32_t emac_addr;
15207 uint32_t emac_val;
15208 uint32_t umac_addr;
15209 uint32_t umac_val;
15210 uint32_t bmac_addr;
15211 uint32_t bmac_val[2];
15212 };
15215 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15216 struct bxe_mac_vals *vals)
15218 uint32_t val, base_addr, offset, mask, reset_reg;
15219 uint8_t mac_stopped = FALSE;
15220 uint8_t port = SC_PORT(sc);
15221 uint32_t wb_data[2];
15223 /* reset addresses as they also mark which values were changed */
15224 vals->bmac_addr = 0;
15225 vals->umac_addr = 0;
15226 vals->xmac_addr = 0;
15227 vals->emac_addr = 0;
15229 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15231 if (!CHIP_IS_E3(sc)) {
15232 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15233 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15234 if ((mask & reset_reg) && val) {
15235 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15236 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15237 : NIG_REG_INGRESS_BMAC0_MEM;
15238 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15239 : BIGMAC_REGISTER_BMAC_CONTROL;
15242 * use rd/wr since we cannot use dmae. This is safe
15243 * since MCP won't access the bus due to the request
15244 * to unload, and no function on the path can be
15245 * loaded at this time.
15247 wb_data[0] = REG_RD(sc, base_addr + offset);
15248 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15249 vals->bmac_addr = base_addr + offset;
15250 vals->bmac_val[0] = wb_data[0];
15251 vals->bmac_val[1] = wb_data[1];
15252 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15253 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15254 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15257 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15258 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15259 vals->emac_val = REG_RD(sc, vals->emac_addr);
15260 REG_WR(sc, vals->emac_addr, 0);
15261 mac_stopped = TRUE;
15263 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15264 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15265 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15266 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15267 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15268 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15269 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15270 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15271 REG_WR(sc, vals->xmac_addr, 0);
15272 mac_stopped = TRUE;
15275 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15276 if (mask & reset_reg) {
15277 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15278 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15279 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15280 vals->umac_val = REG_RD(sc, vals->umac_addr);
15281 REG_WR(sc, vals->umac_addr, 0);
15282 mac_stopped = TRUE;
15291 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15292 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
15293 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
15294 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
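/*
 * Editorial example (illustrative only) of the pack/unpack macros above:
 * the TSTORM producer word keeps the BD producer in the high 16 bits and
 * the RCQ producer in the low 16 bits.
 *
 *   prod = 0x00050003                   ->  bd = 0x0005, rcq = 0x0003
 *   BXE_PREV_UNDI_RCQ(prod)             ==  0x0003
 *   BXE_PREV_UNDI_BD(prod)              ==  0x0005
 *   BXE_PREV_UNDI_PROD(0x0004, 0x0006)  ==  0x00060004
 */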
15297 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15302 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15304 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15305 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15307 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15308 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15310 BLOGD(sc, DBG_LOAD,
15311 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15316 bxe_prev_unload_common(struct bxe_softc *sc)
15318 uint32_t reset_reg, tmp_reg = 0, rc;
15319 uint8_t prev_undi = FALSE;
15320 struct bxe_mac_vals mac_vals;
15321 uint32_t timer_count = 1000;
15325 * It is possible that a previous function received the 'common' answer
15326 * but has not loaded yet, creating a scenario in which multiple
15327 * functions receive 'common' on the same path.
15329 BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15331 memset(&mac_vals, 0, sizeof(mac_vals));
15333 if (bxe_prev_is_path_marked(sc)) {
15334 return (bxe_prev_mcp_done(sc));
15337 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15339 /* Reset should be performed after BRB is emptied */
15340 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15341 /* Close the MAC Rx to prevent BRB from filling up */
15342 bxe_prev_unload_close_mac(sc, &mac_vals);
15344 /* close LLH filters towards the BRB */
15345 elink_set_rx_filter(&sc->link_params, 0);
15348 * Check if the UNDI driver was previously loaded.
15349 * UNDI driver initializes CID offset for normal bell to 0x7
15351 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15352 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15353 if (tmp_reg == 0x7) {
15354 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15356 /* clear the UNDI indication */
15357 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15358 /* clear possible idle check errors */
15359 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15363 /* wait until BRB is empty */
15364 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15365 while (timer_count) {
15366 prev_brb = tmp_reg;
15368 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15373 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15375 /* reset timer as long as BRB actually gets emptied */
15376 if (prev_brb > tmp_reg) {
15377 timer_count = 1000;
15382 /* If UNDI resides in memory, manually increment it */
15384 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15390 if (!timer_count) {
15391 BLOGE(sc, "Failed to empty BRB\n");
15395 /* No packets are in the pipeline, path is ready for reset */
15396 bxe_reset_common(sc);
15398 if (mac_vals.xmac_addr) {
15399 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15401 if (mac_vals.umac_addr) {
15402 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15404 if (mac_vals.emac_addr) {
15405 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15407 if (mac_vals.bmac_addr) {
15408 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15409 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15412 rc = bxe_prev_mark_path(sc, prev_undi);
15414 bxe_prev_mcp_done(sc);
15418 return (bxe_prev_mcp_done(sc));
15422 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15426 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15428 /* Test if previous unload process was already finished for this path */
15429 if (bxe_prev_is_path_marked(sc)) {
15430 return (bxe_prev_mcp_done(sc));
15433 BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15436 * If the function has FLR capabilities and the existing FW version
15437 * matches the one required, then FLR will be sufficient to clean any
15438 * residue left by the previous driver.
15440 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15442 /* fw version is good */
15443 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15444 rc = bxe_do_flr(sc);
15448 /* FLR was performed */
15449 BLOGD(sc, DBG_LOAD, "FLR successful\n");
15453 BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15455 /* Close the MCP request, return failure */
15456 rc = bxe_prev_mcp_done(sc);
15458 rc = BXE_PREV_WAIT_NEEDED;
15465 bxe_prev_unload(struct bxe_softc *sc)
15467 int time_counter = 10;
15468 uint32_t fw, hw_lock_reg, hw_lock_val;
15472 * Clear HW from errors which may have resulted from an interrupted
15473 * DMAE transaction.
15475 bxe_prev_interrupted_dmae(sc);
15477 /* Release previously held locks */
15479 (SC_FUNC(sc) <= 5) ?
15480 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15481 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15483 hw_lock_val = (REG_RD(sc, hw_lock_reg));
15485 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15486 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15487 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15488 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15490 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15491 REG_WR(sc, hw_lock_reg, 0xffffffff);
15493 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15496 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15497 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15498 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15502 /* Lock MCP using an unload request */
15503 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15505 BLOGE(sc, "MCP response failure, aborting\n");
15510 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15511 rc = bxe_prev_unload_common(sc);
15515 /* non-common reply from MCP might require looping */
15516 rc = bxe_prev_unload_uncommon(sc);
15517 if (rc != BXE_PREV_WAIT_NEEDED) {
15522 } while (--time_counter);
15524 if (!time_counter || rc) {
15525 BLOGE(sc, "Failed to unload previous driver!"
15526 " time_counter %d rc %d\n", time_counter, rc);
15534 bxe_dcbx_set_state(struct bxe_softc *sc,
15536 uint32_t dcbx_enabled)
15538 if (!CHIP_IS_E1x(sc)) {
15539 sc->dcb_state = dcb_on;
15540 sc->dcbx_enabled = dcbx_enabled;
15542 sc->dcb_state = FALSE;
15543 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15545 BLOGD(sc, DBG_LOAD,
15546 "DCB state [%s:%s]\n",
15547 dcb_on ? "ON" : "OFF",
15548 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15549 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15550 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15551 "on-chip with negotiation" : "invalid");
15554 /* must be called after sriov-enable */
15556 bxe_set_qm_cid_count(struct bxe_softc *sc)
15558 int cid_count = BXE_L2_MAX_CID(sc);
15560 if (IS_SRIOV(sc)) {
15561 cid_count += BXE_VF_CIDS;
15564 if (CNIC_SUPPORT(sc)) {
15565 cid_count += CNIC_CID_MAX;
15568 return (roundup(cid_count, QM_CID_ROUND));
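/*
 * Editorial example (illustrative only): roundup(x, y) pads x up to the
 * next multiple of y. Assuming QM_CID_ROUND is 1024:
 *
 *   roundup(1100, 1024) == 2048
 *
 * so the QM always sees a CID count aligned to its rounding granularity.
 */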
15572 bxe_init_multi_cos(struct bxe_softc *sc)
15576 uint32_t pri_map = 0; /* XXX change to user config */
15578 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15579 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15580 if (cos < sc->max_cos) {
15581 sc->prio_to_cos[pri] = cos;
15583 BLOGW(sc, "Invalid COS %d for priority %d "
15584 "(max COS is %d), setting to 0\n",
15585 cos, pri, (sc->max_cos - 1));
15586 sc->prio_to_cos[pri] = 0;
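/*
 * Editorial example (illustrative only): pri_map packs one CoS nibble
 * per priority, decoded by the shift/mask above. With a hypothetical
 * pri_map = 0x00003210 and max_cos >= 4:
 *
 *   pri 0 -> cos 0, pri 1 -> cos 1, pri 2 -> cos 2, pri 3 -> cos 3,
 *   pri 4..7 -> cos 0
 *
 * Any nibble >= sc->max_cos takes the warning path and maps to CoS 0.
 */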
15592 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15594 struct bxe_softc *sc;
15598 error = sysctl_handle_int(oidp, &result, 0, req);
15600 if (error || !req->newptr) {
15606 sc = (struct bxe_softc *)arg1;
15608 BLOGI(sc, "... dumping driver state ...\n");
15609 temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15610 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15617 bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS)
15619 struct bxe_softc *sc;
15623 error = sysctl_handle_int(oidp, &result, 0, req);
15625 if (error || !req->newptr) {
15630 sc = (struct bxe_softc *)arg1;
15632 BLOGI(sc, "... grcdump start ...\n");
15633 bxe_grc_dump(sc);
15634 BLOGI(sc, "... grcdump done ...\n");
15641 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15643 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15644 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15646 uint64_t value = 0;
15647 int index = (int)arg2;
15649 if (index >= BXE_NUM_ETH_STATS) {
15650 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15654 offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15656 switch (bxe_eth_stats_arr[index].size) {
15658 value = (uint64_t)*offset;
15661 value = HILO_U64(*offset, *(offset + 1));
15664 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15665 index, bxe_eth_stats_arr[index].size);
15669 return (sysctl_handle_64(oidp, &value, 0, req));
15673 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15675 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15676 uint32_t *eth_stats;
15678 uint64_t value = 0;
15679 uint32_t q_stat = (uint32_t)arg2;
15680 uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15681 uint32_t index = (q_stat & 0xffff);
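/*
 * Editorial note: arg2 is the encoding built in bxe_add_sysctls() below,
 * ((fp_index << 16) | stat_index). For example (illustrative values),
 * q_stat 0x00020005 selects statistic 5 of queue 2.
 */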
15683 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15685 if (index >= BXE_NUM_ETH_Q_STATS) {
15686 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15690 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15692 switch (bxe_eth_q_stats_arr[index].size) {
15694 value = (uint64_t)*offset;
15697 value = HILO_U64(*offset, *(offset + 1));
15700 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15701 index, bxe_eth_q_stats_arr[index].size);
15705 return (sysctl_handle_64(oidp, &value, 0, req));
15709 bxe_add_sysctls(struct bxe_softc *sc)
15711 struct sysctl_ctx_list *ctx;
15712 struct sysctl_oid_list *children;
15713 struct sysctl_oid *queue_top, *queue;
15714 struct sysctl_oid_list *queue_top_children, *queue_children;
15715 char queue_num_buf[32];
15719 ctx = device_get_sysctl_ctx(sc->dev);
15720 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15722 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15723 CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15726 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15727 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15728 "bootcode version");
15730 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15731 BCM_5710_FW_MAJOR_VERSION,
15732 BCM_5710_FW_MINOR_VERSION,
15733 BCM_5710_FW_REVISION_VERSION,
15734 BCM_5710_FW_ENGINEERING_VERSION);
15735 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15736 CTLFLAG_RD, sc->fw_ver_str, 0,
15737 "firmware version");
15739 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15740 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
15741 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
15742 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
15743 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15745 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15746 CTLFLAG_RD, sc->mf_mode_str, 0,
15747 "multifunction mode");
15749 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15750 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15751 "multifunction vnics per port");
15753 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15754 CTLFLAG_RD, sc->mac_addr_str, 0,
15757 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15758 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15759 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15760 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15762 sc->devinfo.pcie_link_width);
15763 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15764 CTLFLAG_RD, sc->pci_link_str, 0,
15765 "pci link status");
15767 sc->debug = bxe_debug;
15768 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15769 CTLFLAG_RW, &sc->debug,
15770 "debug logging mode");
15772 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump",
15773 CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15774 bxe_sysctl_trigger_grcdump, "IU",
15775 "set by driver when a grcdump is needed");
15777 sc->grcdump_done = 0;
15778 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15779 CTLFLAG_RW, &sc->grcdump_done, 0,
15780 "set by driver when grcdump is done");
15782 sc->rx_budget = bxe_rx_budget;
15783 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15784 CTLFLAG_RW, &sc->rx_budget, 0,
15785 "rx processing budget");
15787 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15788 CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15789 bxe_sysctl_state, "IU", "dump driver state");
15791 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15792 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15793 bxe_eth_stats_arr[i].string,
15794 CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15795 bxe_sysctl_eth_stat, "LU",
15796 bxe_eth_stats_arr[i].string);
15799 /* add a new parent node for all queues "dev.bxe.#.queue" */
15800 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15801 CTLFLAG_RD, NULL, "queue");
15802 queue_top_children = SYSCTL_CHILDREN(queue_top);
15804 for (i = 0; i < sc->num_queues; i++) {
15805 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15806 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15807 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15808 queue_num_buf, CTLFLAG_RD, NULL,
15810 queue_children = SYSCTL_CHILDREN(queue);
15812 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15813 q_stat = ((i << 16) | j);
15814 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15815 bxe_eth_q_stats_arr[j].string,
15816 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15817 bxe_sysctl_eth_q_stat, "LU",
15818 bxe_eth_q_stats_arr[j].string);
15824 * Device attach function.
15826 * Allocates device resources, performs secondary chip identification, and
15827 * initializes driver instance variables. This function is called from driver
15828 * load after a successful probe.
15831 * 0 = Success, >0 = Failure
15834 bxe_attach(device_t dev)
15836 struct bxe_softc *sc;
15838 sc = device_get_softc(dev);
15840 BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15842 sc->state = BXE_STATE_CLOSED;
15845 sc->unit = device_get_unit(dev);
15847 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15849 sc->pcie_bus = pci_get_bus(dev);
15850 sc->pcie_device = pci_get_slot(dev);
15851 sc->pcie_func = pci_get_function(dev);
15853 /* enable bus master capability */
15854 pci_enable_busmaster(dev);
15857 if (bxe_allocate_bars(sc) != 0) {
15861 /* initialize the mutexes */
15862 bxe_init_mutexes(sc);
15864 /* prepare the periodic callout */
15865 callout_init(&sc->periodic_callout, 0);
15867 /* prepare the chip taskqueue */
15868 sc->chip_tq_flags = CHIP_TQ_NONE;
15869 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15870 "bxe%d_chip_tq", sc->unit);
15871 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15872 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15873 taskqueue_thread_enqueue,
15875 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15876 "%s", sc->chip_tq_name);
15878 /* get device info and set params */
15879 if (bxe_get_device_info(sc) != 0) {
15880 BLOGE(sc, "getting device info\n");
15881 bxe_deallocate_bars(sc);
15882 pci_disable_busmaster(dev);
15886 /* get final misc params */
15887 bxe_get_params(sc);
15889 /* set the default MTU (changed via ifconfig) */
15890 sc->mtu = ETHERMTU;
15892 bxe_set_modes_bitmap(sc);
15895 * If in AFEX mode and the function is configured for FCoE
15896 * then bail... no L2 allowed.
15899 /* get phy settings from shmem and 'and' against admin settings */
15900 bxe_get_phy_info(sc);
15902 /* initialize the FreeBSD ifnet interface */
15903 if (bxe_init_ifnet(sc) != 0) {
15904 bxe_release_mutexes(sc);
15905 bxe_deallocate_bars(sc);
15906 pci_disable_busmaster(dev);
15910 if (bxe_add_cdev(sc) != 0) {
15911 if (sc->ifnet != NULL) {
15912 ether_ifdetach(sc->ifnet);
15914 ifmedia_removeall(&sc->ifmedia);
15915 bxe_release_mutexes(sc);
15916 bxe_deallocate_bars(sc);
15917 pci_disable_busmaster(dev);
15921 /* allocate device interrupts */
15922 if (bxe_interrupt_alloc(sc) != 0) {
15924 if (sc->ifnet != NULL) {
15925 ether_ifdetach(sc->ifnet);
15927 ifmedia_removeall(&sc->ifmedia);
15928 bxe_release_mutexes(sc);
15929 bxe_deallocate_bars(sc);
15930 pci_disable_busmaster(dev);
15935 if (bxe_alloc_ilt_mem(sc) != 0) {
15936 bxe_interrupt_free(sc);
15938 if (sc->ifnet != NULL) {
15939 ether_ifdetach(sc->ifnet);
15941 ifmedia_removeall(&sc->ifmedia);
15942 bxe_release_mutexes(sc);
15943 bxe_deallocate_bars(sc);
15944 pci_disable_busmaster(dev);
15948 /* allocate the host hardware/software hsi structures */
15949 if (bxe_alloc_hsi_mem(sc) != 0) {
15950 bxe_free_ilt_mem(sc);
15951 bxe_interrupt_free(sc);
15953 if (sc->ifnet != NULL) {
15954 ether_ifdetach(sc->ifnet);
15956 ifmedia_removeall(&sc->ifmedia);
15957 bxe_release_mutexes(sc);
15958 bxe_deallocate_bars(sc);
15959 pci_disable_busmaster(dev);
15963 /* need to reset chip if UNDI was active */
15964 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
15967 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
15968 DRV_MSG_SEQ_NUMBER_MASK);
15969 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
15970 bxe_prev_unload(sc);
15975 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15977 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
15978 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
15979 SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
15980 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
15981 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
15982 bxe_dcbx_init_params(sc);
15984 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15988 /* calculate qm_cid_count */
15989 sc->qm_cid_count = bxe_set_qm_cid_count(sc);
15990 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
15993 bxe_init_multi_cos(sc);
15995 bxe_add_sysctls(sc);
16001 * Device detach function.
16003 * Stops the controller, resets the controller, and releases resources.
16006 * 0 = Success, >0 = Failure
16009 bxe_detach(device_t dev)
16011 struct bxe_softc *sc;
16012 struct ifnet *ifp;
16014 sc = device_get_softc(dev);
16016 BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16018 ifp = sc->ifnet;
16019 if (ifp != NULL && ifp->if_vlantrunk != NULL) {
16020 BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16026 /* stop the periodic callout */
16027 bxe_periodic_stop(sc);
16029 /* stop the chip taskqueue */
16030 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16032 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16033 taskqueue_free(sc->chip_tq);
16034 sc->chip_tq = NULL;
16037 /* stop and reset the controller if it was open */
16038 if (sc->state != BXE_STATE_CLOSED) {
16040 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16041 BXE_CORE_UNLOCK(sc);
16044 /* release the network interface */
16046 ether_ifdetach(ifp);
16048 ifmedia_removeall(&sc->ifmedia);
16050 /* XXX do the following based on driver state... */
16052 /* free the host hardware/software hsi structures */
16053 bxe_free_hsi_mem(sc);
16056 bxe_free_ilt_mem(sc);
16058 /* release the interrupts */
16059 bxe_interrupt_free(sc);
16061 /* Release the mutexes */
16062 bxe_release_mutexes(sc);
16064 /* Release the PCIe BAR mapped memory */
16065 bxe_deallocate_bars(sc);
16067 /* Release the FreeBSD interface. */
16068 if (sc->ifnet != NULL) {
16069 if_free(sc->ifnet);
16072 pci_disable_busmaster(dev);
16078 * Device shutdown function.
16080 * Stops and resets the controller.
16086 bxe_shutdown(device_t dev)
16088 struct bxe_softc *sc;
16090 sc = device_get_softc(dev);
16092 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16094 /* stop the periodic callout */
16095 bxe_periodic_stop(sc);
16098 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16099 BXE_CORE_UNLOCK(sc);
16105 bxe_igu_ack_sb(struct bxe_softc *sc,
16112 uint32_t igu_addr = sc->igu_base_addr;
16113 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16114 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16118 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16123 uint32_t data, ctl, cnt = 100;
16124 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16125 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16126 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16127 uint32_t sb_bit = 1 << (idu_sb_id%32);
16128 uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16129 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16131 /* Not supported in BC mode */
16132 if (CHIP_INT_MODE_IS_BC(sc)) {
16136 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16137 IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16138 IGU_REGULAR_CLEANUP_SET |
16139 IGU_REGULAR_BCLEANUP);
16141 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16142 (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16143 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
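/*
 * The cleanup command is issued through the GRC-mapped IGU command
 * interface: first the data word above is written, then (after a write
 * barrier) the control word, which encodes the target address, function
 * id and write type. Completion is detected below by polling the per-SB
 * bit in the cleanup ack register.
 */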
16145 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16146 data, igu_addr_data);
16147 REG_WR(sc, igu_addr_data, data);
16149 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16150 BUS_SPACE_BARRIER_WRITE);
16153 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16154 ctl, igu_addr_ctl);
16155 REG_WR(sc, igu_addr_ctl, ctl);
16157 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16158 BUS_SPACE_BARRIER_WRITE);
16161 /* wait for the cleanup to finish */
16162 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16166 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16167 BLOGD(sc, DBG_LOAD,
16168 "Unable to finish IGU cleanup: "
16169 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16170 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16175 bxe_igu_clear_sb(struct bxe_softc *sc,
16178 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16187 /*******************/
16188 /* ECORE CALLBACKS */
16189 /*******************/
16192 bxe_reset_common(struct bxe_softc *sc)
16194 uint32_t val = 0x1400;
16197 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16199 if (CHIP_IS_E3(sc)) {
16200 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16201 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16204 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16208 bxe_common_init_phy(struct bxe_softc *sc)
16210 uint32_t shmem_base[2];
16211 uint32_t shmem2_base[2];
16213 /* Avoid common init in case MFW supports LFA */
16214 if (SHMEM2_RD(sc, size) >
16215 (uint32_t)offsetof(struct shmem2_region,
16216 lfa_host_addr[SC_PORT(sc)])) {
16220 shmem_base[0] = sc->devinfo.shmem_base;
16221 shmem2_base[0] = sc->devinfo.shmem2_base;
16223 if (!CHIP_IS_E1x(sc)) {
16224 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
16225 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16228 bxe_acquire_phy_lock(sc);
16229 elink_common_init_phy(sc, shmem_base, shmem2_base,
16230 sc->devinfo.chip_id, 0);
16231 bxe_release_phy_lock(sc);
16235 bxe_pf_disable(struct bxe_softc *sc)
16237 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16239 val &= ~IGU_PF_CONF_FUNC_EN;
16241 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16242 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16243 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16247 bxe_init_pxp(struct bxe_softc *sc)
16250 int r_order, w_order;
16252 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16254 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16256 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16258 if (sc->mrrs == -1) {
16259 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16261 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16262 r_order = sc->mrrs;
16265 ecore_init_pxp_arb(sc, r_order, w_order);
16269 bxe_get_pretend_reg(struct bxe_softc *sc)
16271 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16272 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
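/*
 * The per-function pretend registers are evenly spaced; e.g. for a
 * (hypothetical) absolute function 3 this returns
 * PXP2_REG_PGL_PRETEND_FUNC_F0 + 3 * stride.
 */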
16273 return (base + (SC_ABS_FUNC(sc)) * stride);
16277 * Called only on E1H or E2.
16278 * When pretending to be a PF, the pretend value is the function number 0..7.
16279 * When pretending to be a VF, the pretend value is PF-num:VF-valid:ABS-VFID
16283 bxe_pretend_func(struct bxe_softc *sc,
16284 uint16_t pretend_func_val)
16286 uint32_t pretend_reg;
16288 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16292 /* get my own pretend register */
16293 pretend_reg = bxe_get_pretend_reg(sc);
16294 REG_WR(sc, pretend_reg, pretend_func_val);
16295 REG_RD(sc, pretend_reg);
16300 bxe_iov_init_dmae(struct bxe_softc *sc)
16306 bxe_iov_init_dq(struct bxe_softc *sc)
16311 /* send a NIG loopback debug packet */
16313 bxe_lb_pckt(struct bxe_softc *sc)
16315 uint32_t wb_write[3];
16317 /* Ethernet source and destination addresses */
16318 wb_write[0] = 0x55555555;
16319 wb_write[1] = 0x55555555;
16320 wb_write[2] = 0x20; /* SOP */
16321 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16323 /* NON-IP protocol */
16324 wb_write[0] = 0x09000000;
16325 wb_write[1] = 0x55555555;
16326 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
16327 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16331 * Some of the internal memories are not directly readable from the driver.
16332 * To test them we send debug packets.
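 * Rough flow of the test below: isolate the parser from its neighbor
 * blocks, inject one 3-word debug packet via the NIG loopback (the
 * SOP/EOP flags live in the third word) and check that the NIG and PRS
 * packet counters advance; then reset BRB/PRS and repeat with 10 packets
 * plus a CFC search credit before restoring the block inputs.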
16335 bxe_int_mem_test(struct bxe_softc *sc)
16341 if (CHIP_REV_IS_FPGA(sc)) {
16343 } else if (CHIP_REV_IS_EMUL(sc)) {
16349 /* disable inputs of parser neighbor blocks */
16350 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16351 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16352 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16353 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16355 /* write 0 to parser credits for CFC search request */
16356 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16358 /* send Ethernet packet */
16361 /* TODO: do we need to reset the NIG statistics? */
16362 /* Wait until NIG register shows 1 packet of size 0x10 */
16363 count = 1000 * factor;
16365 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16366 val = *BXE_SP(sc, wb_data[0]);
16376 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16380 /* wait until PRS register shows 1 packet */
16381 count = (1000 * factor);
16383 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16393 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16397 /* Reset and init BRB, PRS */
16398 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16400 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16402 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16403 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16405 /* Disable inputs of parser neighbor blocks */
16406 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16407 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16408 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16409 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16411 /* Write 0 to parser credits for CFC search request */
16412 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16414 /* send 10 Ethernet packets */
16415 for (i = 0; i < 10; i++) {
16419 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16420 count = (1000 * factor);
16422 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16423 val = *BXE_SP(sc, wb_data[0]);
16433 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16437 /* Wait until PRS register shows 2 packets */
16438 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16440 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16443 /* Write 1 to parser credits for CFC search request */
16444 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16446 /* Wait until PRS register shows 3 packets */
16447 DELAY(10000 * factor);
16449 /* Wait until NIG register shows 1 packet of size 0x10 */
16450 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16452 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16455 /* clear NIG EOP FIFO */
16456 for (i = 0; i < 11; i++) {
16457 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16460 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16462 BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16466 /* Reset and init BRB, PRS, NIG */
16467 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16469 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16471 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16472 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16473 if (!CNIC_SUPPORT(sc)) {
16475 REG_WR(sc, PRS_REG_NIC_MODE, 1);
16478 /* Enable inputs of parser neighbor blocks */
16479 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16480 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16481 REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16482 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16488 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16495 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16496 SHARED_HW_CFG_FAN_FAILURE_MASK);
16498 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16502 * The fan failure mechanism is usually related to the PHY type since
16503 * the power consumption of the board is affected by the PHY. Currently,
16504 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16506 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16507 for (port = PORT_0; port < PORT_MAX; port++) {
16508 is_required |= elink_fan_failure_det_req(sc,
16509 sc->devinfo.shmem_base,
16510 sc->devinfo.shmem2_base,
16515 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16517 if (is_required == 0) {
16521 /* Fan failure is indicated by SPIO 5 */
16522 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16524 /* set to active low mode */
16525 val = REG_RD(sc, MISC_REG_SPIO_INT);
16526 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16527 REG_WR(sc, MISC_REG_SPIO_INT, val);
16529 /* enable interrupt to signal the IGU */
16530 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16531 val |= MISC_SPIO_SPIO5;
16532 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16536 bxe_enable_blocks_attention(struct bxe_softc *sc)
16540 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16541 if (!CHIP_IS_E1x(sc)) {
16542 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16544 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16546 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16547 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16549 * mask read length error interrupts in brb for the parser
16550 * (parsing unit and 'checksum and crc' unit);
16551 * these errors are legal (the PU reads a fixed length and the CAC can
16552 * cause a read length error on truncated packets)
16554 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16555 REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16556 REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16557 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16558 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16559 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16560 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16561 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16562 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16563 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16564 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16565 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16566 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16567 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16568 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16569 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16570 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16571 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16572 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16574 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16575 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16576 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16577 if (!CHIP_IS_E1x(sc)) {
16578 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16579 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16581 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16583 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16584 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16585 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16586 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16588 if (!CHIP_IS_E1x(sc)) {
16589 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16590 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16593 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16594 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16595 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16596 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
16600 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16602 * @sc: driver handle
16605 bxe_init_hw_common(struct bxe_softc *sc)
16607 uint8_t abs_func_id;
16610 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16614 * take the RESET lock to protect undi_unload flow from accessing
16615 * registers while we are resetting the chip
16617 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16619 bxe_reset_common(sc);
16621 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16624 if (CHIP_IS_E3(sc)) {
16625 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16626 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16629 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16631 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16633 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16634 BLOGD(sc, DBG_LOAD, "after misc block init\n");
16636 if (!CHIP_IS_E1x(sc)) {
16638 * In 4-port mode or 2-port mode we need to turn off master-enable for
16639 * everyone. After that we turn it back on for ourselves. So we disregard
16640 * multi-function and always disable all functions on the given path;
16641 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
16643 for (abs_func_id = SC_PATH(sc);
16644 abs_func_id < (E2_FUNC_MAX * 2);
16645 abs_func_id += 2) {
16646 if (abs_func_id == SC_ABS_FUNC(sc)) {
16647 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16651 bxe_pretend_func(sc, abs_func_id);
16653 /* clear pf enable */
16654 bxe_pf_disable(sc);
16656 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16660 BLOGD(sc, DBG_LOAD, "after pf disable\n");
16662 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16664 if (CHIP_IS_E1(sc)) {
16666 * enable HW interrupt from PXP on USDM overflow
16667 * bit 16 on INT_MASK_0
16669 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16672 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16675 #ifdef __BIG_ENDIAN
16676 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16677 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16678 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16679 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16680 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16681 /* make sure this value is 0 */
16682 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16684 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16685 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16686 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16687 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16688 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16691 ecore_ilt_init_page_size(sc, INITOP_SET);
16693 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16694 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16697 /* let the HW do its magic... */
16700 /* finish PXP init */
16701 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16703 BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16707 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16709 BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16713 BLOGD(sc, DBG_LOAD, "after pxp init\n");
16716 * Timer bug workaround for E2 only. We need to set the entire ILT to have
16717 * entries with value "0" and valid bit on. This needs to be done by the
16718 * first PF that is loaded in a path (i.e. common phase)
16720 if (!CHIP_IS_E1x(sc)) {
16722 * In E2 there is a bug in the timers block that can cause function 6 / 7
16723 * (i.e. vnic3) to start even if it is marked as "scan-off".
16724 * This occurs when a different function (func2,3) is being marked
16725 * as "scan-off". Real-life scenario for example: if a driver is being
16726 * load-unloaded while func6,7 are down. This will cause the timer to access
16727 * the ilt, translate to a logical address and send a request to read/write.
16728 * Since the ilt for the function that is down is not valid, this will cause
16729 * a translation error which is unrecoverable.
16730 * The Workaround is intended to make sure that when this happens nothing
16731 * fatal will occur. The workaround:
16732 * 1. First PF driver which loads on a path will:
16733 * a. After taking the chip out of reset, by using pretend,
16734 * it will write "0" to the following registers of
16736 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16737 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16738 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16739 * And for itself it will write '1' to
16740 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16741 * dmae-operations (writing to pram for example.)
16742 * note: can be done for only function 6,7 but cleaner this
16744 * b. Write zero+valid to the entire ILT.
16745 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
16746 * VNIC3 (of that port). The range allocated will be the
16747 * entire ILT. This is needed to prevent ILT range error.
16748 * 2. Any PF driver load flow:
16749 * a. ILT update with the physical addresses of the allocated
16751 * b. Wait 20msec. - note that this timeout is needed to make
16752 * sure there are no requests in one of the PXP internal
16753 * queues with "old" ILT addresses.
16754 * c. PF enable in the PGLC.
16755 * d. Clear the was_error of the PF in the PGLC. (could have
16756 * occurred while driver was down)
16757 * e. PF enable in the CFC (WEAK + STRONG)
16758 * f. Timers scan enable
16759 * 3. PF driver unload flow:
16760 * a. Clear the Timers scan_en.
16761 * b. Polling for scan_on=0 for that PF.
16762 * c. Clear the PF enable bit in the PXP.
16763 * d. Clear the PF enable in the CFC (WEAK + STRONG)
16764 * e. Write zero+valid to all ILT entries (The valid bit must
16766 * f. If this is VNIC 3 of a port then also init
16767 * first_timers_ilt_entry to zero and last_timers_ilt_entry
16768 * to the last entry in the ILT.
16771 * Currently the PF error in the PGLC is non-recoverable.
16772 * In the future there will be a recovery routine for this error.
16773 * Currently attention is masked.
16774 * Having an MCP lock on the load/unload process does not guarantee that
16775 * there is no Timer disable during Func6/7 enable. This is because the
16776 * Timers scan is currently being cleared by the MCP on FLR.
16777 * Step 2.d can be done only for PF6/7 and the driver can also check if
16778 * there is error before clearing it. But the flow above is simpler and
16780 * All ILT entries are written with zero+valid and not just the PF6/7
16781 * ILT entries, since in the future the ILT entry allocation for
16782 * PFs might be dynamic.
16784 struct ilt_client_info ilt_cli;
16785 struct ecore_ilt ilt;
16787 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16788 memset(&ilt, 0, sizeof(struct ecore_ilt));
16790 /* initialize dummy TM client */
16792 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
16793 ilt_cli.client_num = ILT_CLIENT_TM;
16796 * Step 1: set zeroes to all ilt page entries with valid bit on
16797 * Step 2: set the timers first/last ilt entry to point
16798 * to the entire range to prevent ILT range error for 3rd/4th
16799 * vnic (this code assumes existence of the vnic)
16801 * both steps performed by call to ecore_ilt_client_init_op()
16802 * with dummy TM client
16804 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16805 * and its counterpart are split registers
16808 bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16809 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16810 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
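/*
 * The three calls above form the usual pretend bracket: pretend to be
 * the vnic-3 function of this path (SC_PATH(sc) + 6), program the split
 * FIRST/LAST ILT registers through the dummy TM client, then restore
 * our own absolute function.
 */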
16812 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16813 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16814 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16817 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16818 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16820 if (!CHIP_IS_E1x(sc)) {
16821 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16822 (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16824 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16825 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16827 /* let the HW do its magic... */
16830 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16831 } while (factor-- && (val != 1));
16834 BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16839 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16841 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16843 bxe_iov_init_dmae(sc);
16845 /* clean the DMAE memory */
16846 sc->dmae_ready = 1;
16847 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16849 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16851 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16853 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16855 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16857 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16858 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16859 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16860 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
16862 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
16864 /* QM queues pointers table */
16865 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
16867 /* soft reset pulse */
16868 REG_WR(sc, QM_REG_SOFT_RESET, 1);
16869 REG_WR(sc, QM_REG_SOFT_RESET, 0);
16871 if (CNIC_SUPPORT(sc))
16872 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
16874 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
16875 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
16876 if (!CHIP_REV_IS_SLOW(sc)) {
16877 /* enable hw interrupt from doorbell Q */
16878 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16881 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16883 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16884 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
16886 if (!CHIP_IS_E1(sc)) {
16887 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
16890 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
16891 if (IS_MF_AFEX(sc)) {
16893 * configure so that AFEX and VLAN headers must be
16894 * received in AFEX mode
16896 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
16897 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
16898 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
16899 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
16900 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
16903 * Bit-map indicating which L2 hdrs may appear
16904 * after the basic Ethernet header
16906 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
16907 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16911 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
16912 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
16913 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
16914 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
16916 if (!CHIP_IS_E1x(sc)) {
16917 /* reset VFC memories */
16918 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16919 VFC_MEMORIES_RST_REG_CAM_RST |
16920 VFC_MEMORIES_RST_REG_RAM_RST);
16921 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16922 VFC_MEMORIES_RST_REG_CAM_RST |
16923 VFC_MEMORIES_RST_REG_RAM_RST);
16928 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
16929 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
16930 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
16931 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
16933 /* sync semi rtc */
16934 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
16936 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
16939 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
16940 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
16941 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
16943 if (!CHIP_IS_E1x(sc)) {
16944 if (IS_MF_AFEX(sc)) {
16946 * configure so that AFEX and VLAN headers must be
16947 * sent in AFEX mode
16949 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
16950 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
16951 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
16952 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
16953 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
16955 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
16956 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16960 REG_WR(sc, SRC_REG_SOFT_RST, 1);
16962 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
16964 if (CNIC_SUPPORT(sc)) {
16965 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
16966 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
16967 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
16968 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
16969 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
16970 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
16971 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
16972 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
16973 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
16974 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
16976 REG_WR(sc, SRC_REG_SOFT_RST, 0);
16978 if (sizeof(union cdu_context) != 1024) {
16979 /* we currently assume that a context is 1024 bytes */
16980 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
16981 (long)sizeof(union cdu_context));
16984 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
16985 val = (4 << 24) + (0 << 12) + 1024;
16986 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
16988 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
16990 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
16991 /* enable context validation interrupt from CFC */
16992 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16994 /* set the thresholds to prevent CFC/CDU race */
16995 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
16996 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
16998 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
16999 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17002 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17003 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17005 /* Reset PCIE errors for debug */
17006 REG_WR(sc, 0x2814, 0xffffffff);
17007 REG_WR(sc, 0x3820, 0xffffffff);
17009 if (!CHIP_IS_E1x(sc)) {
17010 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17011 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17012 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17013 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17014 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17015 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17016 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17017 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17018 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17019 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17020 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17023 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17025 if (!CHIP_IS_E1(sc)) {
17026 /* in E3 this is done in the per-port section */
17027 if (!CHIP_IS_E3(sc))
17028 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17031 if (CHIP_IS_E1H(sc)) {
17032 /* not applicable for E2 (and above ...) */
17033 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17036 if (CHIP_REV_IS_SLOW(sc)) {
17040 /* finish CFC init */
17041 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17043 BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17046 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17048 BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17051 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17053 BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17056 REG_WR(sc, CFC_REG_DEBUG0, 0);
17058 if (CHIP_IS_E1(sc)) {
17059 /* read NIG statistic to see if this is our first up since powerup */
17060 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17061 val = *BXE_SP(sc, wb_data[0]);
17063 /* do internal memory self test */
17064 if ((val == 0) && bxe_int_mem_test(sc)) {
17065 BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17070 bxe_setup_fan_failure_detection(sc);
17072 /* clear PXP2 attentions */
17073 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17075 bxe_enable_blocks_attention(sc);
17077 if (!CHIP_REV_IS_SLOW(sc)) {
17078 ecore_enable_blocks_parity(sc);
17081 if (!BXE_NOMCP(sc)) {
17082 if (CHIP_IS_E1x(sc)) {
17083 bxe_common_init_phy(sc);
17091 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17093 * @sc: driver handle
17096 bxe_init_hw_common_chip(struct bxe_softc *sc)
17098 int rc = bxe_init_hw_common(sc);
17101 BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17105 /* In E2 2-PORT mode, same ext phy is used for the two paths */
17106 if (!BXE_NOMCP(sc)) {
17107 bxe_common_init_phy(sc);
17114 bxe_init_hw_port(struct bxe_softc *sc)
17116 int port = SC_PORT(sc);
17117 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17118 uint32_t low, high;
17121 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17123 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17125 ecore_init_block(sc, BLOCK_MISC, init_phase);
17126 ecore_init_block(sc, BLOCK_PXP, init_phase);
17127 ecore_init_block(sc, BLOCK_PXP2, init_phase);
17130 * Timers bug workaround: the common phase disables the pf_master bit
17131 * in pglue, so we need to enable it here before any dmae accesses are
17132 * attempted. Therefore we manually add the enable-master to the
17133 * port phase (it also happens in the function phase)
17135 if (!CHIP_IS_E1x(sc)) {
17136 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17139 ecore_init_block(sc, BLOCK_ATC, init_phase);
17140 ecore_init_block(sc, BLOCK_DMAE, init_phase);
17141 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17142 ecore_init_block(sc, BLOCK_QM, init_phase);
17144 ecore_init_block(sc, BLOCK_TCM, init_phase);
17145 ecore_init_block(sc, BLOCK_UCM, init_phase);
17146 ecore_init_block(sc, BLOCK_CCM, init_phase);
17147 ecore_init_block(sc, BLOCK_XCM, init_phase);
17149 /* QM cid (connection) count */
17150 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17152 if (CNIC_SUPPORT(sc)) {
17153 ecore_init_block(sc, BLOCK_TM, init_phase);
17154 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17155 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17158 ecore_init_block(sc, BLOCK_DORQ, init_phase);
17160 ecore_init_block(sc, BLOCK_BRB1, init_phase);
17162 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17164 low = (BXE_ONE_PORT(sc) ? 160 : 246);
17165 } else if (sc->mtu > 4096) {
17166 if (BXE_ONE_PORT(sc)) {
17170 /* (24*1024 + val*4)/256 */
17171 low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17174 low = (BXE_ONE_PORT(sc) ? 80 : 160);
17176 high = (low + 56); /* 14*1024/256 */
17177 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17178 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
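/*
 * Sanity check on the arithmetic above: the thresholds appear to be in
 * units of 256-byte blocks, so the fixed low/high gap of 56 blocks
 * corresponds to 14KB (14 * 1024 / 256 == 56), matching the inline
 * comment.
 */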
17181 if (CHIP_IS_MODE_4_PORT(sc)) {
17182 REG_WR(sc, SC_PORT(sc) ?
17183 BRB1_REG_MAC_GUARANTIED_1 :
17184 BRB1_REG_MAC_GUARANTIED_0, 40);
17187 ecore_init_block(sc, BLOCK_PRS, init_phase);
17188 if (CHIP_IS_E3B0(sc)) {
17189 if (IS_MF_AFEX(sc)) {
17190 /* configure headers for AFEX mode */
17191 REG_WR(sc, SC_PORT(sc) ?
17192 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17193 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17194 REG_WR(sc, SC_PORT(sc) ?
17195 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17196 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17197 REG_WR(sc, SC_PORT(sc) ?
17198 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17199 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17201 /* Ovlan exists only if we are in multi-function +
17202 * switch-dependent mode; in switch-independent mode there
17203 * are no ovlan headers
17205 REG_WR(sc, SC_PORT(sc) ?
17206 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17207 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17208 (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17212 ecore_init_block(sc, BLOCK_TSDM, init_phase);
17213 ecore_init_block(sc, BLOCK_CSDM, init_phase);
17214 ecore_init_block(sc, BLOCK_USDM, init_phase);
17215 ecore_init_block(sc, BLOCK_XSDM, init_phase);
17217 ecore_init_block(sc, BLOCK_TSEM, init_phase);
17218 ecore_init_block(sc, BLOCK_USEM, init_phase);
17219 ecore_init_block(sc, BLOCK_CSEM, init_phase);
17220 ecore_init_block(sc, BLOCK_XSEM, init_phase);
17222 ecore_init_block(sc, BLOCK_UPB, init_phase);
17223 ecore_init_block(sc, BLOCK_XPB, init_phase);
17225 ecore_init_block(sc, BLOCK_PBF, init_phase);
17227 if (CHIP_IS_E1x(sc)) {
17228 /* configure PBF to work without PAUSE mtu 9000 */
17229 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17231 /* update threshold */
17232 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17233 /* update init credit */
17234 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17236 /* probe changes */
17237 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17239 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17242 if (CNIC_SUPPORT(sc)) {
17243 ecore_init_block(sc, BLOCK_SRC, init_phase);
17246 ecore_init_block(sc, BLOCK_CDU, init_phase);
17247 ecore_init_block(sc, BLOCK_CFC, init_phase);
17249 if (CHIP_IS_E1(sc)) {
17250 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17251 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17253 ecore_init_block(sc, BLOCK_HC, init_phase);
17255 ecore_init_block(sc, BLOCK_IGU, init_phase);
17257 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17258 /* init aeu_mask_attn_func_0/1:
17259 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17260 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17261 * bits 4-7 are used for "per vn group attention" */
17262 val = IS_MF(sc) ? 0xF7 : 0x7;
17263 /* Enable DCBX attention for all but E1 */
17264 val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17265 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17267 ecore_init_block(sc, BLOCK_NIG, init_phase);
17269 if (!CHIP_IS_E1x(sc)) {
17270 /* Bit-map indicating which L2 hdrs may appear after the
17271 * basic Ethernet header
17273 if (IS_MF_AFEX(sc)) {
17274 REG_WR(sc, SC_PORT(sc) ?
17275 NIG_REG_P1_HDRS_AFTER_BASIC :
17276 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17278 REG_WR(sc, SC_PORT(sc) ?
17279 NIG_REG_P1_HDRS_AFTER_BASIC :
17280 NIG_REG_P0_HDRS_AFTER_BASIC,
17281 IS_MF_SD(sc) ? 7 : 6);
17284 if (CHIP_IS_E3(sc)) {
17285 REG_WR(sc, SC_PORT(sc) ?
17286 NIG_REG_LLH1_MF_MODE :
17287 NIG_REG_LLH_MF_MODE, IS_MF(sc));
17290 if (!CHIP_IS_E3(sc)) {
17291 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17294 if (!CHIP_IS_E1(sc)) {
17295 /* 0x2 disable mf_ov, 0x1 enable */
17296 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17297 (IS_MF_SD(sc) ? 0x1 : 0x2));
17299 if (!CHIP_IS_E1x(sc)) {
17301 switch (sc->devinfo.mf_info.mf_mode) {
17302 case MULTI_FUNCTION_SD:
17305 case MULTI_FUNCTION_SI:
17306 case MULTI_FUNCTION_AFEX:
17311 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17312 NIG_REG_LLH0_CLS_TYPE), val);
17314 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17315 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17316 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17319 /* If SPIO5 is set to generate interrupts, enable it for this port */
17320 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17321 if (val & MISC_SPIO_SPIO5) {
17322 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17323 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17324 val = REG_RD(sc, reg_addr);
17325 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17326 REG_WR(sc, reg_addr, val);
17333 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17336 uint32_t poll_count)
17338 uint32_t cur_cnt = poll_count;
17341 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17342 DELAY(FLR_WAIT_INTERVAL);
17349 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17354 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17357 BLOGE(sc, "%s usage count=%d\n", msg, val);
17364 /* Routines shared with the VF FLR cleanup */
17366 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17368 /* adjust polling timeout */
17369 if (CHIP_REV_IS_EMUL(sc)) {
17370 return (FLR_POLL_CNT * 2000);
17373 if (CHIP_REV_IS_FPGA(sc)) {
17374 return (FLR_POLL_CNT * 120);
17377 return (FLR_POLL_CNT);
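/*
 * For reference: on real silicon this yields FLR_POLL_CNT polls of
 * FLR_WAIT_INTERVAL usecs each; the emulation and FPGA cases above
 * scale the count by 2000x and 120x respectively, presumably to
 * compensate for their much slower clocks.
 */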
17381 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17384 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17385 if (bxe_flr_clnup_poll_hw_counter(sc,
17386 CFC_REG_NUM_LCIDS_INSIDE_PF,
17387 "CFC PF usage counter timed out",
17392 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17393 if (bxe_flr_clnup_poll_hw_counter(sc,
17394 DORQ_REG_PF_USAGE_CNT,
17395 "DQ PF usage counter timed out",
17400 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17401 if (bxe_flr_clnup_poll_hw_counter(sc,
17402 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17403 "QM PF usage counter timed out",
17408 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17409 if (bxe_flr_clnup_poll_hw_counter(sc,
17410 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17411 "Timers VNIC usage counter timed out",
17416 if (bxe_flr_clnup_poll_hw_counter(sc,
17417 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17418 "Timers NUM_SCANS usage counter timed out",
17423 /* Wait for the DMAE PF usage counter to reach zero */
17424 if (bxe_flr_clnup_poll_hw_counter(sc,
17425 dmae_reg_go_c[INIT_DMAE_C(sc)],
17426 "DMAE dommand register timed out",
17434 #define OP_GEN_PARAM(param) \
17435 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17436 #define OP_GEN_TYPE(type) \
17437 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17438 #define OP_GEN_AGG_VECT(index) \
17439 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
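/*
 * A final-cleanup command word is composed from these macros exactly as
 * done in bxe_send_final_clnup() below, i.e. (illustrative only):
 *
 *   cmd  = OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
 *   cmd |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
 *   cmd |= OP_GEN_AGG_VECT(clnup_func);
 *   cmd |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
 *
 * Each macro masks its argument into the corresponding SDM_OP_GEN field.
 */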
17442 bxe_send_final_clnup(struct bxe_softc *sc,
17443 uint8_t clnup_func,
17446 uint32_t op_gen_command = 0;
17447 uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17448 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17451 if (REG_RD(sc, comp_addr)) {
17452 BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17456 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17457 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17458 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17459 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17461 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17462 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17464 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17465 BLOGE(sc, "FW final cleanup did not succeed\n");
17466 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17467 (REG_RD(sc, comp_addr)));
17468 bxe_panic(sc, ("FLR cleanup failed\n"));
17472 /* Zero the completion for the next FLR */
17473 REG_WR(sc, comp_addr, 0);
17479 bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
17480 struct pbf_pN_buf_regs *regs,
17481 uint32_t poll_count)
17483 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17484 uint32_t cur_cnt = poll_count;
17486 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17487 crd = crd_start = REG_RD(sc, regs->crd);
17488 init_crd = REG_RD(sc, regs->init_crd);
17490 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17491 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
17492 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17494 while ((crd != init_crd) &&
17495 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17496 (init_crd - crd_start))) {
17498 DELAY(FLR_WAIT_INTERVAL);
17499 crd = REG_RD(sc, regs->crd);
17500 crd_freed = REG_RD(sc, regs->crd_freed);
17502 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17503 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
17504 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17509 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17510 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17514 bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
17515 struct pbf_pN_cmd_regs *regs,
17516 uint32_t poll_count)
17518 uint32_t occup, to_free, freed, freed_start;
17519 uint32_t cur_cnt = poll_count;
17521 occup = to_free = REG_RD(sc, regs->lines_occup);
17522 freed = freed_start = REG_RD(sc, regs->lines_freed);
17524 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
17525 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17528 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17530 DELAY(FLR_WAIT_INTERVAL);
17531 occup = REG_RD(sc, regs->lines_occup);
17532 freed = REG_RD(sc, regs->lines_freed);
17534 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17535 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
17536 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17541 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17542 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17546 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17548 struct pbf_pN_cmd_regs cmd_regs[] = {
17549 {0, (CHIP_IS_E3B0(sc)) ?
17550 PBF_REG_TQ_OCCUPANCY_Q0 :
17551 PBF_REG_P0_TQ_OCCUPANCY,
17552 (CHIP_IS_E3B0(sc)) ?
17553 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17554 PBF_REG_P0_TQ_LINES_FREED_CNT},
17555 {1, (CHIP_IS_E3B0(sc)) ?
17556 PBF_REG_TQ_OCCUPANCY_Q1 :
17557 PBF_REG_P1_TQ_OCCUPANCY,
17558 (CHIP_IS_E3B0(sc)) ?
17559 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17560 PBF_REG_P1_TQ_LINES_FREED_CNT},
17561 {4, (CHIP_IS_E3B0(sc)) ?
17562 PBF_REG_TQ_OCCUPANCY_LB_Q :
17563 PBF_REG_P4_TQ_OCCUPANCY,
17564 (CHIP_IS_E3B0(sc)) ?
17565 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17566 PBF_REG_P4_TQ_LINES_FREED_CNT}
17569 struct pbf_pN_buf_regs buf_regs[] = {
17570 {0, (CHIP_IS_E3B0(sc)) ?
17571 PBF_REG_INIT_CRD_Q0 :
17572 PBF_REG_P0_INIT_CRD ,
17573 (CHIP_IS_E3B0(sc)) ?
17574 PBF_REG_CREDIT_Q0 :
17576 (CHIP_IS_E3B0(sc)) ?
17577 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17578 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17579 {1, (CHIP_IS_E3B0(sc)) ?
17580 PBF_REG_INIT_CRD_Q1 :
17581 PBF_REG_P1_INIT_CRD,
17582 (CHIP_IS_E3B0(sc)) ?
17583 PBF_REG_CREDIT_Q1 :
17585 (CHIP_IS_E3B0(sc)) ?
17586 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17587 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17588 {4, (CHIP_IS_E3B0(sc)) ?
17589 PBF_REG_INIT_CRD_LB_Q :
17590 PBF_REG_P4_INIT_CRD,
17591 (CHIP_IS_E3B0(sc)) ?
17592 PBF_REG_CREDIT_LB_Q :
17594 (CHIP_IS_E3B0(sc)) ?
17595 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17596 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17601 /* Verify the command queues are flushed P0, P1, P4 */
17602 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17603 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17606 /* Verify the transmission buffers are flushed P0, P1, P4 */
17607 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17608 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17613 bxe_hw_enable_status(struct bxe_softc *sc)
17617 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17618 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17620 val = REG_RD(sc, PBF_REG_DISABLE_PF);
17621 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17623 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17624 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17626 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17627 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17629 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17630 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17632 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17633 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17635 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17636 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17638 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17639 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17643 bxe_pf_flr_clnup(struct bxe_softc *sc)
17645 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17647 BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17649 /* Re-enable PF target read access */
17650 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17652 /* Poll HW usage counters */
17653 BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17654 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17658 /* Zero the igu 'trailing edge' and 'leading edge' */
17660 /* Send the FW cleanup command */
17661 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17667 /* Verify TX hw is flushed */
17668 bxe_tx_hw_flushed(sc, poll_cnt);
17670 /* Wait 100ms (not adjusted according to platform) */
17673 /* Verify no pending pci transactions */
17674 if (bxe_is_pcie_pending(sc)) {
17675 BLOGE(sc, "PCIE Transactions still pending\n");
17679 bxe_hw_enable_status(sc);
17682 * Master enable - needed due to the WB DMAE writes performed before
17683 * this register is re-initialized as part of the regular function init
17685 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17691 bxe_init_hw_func(struct bxe_softc *sc)
17693 int port = SC_PORT(sc);
17694 int func = SC_FUNC(sc);
17695 int init_phase = PHASE_PF0 + func;
17696 struct ecore_ilt *ilt = sc->ilt;
17697 uint16_t cdu_ilt_start;
17698 uint32_t addr, val;
17699 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17700 int i, main_mem_width, rc;
17702 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17705 if (!CHIP_IS_E1x(sc)) {
17706 rc = bxe_pf_flr_clnup(sc);
17708 BLOGE(sc, "FLR cleanup failed!\n");
17709 // XXX bxe_fw_dump(sc);
17710 // XXX bxe_idle_chk(sc);
17715 /* set MSI reconfigure capability */
17716 if (sc->devinfo.int_block == INT_BLOCK_HC) {
17717 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17718 val = REG_RD(sc, addr);
17719 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17720 REG_WR(sc, addr, val);
17723 ecore_init_block(sc, BLOCK_PXP, init_phase);
17724 ecore_init_block(sc, BLOCK_PXP2, init_phase);
17727 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17729 for (i = 0; i < L2_ILT_LINES(sc); i++) {
17730 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17731 ilt->lines[cdu_ilt_start + i].page_mapping =
17732 sc->context[i].vcxt_dma.paddr;
17733 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17735 ecore_ilt_init_op(sc, INITOP_SET);
17738 REG_WR(sc, PRS_REG_NIC_MODE, 1);
17739 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17741 if (!CHIP_IS_E1x(sc)) {
17742 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17744 /* Turn on a single ISR mode in IGU if driver is going to use
17747 if (sc->interrupt_mode != INTR_MODE_MSIX) {
17748 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17752 * Timers bug workaround: function init part.
17753 * We need to wait 20msec after initializing the ILT to make
17754 * sure there are no requests in any of the PXP internal
17755 * queues with "old" ILT addresses.
17760 * Master enable - needed due to the WB DMAE writes performed
17761 * before this register is re-initialized as part of the regular function init
17764 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17765 /* Enable the function in IGU */
17766 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17769 sc->dmae_ready = 1;
17771 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17773 if (!CHIP_IS_E1x(sc))
17774 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17776 ecore_init_block(sc, BLOCK_ATC, init_phase);
17777 ecore_init_block(sc, BLOCK_DMAE, init_phase);
17778 ecore_init_block(sc, BLOCK_NIG, init_phase);
17779 ecore_init_block(sc, BLOCK_SRC, init_phase);
17780 ecore_init_block(sc, BLOCK_MISC, init_phase);
17781 ecore_init_block(sc, BLOCK_TCM, init_phase);
17782 ecore_init_block(sc, BLOCK_UCM, init_phase);
17783 ecore_init_block(sc, BLOCK_CCM, init_phase);
17784 ecore_init_block(sc, BLOCK_XCM, init_phase);
17785 ecore_init_block(sc, BLOCK_TSEM, init_phase);
17786 ecore_init_block(sc, BLOCK_USEM, init_phase);
17787 ecore_init_block(sc, BLOCK_CSEM, init_phase);
17788 ecore_init_block(sc, BLOCK_XSEM, init_phase);
17790 if (!CHIP_IS_E1x(sc))
17791 REG_WR(sc, QM_REG_PF_EN, 1);
17793 if (!CHIP_IS_E1x(sc)) {
17794 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17795 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17796 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17797 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17799 ecore_init_block(sc, BLOCK_QM, init_phase);
17801 ecore_init_block(sc, BLOCK_TM, init_phase);
17802 ecore_init_block(sc, BLOCK_DORQ, init_phase);
17804 bxe_iov_init_dq(sc);
17806 ecore_init_block(sc, BLOCK_BRB1, init_phase);
17807 ecore_init_block(sc, BLOCK_PRS, init_phase);
17808 ecore_init_block(sc, BLOCK_TSDM, init_phase);
17809 ecore_init_block(sc, BLOCK_CSDM, init_phase);
17810 ecore_init_block(sc, BLOCK_USDM, init_phase);
17811 ecore_init_block(sc, BLOCK_XSDM, init_phase);
17812 ecore_init_block(sc, BLOCK_UPB, init_phase);
17813 ecore_init_block(sc, BLOCK_XPB, init_phase);
17814 ecore_init_block(sc, BLOCK_PBF, init_phase);
17815 if (!CHIP_IS_E1x(sc))
17816 REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17818 ecore_init_block(sc, BLOCK_CDU, init_phase);
17820 ecore_init_block(sc, BLOCK_CFC, init_phase);
17822 if (!CHIP_IS_E1x(sc))
17823 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17826 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17827 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17830 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17832 /* HC init per function */
17833 if (sc->devinfo.int_block == INT_BLOCK_HC) {
17834 if (CHIP_IS_E1H(sc)) {
17835 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17837 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17838 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17840 ecore_init_block(sc, BLOCK_HC, init_phase);
17843 int num_segs, sb_idx, prod_offset;
17845 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17847 if (!CHIP_IS_E1x(sc)) {
17848 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17849 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17852 ecore_init_block(sc, BLOCK_IGU, init_phase);
17854 if (!CHIP_IS_E1x(sc)) {
17858 * E2 mode: addresses 0-135 map to the mapping memory;
17859 * 136 - PF0 default prod; 137 - PF1 default prod;
17860 * 138 - PF2 default prod; 139 - PF3 default prod;
17861 * 140 - PF0 attn prod; 141 - PF1 attn prod;
17862 * 142 - PF2 attn prod; 143 - PF3 attn prod;
17863 * 144-147 reserved.
17865 * E1.5 mode - in backward compatible mode:
17866 * for a non-default SB, each even line in the memory
17867 * holds the U producer and each odd line holds
17868 * the C producer. The first 128 producers are for
17869 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
17870 * producers are for the DSB for each PF.
17871 * Each PF has five segments: (the order inside each
17872 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
17873 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
17874 * 144-147 attn prods;
17876 /* non-default-status-blocks */
17877 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17878 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
17879 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
17880 prod_offset = (sc->igu_base_sb + sb_idx) *
17883 for (i = 0; i < num_segs; i++) {
17884 addr = IGU_REG_PROD_CONS_MEMORY +
17885 (prod_offset + i) * 4;
17886 REG_WR(sc, addr, 0);
17888 /* send consumer update with value 0 */
17889 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
17890 USTORM_ID, 0, IGU_INT_NOP, 1);
17891 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
17894 /* default-status-blocks */
17895 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17896 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
17898 if (CHIP_IS_MODE_4_PORT(sc))
17899 dsb_idx = SC_FUNC(sc);
17901 dsb_idx = SC_VN(sc);
17903 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
17904 IGU_BC_BASE_DSB_PROD + dsb_idx :
17905 IGU_NORM_BASE_DSB_PROD + dsb_idx);
17908 * igu prods come in chunks of E1HVN_MAX (4),
17909 * regardless of the current chip mode
17911 for (i = 0; i < (num_segs * E1HVN_MAX);
17913 addr = IGU_REG_PROD_CONS_MEMORY +
17914 (prod_offset + i)*4;
17915 REG_WR(sc, addr, 0);
17917 /* send consumer update with 0 */
17918 if (CHIP_INT_MODE_IS_BC(sc)) {
17919 bxe_ack_sb(sc, sc->igu_dsb_id,
17920 USTORM_ID, 0, IGU_INT_NOP, 1);
17921 bxe_ack_sb(sc, sc->igu_dsb_id,
17922 CSTORM_ID, 0, IGU_INT_NOP, 1);
17923 bxe_ack_sb(sc, sc->igu_dsb_id,
17924 XSTORM_ID, 0, IGU_INT_NOP, 1);
17925 bxe_ack_sb(sc, sc->igu_dsb_id,
17926 TSTORM_ID, 0, IGU_INT_NOP, 1);
17927 bxe_ack_sb(sc, sc->igu_dsb_id,
17928 ATTENTION_ID, 0, IGU_INT_NOP, 1);
17930 bxe_ack_sb(sc, sc->igu_dsb_id,
17931 USTORM_ID, 0, IGU_INT_NOP, 1);
17932 bxe_ack_sb(sc, sc->igu_dsb_id,
17933 ATTENTION_ID, 0, IGU_INT_NOP, 1);
17935 bxe_igu_clear_sb(sc, sc->igu_dsb_id);
17937 /* !!! these should become driver const once
17938 rf-tool supports split-68 const */
17939 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
17940 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
17941 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
17942 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
17943 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
17944 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
17948 /* Reset PCIE errors for debug */
17949 REG_WR(sc, 0x2114, 0xffffffff);
17950 REG_WR(sc, 0x2120, 0xffffffff);
17952 if (CHIP_IS_E1x(sc)) {
17953 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /* dwords */
17954 main_mem_base = HC_REG_MAIN_MEMORY +
17955 SC_PORT(sc) * (main_mem_size * 4);
17956 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
17957 main_mem_width = 8;
17959 val = REG_RD(sc, main_mem_prty_clr);
17961 BLOGD(sc, DBG_LOAD,
17962 "Parity errors in HC block during function init (0x%x)!\n",
17966 /* Clear "false" parity errors in MSI-X table */
17967 for (i = main_mem_base;
17968 i < main_mem_base + main_mem_size * 4;
17969 i += main_mem_width) {
17970 bxe_read_dmae(sc, i, main_mem_width / 4);
17971 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
17972 i, main_mem_width / 4);
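/*
 * Each pass of the loop above scrubs one main-memory row: the DMAE
 * read followed by the DMAE write-back regenerates the row's parity,
 * which (presumably) is what clears the "false" errors noted above.
 */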
17974 /* Clear HC parity attention */
17975 REG_RD(sc, main_mem_prty_clr);
17979 /* Enable STORMs SP logging */
17980 REG_WR8(sc, BAR_USTRORM_INTMEM +
17981 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17982 REG_WR8(sc, BAR_TSTRORM_INTMEM +
17983 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17984 REG_WR8(sc, BAR_CSTRORM_INTMEM +
17985 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17986 REG_WR8(sc, BAR_XSTRORM_INTMEM +
17987 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
17990 elink_phy_probe(&sc->link_params);
17996 bxe_link_reset(struct bxe_softc *sc)
17998 if (!BXE_NOMCP(sc)) {
17999 bxe_acquire_phy_lock(sc);
18000 elink_lfa_reset(&sc->link_params, &sc->link_vars);
18001 bxe_release_phy_lock(sc);
18003 if (!CHIP_REV_IS_SLOW(sc)) {
18004 BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18010 bxe_reset_port(struct bxe_softc *sc)
18012 int port = SC_PORT(sc);
18015 /* reset physical Link */
18016 bxe_link_reset(sc);
18018 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18020 /* Do not rcv packets to BRB */
18021 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18022 /* Do not direct rcv packets that are not for MCP to the BRB */
18023 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18024 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18026 /* Configure AEU */
18027 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18031 /* Check for BRB port occupancy */
18032 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18034 BLOGD(sc, DBG_LOAD,
18035 "BRB1 is not empty, %d blocks are occupied\n", val);
18038 /* TODO: Close Doorbell port? */
18042 bxe_ilt_wr(struct bxe_softc *sc,
18047 uint32_t wb_write[2];
18049 if (CHIP_IS_E1(sc)) {
18050 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18052 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18055 wb_write[0] = ONCHIP_ADDR1(addr);
18056 wb_write[1] = ONCHIP_ADDR2(addr);
18057 REG_WR_DMAE(sc, reg, wb_write, 2);
18061 bxe_clear_func_ilt(struct bxe_softc *sc,
18064 uint32_t i, base = FUNC_ILT_BASE(func);
18065 for (i = base; i < base + ILT_PER_FUNC; i++) {
18066 bxe_ilt_wr(sc, i, 0);
18071 bxe_reset_func(struct bxe_softc *sc)
18073 struct bxe_fastpath *fp;
18074 int port = SC_PORT(sc);
18075 int func = SC_FUNC(sc);
18078 /* Disable the function in the FW */
18079 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18080 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18081 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18082 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18085 FOR_EACH_ETH_QUEUE(sc, i) {
18087 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18088 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18093 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18094 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18097 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18098 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) + (i * 4), 0);
18101 /* Configure IGU */
18102 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18103 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18104 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18106 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18107 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18110 if (CNIC_LOADED(sc)) {
18111 /* Disable Timer scan */
18112 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18114 * Wait for at least 10ms and up to 2 seconds for the timers
18117 for (i = 0; i < 200; i++) {
18119 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18125 bxe_clear_func_ilt(sc, func);
18128 * Timers bug workaround for E2: if this is vnic-3,
18129 * we need to set the entire ilt range for these timers.
18131 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18132 struct ilt_client_info ilt_cli;
18133 /* use dummy TM client */
18134 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18136 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18137 ilt_cli.client_num = ILT_CLIENT_TM;
18139 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18142 /* this assumes that reset_port() was called before reset_func() */
18143 if (!CHIP_IS_E1x(sc)) {
18144 bxe_pf_disable(sc);
18147 sc->dmae_ready = 0;
18151 bxe_gunzip_init(struct bxe_softc *sc)
18157 bxe_gunzip_end(struct bxe_softc *sc)
18163 bxe_init_firmware(struct bxe_softc *sc)
18165 if (CHIP_IS_E1(sc)) {
18166 ecore_init_e1_firmware(sc);
18167 sc->iro_array = e1_iro_arr;
18168 } else if (CHIP_IS_E1H(sc)) {
18169 ecore_init_e1h_firmware(sc);
18170 sc->iro_array = e1h_iro_arr;
18171 } else if (!CHIP_IS_E1x(sc)) {
18172 ecore_init_e2_firmware(sc);
18173 sc->iro_array = e2_iro_arr;
18175 BLOGE(sc, "Unsupported chip revision\n");
18183 bxe_release_firmware(struct bxe_softc *sc)
static int
ecore_gunzip(struct bxe_softc *sc, const uint8_t *zbuf, int len)
{
    /* XXX : Implement... */
    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
    return (FALSE);
}

void
ecore_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val)
{
    bxe_reg_wr_ind(sc, addr, val);
}

void
ecore_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr,
                          uint32_t addr, uint32_t len)
{
    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
}

void
ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr, size_t size,
                          uint32_t *data)
{
    uint8_t i;
    for (i = 0; i < size/4; i++) {
        REG_WR(sc, addr + (i * 4), data[i]);
    }
}
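/*
 * A minimal usage sketch for ecore_storm_memset_struct() (illustrative
 * only; the struct and offset below are hypothetical, not part of the
 * driver): the helper copies a host structure into storm internal memory
 * one 32-bit word at a time, so callers hand it a dword-multiple size.
 */
#if 0
struct example_sp_data {        /* hypothetical, dword-aligned payload */
    uint32_t words[4];
};

static void
example_storm_write(struct bxe_softc *sc)
{
    struct example_sp_data d = { { 0 } };

    /* 0x1000 is a made-up XSTORM offset, for illustration only */
    ecore_storm_memset_struct(sc, BAR_XSTRORM_INTMEM + 0x1000,
                              sizeof(d), (uint32_t *)&d);
}
#endif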
/*
 * character device - ioctl interface definitions
 */

#include "bxe_dump.h"
#include "bxe_ioctl.h"
#include <sys/conf.h>

static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
                      struct thread *td);
static struct cdevsw bxe_cdevsw = {
    .d_version = D_VERSION,
    .d_ioctl   = bxe_eioctl,
    .d_name    = "bxecnic",
};

#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
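/*
 * Userland view of this interface (a sketch, compiled separately against
 * bxe_ioctl.h; device name and error handling kept minimal). The node is
 * created in bxe_add_cdev() below and named after the interface, e.g.
 * /dev/bxe0:
 *
 *     #include <sys/ioctl.h>
 *     #include <fcntl.h>
 *     #include <stdlib.h>
 *     #include "bxe_ioctl.h"
 *
 *     bxe_grcdump_t dump = { 0 };
 *     int fd = open("/dev/bxe0", O_RDWR);
 *
 *     ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);    // learn the required size
 *     dump.grcdump = malloc(dump.grcdump_size);
 *     ioctl(fd, BXE_GRC_DUMP, &dump);         // copy the dump out
 *     // dump.grcdump now holds dump.grcdump_dwords 32-bit words
 */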
#define DUMP_ALL_PRESETS    0x1FFF
#define DUMP_MAX_PRESETS    13
#define IS_E1_REG(chips)    ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
#define IS_E1H_REG(chips)   ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
#define IS_E2_REG(chips)    ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
#define IS_E3A0_REG(chips)  ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
#define IS_E3B0_REG(chips)  ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)

#define IS_REG_IN_PRESET(presets, idx)  \
    ((presets & (1 << (idx-1))) == (1 << (idx-1)))
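/*
 * Preset indices are 1-based, so preset 'idx' maps to bit (idx-1). For
 * example, a register whose presets field is 0x0005 (bits 0 and 2 set)
 * belongs to presets 1 and 3: IS_REG_IN_PRESET(0x0005, 3) is true, while
 * IS_REG_IN_PRESET(0x0005, 2) is false.
 */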
static int
bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
{
    if (CHIP_IS_E1(sc))
        return dump_num_registers[0][preset-1];
    else if (CHIP_IS_E1H(sc))
        return dump_num_registers[1][preset-1];
    else if (CHIP_IS_E2(sc))
        return dump_num_registers[2][preset-1];
    else if (CHIP_IS_E3A0(sc))
        return dump_num_registers[3][preset-1];
    else if (CHIP_IS_E3B0(sc))
        return dump_num_registers[4][preset-1];
    else
        return 0;
}
static int
bxe_get_total_regs_len32(struct bxe_softc *sc)
{
    uint32_t preset_idx;
    int regdump_len32 = 0;

    /* Calculate the total preset regs length */
    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
    }

    return regdump_len32;
}
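/*
 * The value returned above is in 32-bit words. Consumers below size their
 * byte buffers as (len32 * sizeof(uint32_t)) + sizeof(struct dump_header);
 * e.g. a 250000-word total would mean roughly a 1 MB dump buffer plus the
 * header.
 */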
static const uint32_t *
__bxe_get_page_addr_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_vals_e2;
    else if (CHIP_IS_E3(sc))
        return page_vals_e3;
    else
        return NULL;
}
static uint32_t
__bxe_get_page_reg_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_MODE_VALUES_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_MODE_VALUES_E3;
    else
        return 0;
}
static const uint32_t *
__bxe_get_page_write_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_write_regs_e2;
    else if (CHIP_IS_E3(sc))
        return page_write_regs_e3;
    else
        return NULL;
}
static uint32_t
__bxe_get_page_write_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_WRITE_REGS_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_WRITE_REGS_E3;
    else
        return 0;
}
static const struct reg_addr *
__bxe_get_page_read_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_read_regs_e2;
    else if (CHIP_IS_E3(sc))
        return page_read_regs_e3;
    else
        return NULL;
}
static uint32_t
__bxe_get_page_read_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_READ_REGS_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_READ_REGS_E3;
    else
        return 0;
}
static bool
bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
{
    if (CHIP_IS_E1(sc))
        return IS_E1_REG(reg_info->chips);
    else if (CHIP_IS_E1H(sc))
        return IS_E1H_REG(reg_info->chips);
    else if (CHIP_IS_E2(sc))
        return IS_E2_REG(reg_info->chips);
    else if (CHIP_IS_E3A0(sc))
        return IS_E3A0_REG(reg_info->chips);
    else if (CHIP_IS_E3B0(sc))
        return IS_E3B0_REG(reg_info->chips);
    else
        return 0;
}
static bool
bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
{
    if (CHIP_IS_E1(sc))
        return IS_E1_REG(wreg_info->chips);
    else if (CHIP_IS_E1H(sc))
        return IS_E1H_REG(wreg_info->chips);
    else if (CHIP_IS_E2(sc))
        return IS_E2_REG(wreg_info->chips);
    else if (CHIP_IS_E3A0(sc))
        return IS_E3A0_REG(wreg_info->chips);
    else if (CHIP_IS_E3B0(sc))
        return IS_E3B0_REG(wreg_info->chips);
    else
        return 0;
}
/*
 * bxe_read_pages_regs - read "paged" registers
 *
 * @sc      device handle
 * @p       output buffer
 * @preset  the preset value
 *
 * Reads "paged" memories: memories that may only be read by first writing to a
 * specific address ("write address") and then reading from a specific address
 * ("read address"). There may be more than one write address per "page" and
 * more than one read address per write address.
 */
static void
bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
    uint32_t i, j, k, n;

    /* addresses of the paged registers */
    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
    /* number of paged registers */
    int num_pages = __bxe_get_page_reg_num(sc);
    /* write addresses */
    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
    /* number of write addresses */
    int write_num = __bxe_get_page_write_num(sc);
    /* read addresses info */
    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
    /* number of read addresses */
    int read_num = __bxe_get_page_read_num(sc);
    uint32_t addr, size;

    for (i = 0; i < num_pages; i++) {
        for (j = 0; j < write_num; j++) {
            REG_WR(sc, write_addr[j], page_addr[i]);

            for (k = 0; k < read_num; k++) {
                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
                    size = read_addr[k].size;
                    for (n = 0; n < size; n++) {
                        addr = read_addr[k].addr + n*4;
                        *p++ = REG_RD(sc, addr);
                    }
                }
            }
        }
    }
}
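/*
 * Concrete shape of the protocol above for a single page P (hypothetical
 * one-entry tables): with write_addr[] = { W } and read_addr[] =
 * { { .addr = R, .size = 2 } }, the loop performs
 *
 *     REG_WR(sc, W, P);
 *     *p++ = REG_RD(sc, R);
 *     *p++ = REG_RD(sc, R + 4);
 *
 * i.e. the page select is written first, then each read address in the
 * preset is sampled 'size' consecutive dwords at a time.
 */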
static int
bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
    uint32_t i, j, addr;
    const struct wreg_addr *wreg_addr_p = NULL;

    if (CHIP_IS_E1(sc))
        wreg_addr_p = &wreg_addr_e1;
    else if (CHIP_IS_E1H(sc))
        wreg_addr_p = &wreg_addr_e1h;
    else if (CHIP_IS_E2(sc))
        wreg_addr_p = &wreg_addr_e2;
    else if (CHIP_IS_E3A0(sc))
        wreg_addr_p = &wreg_addr_e3;
    else if (CHIP_IS_E3B0(sc))
        wreg_addr_p = &wreg_addr_e3b0;
    else
        return (-1);

    /* Read the idle_chk registers */
    for (i = 0; i < IDLE_REGS_COUNT; i++) {
        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
            for (j = 0; j < idle_reg_addrs[i].size; j++)
                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
        }
    }

    /* Read the regular registers */
    for (i = 0; i < REGS_COUNT; i++) {
        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
            for (j = 0; j < reg_addrs[i].size; j++)
                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
        }
    }

    /* Read the CAM registers */
    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
        for (i = 0; i < wreg_addr_p->size; i++) {
            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);

            /*
             * In case of wreg_addr register, read additional
             * registers from the read_regs array
             */
            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
                addr = *(wreg_addr_p->read_regs);
                *p++ = REG_RD(sc, addr + j*4);
            }
        }
    }

    /* Paged registers are supported in E2 & E3 only */
    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
        /* Read "paged" registers */
        bxe_read_pages_regs(sc, p, preset);
    }

    return 0;
}
static int
bxe_grc_dump(struct bxe_softc *sc)
{
    int rval = 0;
    uint32_t preset_idx;
    uint8_t *buf;
    uint32_t size;
    struct dump_header *d_hdr;

    if (sc->grcdump_done)
        return (rval);

    ecore_disable_blocks_parity(sc);

    buf = sc->grc_dump;
    d_hdr = sc->grc_dump;

    d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
    d_hdr->version = BNX2X_DUMP_VERSION;
    d_hdr->preset = DUMP_ALL_PRESETS;

    if (CHIP_IS_E1(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E1;
    } else if (CHIP_IS_E1H(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
    } else if (CHIP_IS_E2(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    } else if (CHIP_IS_E3A0(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    } else if (CHIP_IS_E3B0(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    }

    buf += sizeof(struct dump_header);

    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {

        /* Skip presets with IOR */
        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
            (preset_idx == 11))
            continue;

        /* write each preset at its own offset, not over the header */
        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);

        if (rval)
            break;

        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof(uint32_t));

        buf += size;
    }

    ecore_clear_blocks_parity(sc);
    ecore_enable_blocks_parity(sc);

    sc->grcdump_done = 1;

    return (rval);
}
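/*
 * bxe_grc_dump() leaves the snapshot in sc->grc_dump and latches
 * sc->grcdump_done; the BXE_GRC_DUMP ioctl below copies the buffer out to
 * userland and clears the flag so a fresh dump can be taken afterwards.
 */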
static int
bxe_add_cdev(struct bxe_softc *sc)
{
    int grc_dump_size;

    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                    sizeof(struct dump_header);

    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);

    if (sc->grc_dump == NULL)
        return (-1);

    sc->ioctl_dev = make_dev(&bxe_cdevsw, sc->ifnet->if_dunit, UID_ROOT,
                             GID_WHEEL, 0600, "%s", if_name(sc->ifnet));

    if (sc->ioctl_dev == NULL) {
        free(sc->grc_dump, M_DEVBUF);
        return (-1);
    }

    sc->ioctl_dev->si_drv1 = sc;

    return (0);
}
static void
bxe_del_cdev(struct bxe_softc *sc)
{
    if (sc->ioctl_dev != NULL)
        destroy_dev(sc->ioctl_dev);

    if (sc->grc_dump != NULL)
        free(sc->grc_dump, M_DEVBUF);

    return;
}
static int
bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
           struct thread *td)
{
    struct bxe_softc *sc;
    int rval = 0;
    bxe_grcdump_t *dump = NULL;
    int grc_dump_size;

    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
        return (ENXIO);

    dump = (bxe_grcdump_t *)data;

    switch (cmd) {

    case BXE_GRC_DUMP_SIZE:
        dump->pci_func = sc->pcie_func;
        dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                             sizeof(struct dump_header);
        break;

    case BXE_GRC_DUMP:

        grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                        sizeof(struct dump_header);

        if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) ||
            (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) {
            rval = EINVAL;
            break;
        }

        dump->grcdump_dwords = grc_dump_size >> 2;
        rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
        sc->grcdump_done = 0;