2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #define BXE_DRIVER_VERSION "1.78.89"
34 #include "ecore_init.h"
35 #include "ecore_init_ops.h"
37 #include "57710_int_offsets.h"
38 #include "57711_int_offsets.h"
39 #include "57712_int_offsets.h"
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
46 #define CTLTYPE_U64 CTLTYPE_QUAD
47 #define sysctl_handle_64 sysctl_handle_quad
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero (0) for older kernels that don't include this changeset,
53 * thereby masking the functionality.
56 #define CSUM_TCP_IPV6 0
57 #define CSUM_UDP_IPV6 0
61 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
62 * for older kernels that don't include this changeset.
64 #if __FreeBSD_version < 900035
65 #define pci_find_cap pci_find_extcap
68 #define BXE_DEF_SB_ATT_IDX 0x0001
69 #define BXE_DEF_SB_IDX 0x0002
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
75 #define FLR_WAIT_USEC 10000 /* 10 msecs */
76 #define FLR_WAIT_INTERVAL 50 /* usecs */
77 #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
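/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * FLR cleanup code consumes the constants above by polling a register every
 * FLR_WAIT_INTERVAL usecs, up to FLR_POLL_CNT times (FLR_WAIT_USEC total),
 * waiting for it to reach an expected value.
 */
static inline int
bxe_flr_poll_sketch(struct bxe_softc *sc,
                    uint32_t         reg,
                    uint32_t         expected)
{
    uint32_t cnt;

    for (cnt = 0; cnt < FLR_POLL_CNT; cnt++) {
        if (REG_RD(sc, reg) == expected) {
            return (0); /* register reached the expected value */
        }
        DELAY(FLR_WAIT_INTERVAL);
    }

    return (1); /* timed out after FLR_WAIT_USEC (10 msecs) */
}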
79 struct pbf_pN_buf_regs {
86 struct pbf_pN_cmd_regs {
93 * PCI Device ID Table used by bxe_probe().
95 #define BXE_DEVDESC_MAX 64
96 static struct bxe_device_type bxe_devs[] = {
100 PCI_ANY_ID, PCI_ANY_ID,
101 "QLogic NetXtreme II BCM57710 10GbE"
106 PCI_ANY_ID, PCI_ANY_ID,
107 "QLogic NetXtreme II BCM57711 10GbE"
112 PCI_ANY_ID, PCI_ANY_ID,
113 "QLogic NetXtreme II BCM57711E 10GbE"
118 PCI_ANY_ID, PCI_ANY_ID,
119 "QLogic NetXtreme II BCM57712 10GbE"
124 PCI_ANY_ID, PCI_ANY_ID,
125 "QLogic NetXtreme II BCM57712 MF 10GbE"
130 PCI_ANY_ID, PCI_ANY_ID,
131 "QLogic NetXtreme II BCM57800 10GbE"
136 PCI_ANY_ID, PCI_ANY_ID,
137 "QLogic NetXtreme II BCM57800 MF 10GbE"
142 PCI_ANY_ID, PCI_ANY_ID,
143 "QLogic NetXtreme II BCM57810 10GbE"
148 PCI_ANY_ID, PCI_ANY_ID,
149 "QLogic NetXtreme II BCM57810 MF 10GbE"
154 PCI_ANY_ID, PCI_ANY_ID,
155 "QLogic NetXtreme II BCM57811 10GbE"
160 PCI_ANY_ID, PCI_ANY_ID,
161 "QLogic NetXtreme II BCM57811 MF 10GbE"
166 PCI_ANY_ID, PCI_ANY_ID,
167 "QLogic NetXtreme II BCM57840 4x10GbE"
172 PCI_ANY_ID, PCI_ANY_ID,
173 "QLogic NetXtreme II BCM57840 2x20GbE"
178 PCI_ANY_ID, PCI_ANY_ID,
179 "QLogic NetXtreme II BCM57840 MF 10GbE"
186 MALLOC_DECLARE(M_BXE_ILT);
187 MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
190 * FreeBSD device entry points.
192 static int bxe_probe(device_t);
193 static int bxe_attach(device_t);
194 static int bxe_detach(device_t);
195 static int bxe_shutdown(device_t);
198 * FreeBSD KLD module/device interface event handler method.
200 static device_method_t bxe_methods[] = {
201 /* Device interface (device_if.h) */
202 DEVMETHOD(device_probe, bxe_probe),
203 DEVMETHOD(device_attach, bxe_attach),
204 DEVMETHOD(device_detach, bxe_detach),
205 DEVMETHOD(device_shutdown, bxe_shutdown),
206 /* Bus interface (bus_if.h) */
207 DEVMETHOD(bus_print_child, bus_generic_print_child),
208 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
213 * FreeBSD KLD Module data declaration
215 static driver_t bxe_driver = {
216 "bxe", /* module name */
217 bxe_methods, /* event handler */
218 sizeof(struct bxe_softc) /* extra data */
222 * FreeBSD dev class is needed to manage dev instances and
223 * to associate with a bus type
225 static devclass_t bxe_devclass;
227 MODULE_DEPEND(bxe, pci, 1, 1, 1);
228 MODULE_DEPEND(bxe, ether, 1, 1, 1);
229 DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
231 /* resources needed for unloading a previously loaded device */
233 #define BXE_PREV_WAIT_NEEDED 1
234 struct mtx bxe_prev_mtx;
235 MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
236 struct bxe_prev_list_node {
237 LIST_ENTRY(bxe_prev_list_node) node;
241 uint8_t aer; /* XXX automatic error recovery */
244 static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
246 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
248 /* Tunable device values... */
250 SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
253 unsigned long bxe_debug = 0;
254 SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
255 &bxe_debug, 0, "Debug logging mode");
257 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
258 static int bxe_interrupt_mode = INTR_MODE_MSIX;
259 SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
260 &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
262 /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
263 static int bxe_queue_count = 4;
264 SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
265 &bxe_queue_count, 0, "Multi-Queue queue count");
267 /* max number of buffers per queue (default RX_BD_USABLE) */
268 static int bxe_max_rx_bufs = 0;
269 SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
270 &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
272 /* Host interrupt coalescing RX tick timer (usecs) */
273 static int bxe_hc_rx_ticks = 25;
274 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
275 &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
277 /* Host interrupt coalescing TX tick timer (usecs) */
278 static int bxe_hc_tx_ticks = 50;
279 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
280 &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
282 /* Maximum number of Rx packets to process at a time */
283 static int bxe_rx_budget = 0xffffffff;
284 SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
285 &bxe_rx_budget, 0, "Rx processing budget");
287 /* Maximum LRO aggregation size */
288 static int bxe_max_aggregation_size = 0;
289 SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
290 &bxe_max_aggregation_size, 0, "max aggregation size");
292 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
293 static int bxe_mrrs = -1;
294 SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
295 &bxe_mrrs, 0, "PCIe maximum read request size");
297 /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
298 static int bxe_autogreeen = 0;
299 SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
300 &bxe_autogreeen, 0, "AutoGrEEEn support");
302 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
303 static int bxe_udp_rss = 0;
304 SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
305 &bxe_udp_rss, 0, "UDP RSS support");
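/*
 * All of the CTLFLAG_RDTUN sysctls above are also loader tunables, so they
 * can be set from /boot/loader.conf before the module loads, for example:
 *
 *   hw.bxe.interrupt_mode="2"   # MSI-X with MSI/INTx fallback
 *   hw.bxe.queue_count="4"
 *   hw.bxe.hc_rx_ticks="25"
 *   hw.bxe.udp_rss="1"
 *
 * (Example values only; the defaults are the initializers above.)
 */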
308 #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
310 #define STATS_OFFSET32(stat_name) \
311 (offsetof(struct bxe_eth_stats, stat_name) / 4)
313 #define Q_STATS_OFFSET32(stat_name) \
314 (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
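/*
 * Worked example: these macros convert a byte offset into a 32-bit-word
 * offset. If a stat sat at byte offset 8 within struct bxe_eth_stats, then
 * STATS_OFFSET32() for it would be 8 / 4 == 2, i.e. the third dword of the
 * stats block. (Byte offset 8 is illustrative, not the actual layout.)
 */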
316 static const struct {
320 #define STATS_FLAGS_PORT 1
321 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
322 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
323 char string[STAT_NAME_LEN];
324 } bxe_eth_stats_arr[] = {
325 { STATS_OFFSET32(total_bytes_received_hi),
326 8, STATS_FLAGS_BOTH, "rx_bytes" },
327 { STATS_OFFSET32(error_bytes_received_hi),
328 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
329 { STATS_OFFSET32(total_unicast_packets_received_hi),
330 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
331 { STATS_OFFSET32(total_multicast_packets_received_hi),
332 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
333 { STATS_OFFSET32(total_broadcast_packets_received_hi),
334 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
335 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
336 8, STATS_FLAGS_PORT, "rx_crc_errors" },
337 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
338 8, STATS_FLAGS_PORT, "rx_align_errors" },
339 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
340 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
341 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
342 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
343 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
344 8, STATS_FLAGS_PORT, "rx_fragments" },
345 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
346 8, STATS_FLAGS_PORT, "rx_jabbers" },
347 { STATS_OFFSET32(no_buff_discard_hi),
348 8, STATS_FLAGS_BOTH, "rx_discards" },
349 { STATS_OFFSET32(mac_filter_discard),
350 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
351 { STATS_OFFSET32(mf_tag_discard),
352 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
353 { STATS_OFFSET32(pfc_frames_received_hi),
354 8, STATS_FLAGS_PORT, "pfc_frames_received" },
355 { STATS_OFFSET32(pfc_frames_sent_hi),
356 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
357 { STATS_OFFSET32(brb_drop_hi),
358 8, STATS_FLAGS_PORT, "rx_brb_discard" },
359 { STATS_OFFSET32(brb_truncate_hi),
360 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
361 { STATS_OFFSET32(pause_frames_received_hi),
362 8, STATS_FLAGS_PORT, "rx_pause_frames" },
363 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
364 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
365 { STATS_OFFSET32(nig_timer_max),
366 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
367 { STATS_OFFSET32(total_bytes_transmitted_hi),
368 8, STATS_FLAGS_BOTH, "tx_bytes" },
369 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
370 8, STATS_FLAGS_PORT, "tx_error_bytes" },
371 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
372 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
373 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
374 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
375 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
376 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
377 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
378 8, STATS_FLAGS_PORT, "tx_mac_errors" },
379 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
380 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
381 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
382 8, STATS_FLAGS_PORT, "tx_single_collisions" },
383 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
384 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
385 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
386 8, STATS_FLAGS_PORT, "tx_deferred" },
387 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
388 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
389 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
390 8, STATS_FLAGS_PORT, "tx_late_collisions" },
391 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
392 8, STATS_FLAGS_PORT, "tx_total_collisions" },
393 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
394 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
395 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
396 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
397 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
398 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
399 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
400 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
401 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
402 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
403 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
404 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
405 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
406 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
407 { STATS_OFFSET32(pause_frames_sent_hi),
408 8, STATS_FLAGS_PORT, "tx_pause_frames" },
409 { STATS_OFFSET32(total_tpa_aggregations_hi),
410 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
411 { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
412 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
413 { STATS_OFFSET32(total_tpa_bytes_hi),
414 8, STATS_FLAGS_FUNC, "tpa_bytes"},
415 { STATS_OFFSET32(eee_tx_lpi),
416 4, STATS_FLAGS_PORT, "eee_tx_lpi"},
417 { STATS_OFFSET32(rx_calls),
418 4, STATS_FLAGS_FUNC, "rx_calls"},
419 { STATS_OFFSET32(rx_pkts),
420 4, STATS_FLAGS_FUNC, "rx_pkts"},
421 { STATS_OFFSET32(rx_tpa_pkts),
422 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
423 { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
424 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
425 { STATS_OFFSET32(rx_bxe_service_rxsgl),
426 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
427 { STATS_OFFSET32(rx_jumbo_sge_pkts),
428 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
429 { STATS_OFFSET32(rx_soft_errors),
430 4, STATS_FLAGS_FUNC, "rx_soft_errors"},
431 { STATS_OFFSET32(rx_hw_csum_errors),
432 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
433 { STATS_OFFSET32(rx_ofld_frames_csum_ip),
434 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
435 { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
436 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
437 { STATS_OFFSET32(rx_budget_reached),
438 4, STATS_FLAGS_FUNC, "rx_budget_reached"},
439 { STATS_OFFSET32(tx_pkts),
440 4, STATS_FLAGS_FUNC, "tx_pkts"},
441 { STATS_OFFSET32(tx_soft_errors),
442 4, STATS_FLAGS_FUNC, "tx_soft_errors"},
443 { STATS_OFFSET32(tx_ofld_frames_csum_ip),
444 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
445 { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
446 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
447 { STATS_OFFSET32(tx_ofld_frames_csum_udp),
448 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
449 { STATS_OFFSET32(tx_ofld_frames_lso),
450 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
451 { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
452 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
453 { STATS_OFFSET32(tx_encap_failures),
454 4, STATS_FLAGS_FUNC, "tx_encap_failures"},
455 { STATS_OFFSET32(tx_hw_queue_full),
456 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
457 { STATS_OFFSET32(tx_hw_max_queue_depth),
458 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
459 { STATS_OFFSET32(tx_dma_mapping_failure),
460 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
461 { STATS_OFFSET32(tx_max_drbr_queue_depth),
462 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
463 { STATS_OFFSET32(tx_window_violation_std),
464 4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
465 { STATS_OFFSET32(tx_window_violation_tso),
466 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
467 { STATS_OFFSET32(tx_chain_lost_mbuf),
468 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
469 { STATS_OFFSET32(tx_frames_deferred),
470 4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
471 { STATS_OFFSET32(tx_queue_xoff),
472 4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
473 { STATS_OFFSET32(mbuf_defrag_attempts),
474 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
475 { STATS_OFFSET32(mbuf_defrag_failures),
476 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
477 { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
478 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
479 { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
480 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
481 { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
482 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
483 { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
484 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
485 { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
486 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
487 { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
488 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
489 { STATS_OFFSET32(mbuf_alloc_tx),
490 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
491 { STATS_OFFSET32(mbuf_alloc_rx),
492 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
493 { STATS_OFFSET32(mbuf_alloc_sge),
494 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
495 { STATS_OFFSET32(mbuf_alloc_tpa),
496 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
497 { STATS_OFFSET32(tx_queue_full_return),
498 4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
499 { STATS_OFFSET32(tx_request_link_down_failures),
500 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
501 { STATS_OFFSET32(bd_avail_too_less_failures),
502 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
503 { STATS_OFFSET32(tx_mq_not_empty),
504 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}
508 static const struct {
511 char string[STAT_NAME_LEN];
512 } bxe_eth_q_stats_arr[] = {
513 { Q_STATS_OFFSET32(total_bytes_received_hi),
515 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
516 8, "rx_ucast_packets" },
517 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
518 8, "rx_mcast_packets" },
519 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
520 8, "rx_bcast_packets" },
521 { Q_STATS_OFFSET32(no_buff_discard_hi),
523 { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
525 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
526 8, "tx_ucast_packets" },
527 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
528 8, "tx_mcast_packets" },
529 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
530 8, "tx_bcast_packets" },
531 { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
532 8, "tpa_aggregations" },
533 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
534 8, "tpa_aggregated_frames"},
535 { Q_STATS_OFFSET32(total_tpa_bytes_hi),
537 { Q_STATS_OFFSET32(rx_calls),
539 { Q_STATS_OFFSET32(rx_pkts),
541 { Q_STATS_OFFSET32(rx_tpa_pkts),
543 { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
544 4, "rx_erroneous_jumbo_sge_pkts"},
545 { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
546 4, "rx_bxe_service_rxsgl"},
547 { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
548 4, "rx_jumbo_sge_pkts"},
549 { Q_STATS_OFFSET32(rx_soft_errors),
550 4, "rx_soft_errors"},
551 { Q_STATS_OFFSET32(rx_hw_csum_errors),
552 4, "rx_hw_csum_errors"},
553 { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
554 4, "rx_ofld_frames_csum_ip"},
555 { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
556 4, "rx_ofld_frames_csum_tcp_udp"},
557 { Q_STATS_OFFSET32(rx_budget_reached),
558 4, "rx_budget_reached"},
559 { Q_STATS_OFFSET32(tx_pkts),
561 { Q_STATS_OFFSET32(tx_soft_errors),
562 4, "tx_soft_errors"},
563 { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
564 4, "tx_ofld_frames_csum_ip"},
565 { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
566 4, "tx_ofld_frames_csum_tcp"},
567 { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
568 4, "tx_ofld_frames_csum_udp"},
569 { Q_STATS_OFFSET32(tx_ofld_frames_lso),
570 4, "tx_ofld_frames_lso"},
571 { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
572 4, "tx_ofld_frames_lso_hdr_splits"},
573 { Q_STATS_OFFSET32(tx_encap_failures),
574 4, "tx_encap_failures"},
575 { Q_STATS_OFFSET32(tx_hw_queue_full),
576 4, "tx_hw_queue_full"},
577 { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
578 4, "tx_hw_max_queue_depth"},
579 { Q_STATS_OFFSET32(tx_dma_mapping_failure),
580 4, "tx_dma_mapping_failure"},
581 { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
582 4, "tx_max_drbr_queue_depth"},
583 { Q_STATS_OFFSET32(tx_window_violation_std),
584 4, "tx_window_violation_std"},
585 { Q_STATS_OFFSET32(tx_window_violation_tso),
586 4, "tx_window_violation_tso"},
587 { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
588 4, "tx_chain_lost_mbuf"},
589 { Q_STATS_OFFSET32(tx_frames_deferred),
590 4, "tx_frames_deferred"},
591 { Q_STATS_OFFSET32(tx_queue_xoff),
593 { Q_STATS_OFFSET32(mbuf_defrag_attempts),
594 4, "mbuf_defrag_attempts"},
595 { Q_STATS_OFFSET32(mbuf_defrag_failures),
596 4, "mbuf_defrag_failures"},
597 { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
598 4, "mbuf_rx_bd_alloc_failed"},
599 { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
600 4, "mbuf_rx_bd_mapping_failed"},
601 { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
602 4, "mbuf_rx_tpa_alloc_failed"},
603 { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
604 4, "mbuf_rx_tpa_mapping_failed"},
605 { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
606 4, "mbuf_rx_sge_alloc_failed"},
607 { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
608 4, "mbuf_rx_sge_mapping_failed"},
609 { Q_STATS_OFFSET32(mbuf_alloc_tx),
611 { Q_STATS_OFFSET32(mbuf_alloc_rx),
613 { Q_STATS_OFFSET32(mbuf_alloc_sge),
614 4, "mbuf_alloc_sge"},
615 { Q_STATS_OFFSET32(mbuf_alloc_tpa),
616 4, "mbuf_alloc_tpa"},
617 { Q_STATS_OFFSET32(tx_queue_full_return),
618 4, "tx_queue_full_return"},
619 { Q_STATS_OFFSET32(tx_request_link_down_failures),
620 4, "tx_request_link_down_failures"},
621 { Q_STATS_OFFSET32(bd_avail_too_less_failures),
622 4, "bd_avail_too_less_failures"},
623 { Q_STATS_OFFSET32(tx_mq_not_empty),
624 4, "tx_mq_not_empty"}
628 #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr)
629 #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
632 static void bxe_cmng_fns_init(struct bxe_softc *sc,
635 static int bxe_get_cmng_fns_mode(struct bxe_softc *sc);
636 static void storm_memset_cmng(struct bxe_softc *sc,
637 struct cmng_init *cmng,
639 static void bxe_set_reset_global(struct bxe_softc *sc);
640 static void bxe_set_reset_in_progress(struct bxe_softc *sc);
641 static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
643 static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
644 static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
647 static void bxe_int_disable(struct bxe_softc *sc);
648 static int bxe_release_leader_lock(struct bxe_softc *sc);
649 static void bxe_pf_disable(struct bxe_softc *sc);
650 static void bxe_free_fp_buffers(struct bxe_softc *sc);
651 static inline void bxe_update_rx_prod(struct bxe_softc *sc,
652 struct bxe_fastpath *fp,
655 uint16_t rx_sge_prod);
656 static void bxe_link_report_locked(struct bxe_softc *sc);
657 static void bxe_link_report(struct bxe_softc *sc);
658 static void bxe_link_status_update(struct bxe_softc *sc);
659 static void bxe_periodic_callout_func(void *xsc);
660 static void bxe_periodic_start(struct bxe_softc *sc);
661 static void bxe_periodic_stop(struct bxe_softc *sc);
662 static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
665 static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
667 static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
669 static uint8_t bxe_txeof(struct bxe_softc *sc,
670 struct bxe_fastpath *fp);
671 static void bxe_task_fp(struct bxe_fastpath *fp);
672 static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
675 static int bxe_alloc_mem(struct bxe_softc *sc);
676 static void bxe_free_mem(struct bxe_softc *sc);
677 static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
678 static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
679 static int bxe_interrupt_attach(struct bxe_softc *sc);
680 static void bxe_interrupt_detach(struct bxe_softc *sc);
681 static void bxe_set_rx_mode(struct bxe_softc *sc);
682 static int bxe_init_locked(struct bxe_softc *sc);
683 static int bxe_stop_locked(struct bxe_softc *sc);
684 static __noinline int bxe_nic_load(struct bxe_softc *sc,
686 static __noinline int bxe_nic_unload(struct bxe_softc *sc,
687 uint32_t unload_mode,
690 static void bxe_handle_sp_tq(void *context, int pending);
691 static void bxe_handle_fp_tq(void *context, int pending);
693 static int bxe_add_cdev(struct bxe_softc *sc);
694 static void bxe_del_cdev(struct bxe_softc *sc);
695 static int bxe_alloc_buf_rings(struct bxe_softc *sc);
696 static void bxe_free_buf_rings(struct bxe_softc *sc);
698 /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
700 calc_crc32(uint8_t *crc32_packet,
701 uint32_t crc32_length,
710 uint8_t current_byte = 0;
711 uint32_t crc32_result = crc32_seed;
712 const uint32_t CRC32_POLY = 0x1edc6f41;
714 if ((crc32_packet == NULL) ||
715 (crc32_length == 0) ||
716 ((crc32_length % 8) != 0))
718 return (crc32_result);
721 for (byte = 0; byte < crc32_length; byte = byte + 1)
723 current_byte = crc32_packet[byte];
724 for (bit = 0; bit < 8; bit = bit + 1)
726 /* msb = crc32_result[31]; */
727 msb = (uint8_t)(crc32_result >> 31);
729 crc32_result = crc32_result << 1;
731 /* if (msb != current_byte[bit]) */
732 if (msb != (0x1 & (current_byte >> bit)))
734 crc32_result = crc32_result ^ CRC32_POLY;
735 /* crc32_result[0] = 1 */
742 * 1. "mirror" every bit
743 * 2. swap the 4 bytes
744 * 3. complement each bit
749 shft = sizeof(crc32_result) * 8 - 1;
751 for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
754 temp |= crc32_result & 1;
758 /* temp[31-bit] = crc32_result[bit] */
762 /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
764 uint32_t t0, t1, t2, t3;
765 t0 = (0x000000ff & (temp >> 24));
766 t1 = (0x0000ff00 & (temp >> 8));
767 t2 = (0x00ff0000 & (temp << 8));
768 t3 = (0xff000000 & (temp << 24));
769 crc32_result = t0 | t1 | t2 | t3;
775 crc32_result = ~crc32_result;
778 return (crc32_result);
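/*
 * Usage sketch for the CRC helper above: it must be fed a buffer whose
 * length is a multiple of 8 bytes, otherwise it returns the seed unchanged:
 *
 *     crc = calc_crc32(buf, sizeof(buf), 0xffffffff, ...);
 *
 * (0xffffffff is a conventional all-ones seed; any arguments past the seed
 * are elided above, so the "..." is deliberate rather than a guess.)
 */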
783 volatile unsigned long *addr)
785 return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
789 bxe_set_bit(unsigned int nr,
790 volatile unsigned long *addr)
792 atomic_set_acq_long(addr, (1 << nr));
796 bxe_clear_bit(int nr,
797 volatile unsigned long *addr)
799 atomic_clear_acq_long(addr, (1 << nr));
803 bxe_test_and_set_bit(int nr,
804 volatile unsigned long *addr)
810 } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
811 // if (x & nr) bit_was_set; else bit_was_not_set;
816 bxe_test_and_clear_bit(int nr,
817 volatile unsigned long *addr)
823 } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
824 // if (x & nr) bit_was_set; else bit_was_not_set;
829 bxe_cmpxchg(volatile int *addr,
836 } while (atomic_cmpset_acq_int(addr, old, new) == 0);
841 * Get DMA memory from the OS.
843 * Validates that the OS has provided DMA buffers in response to a
844 * bus_dmamap_load call and saves the physical address of those buffers.
845 * When the callback is used, bus_dmamap_load() itself returns 0, so any
846 * mapping failure is reported through the callback's error argument and
847 * logged here rather than through the load call's return value.
853 bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
855 struct bxe_dma *dma = arg;
860 BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
862 dma->paddr = segs->ds_addr;
868 * Allocate a block of memory and map it for DMA. No partial completions are
869 * allowed; release any resources acquired if we can't acquire all of them.
873 * 0 = Success, !0 = Failure
876 bxe_dma_alloc(struct bxe_softc *sc,
884 BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
885 (unsigned long)dma->size);
889 memset(dma, 0, sizeof(*dma)); /* sanity */
892 snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
894 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
895 BCM_PAGE_SIZE, /* alignment */
896 0, /* boundary limit */
897 BUS_SPACE_MAXADDR, /* restricted low */
898 BUS_SPACE_MAXADDR, /* restricted hi */
899 NULL, /* addr filter() */
900 NULL, /* addr filter() arg */
901 size, /* max map size */
902 1, /* num discontinuous */
903 size, /* max seg size */
904 BUS_DMA_ALLOCNOW, /* flags */
906 NULL, /* lock() arg */
907 &dma->tag); /* returned dma tag */
909 BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
910 memset(dma, 0, sizeof(*dma));
914 rc = bus_dmamem_alloc(dma->tag,
915 (void **)&dma->vaddr,
916 (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
919 BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
920 bus_dma_tag_destroy(dma->tag);
921 memset(dma, 0, sizeof(*dma));
925 rc = bus_dmamap_load(dma->tag,
929 bxe_dma_map_addr, /* BLOGD in here */
933 BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
934 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
935 bus_dma_tag_destroy(dma->tag);
936 memset(dma, 0, sizeof(*dma));
944 bxe_dma_free(struct bxe_softc *sc,
948 DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
950 bus_dmamap_sync(dma->tag, dma->map,
951 (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
952 bus_dmamap_unload(dma->tag, dma->map);
953 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
954 bus_dma_tag_destroy(dma->tag);
957 memset(dma, 0, sizeof(*dma));
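/*
 * Usage sketch (illustrative, not part of the driver): bxe_dma_alloc() and
 * bxe_dma_free() are always used as a pair around a driver-lifetime buffer.
 */
static inline int
bxe_dma_pair_sketch(struct bxe_softc *sc)
{
    struct bxe_dma dma; /* hypothetical local; real users embed this in sc */

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "sketch buffer") != 0) {
        return (-1); /* tag create/alloc/load failed; dma was zeroed again */
    }

    /* ... use dma.vaddr (kernel VA) and dma.paddr (bus address) ... */

    bxe_dma_free(sc, &dma);
    return (0);
}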
961 * These indirect read and write routines are used only during init.
962 * The locking is handled by the MCP.
966 bxe_reg_wr_ind(struct bxe_softc *sc,
970 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
971 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
972 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
976 bxe_reg_rd_ind(struct bxe_softc *sc,
981 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
982 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
983 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
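/*
 * Usage sketch: these accessors tunnel GRC reads/writes through PCI config
 * space (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA) instead of BAR0, e.g.:
 *
 *     val = bxe_reg_rd_ind(sc, grc_offset);
 *
 * (grc_offset is a placeholder for any GRC register offset; after init the
 * normal REG_RD()/REG_WR() BAR-based path is used instead.)
 */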
989 bxe_acquire_hw_lock(struct bxe_softc *sc,
992 uint32_t lock_status;
993 uint32_t resource_bit = (1 << resource);
994 int func = SC_FUNC(sc);
995 uint32_t hw_lock_control_reg;
998 /* validate the resource is within range */
999 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1000 BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1001 " resource_bit 0x%x\n", resource, resource_bit);
1006 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1008 hw_lock_control_reg =
1009 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1012 /* validate the resource is not already taken */
1013 lock_status = REG_RD(sc, hw_lock_control_reg);
1014 if (lock_status & resource_bit) {
1015 BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1016 resource, lock_status, resource_bit);
1020 /* try every 5ms for 5 seconds */
1021 for (cnt = 0; cnt < 1000; cnt++) {
1022 REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1023 lock_status = REG_RD(sc, hw_lock_control_reg);
1024 if (lock_status & resource_bit) {
1030 BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1031 resource, resource_bit);
1036 bxe_release_hw_lock(struct bxe_softc *sc,
1039 uint32_t lock_status;
1040 uint32_t resource_bit = (1 << resource);
1041 int func = SC_FUNC(sc);
1042 uint32_t hw_lock_control_reg;
1044 /* validate the resource is within range */
1045 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1046 BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1047 " resource_bit 0x%x\n", resource, resource_bit);
1052 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1054 hw_lock_control_reg =
1055 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1058 /* validate the resource is currently taken */
1059 lock_status = REG_RD(sc, hw_lock_control_reg);
1060 if (!(lock_status & resource_bit)) {
1061 BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1062 resource, lock_status, resource_bit);
1066 REG_WR(sc, hw_lock_control_reg, resource_bit);
1069 static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1072 bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1075 static void bxe_release_phy_lock(struct bxe_softc *sc)
1077 bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1081 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1082 * had we done things the other way around, if two pfs from the same port
1083 * would attempt to access nvram at the same time, we could run into a
1085 * pf A takes the port lock.
1086 * pf B succeeds in taking the same lock since they are from the same port.
1087 * pf A takes the per pf misc lock. Performs eeprom access.
1088 * pf A finishes. Unlocks the per pf misc lock.
1089 * Pf B takes the lock and proceeds to perform its own access.
1090 * pf A unlocks the per port lock, while pf B is still working (!).
1091 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
1092 * access corrupted by pf B).
1095 bxe_acquire_nvram_lock(struct bxe_softc *sc)
1097 int port = SC_PORT(sc);
1101 /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1102 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1104 /* adjust timeout for emulation/FPGA */
1105 count = NVRAM_TIMEOUT_COUNT;
1106 if (CHIP_REV_IS_SLOW(sc)) {
1110 /* request access to nvram interface */
1111 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1112 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1114 for (i = 0; i < count*10; i++) {
1115 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1116 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1123 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1124 BLOGE(sc, "Cannot get access to nvram interface "
1125 "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1134 bxe_release_nvram_lock(struct bxe_softc *sc)
1136 int port = SC_PORT(sc);
1140 /* adjust timeout for emulation/FPGA */
1141 count = NVRAM_TIMEOUT_COUNT;
1142 if (CHIP_REV_IS_SLOW(sc)) {
1146 /* relinquish nvram interface */
1147 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1148 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1150 for (i = 0; i < count*10; i++) {
1151 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1152 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1159 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1160 BLOGE(sc, "Cannot free access to nvram interface "
1161 "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1166 /* release HW lock: protect against other PFs in PF Direct Assignment */
1167 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1173 bxe_enable_nvram_access(struct bxe_softc *sc)
1177 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1179 /* enable both bits, even on read */
1180 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1181 (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1185 bxe_disable_nvram_access(struct bxe_softc *sc)
1189 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1191 /* disable both bits, even after read */
1192 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1193 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1194 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1198 bxe_nvram_read_dword(struct bxe_softc *sc,
1206 /* build the command word */
1207 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1209 /* need to clear DONE bit separately */
1210 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1212 /* address of the NVRAM to read from */
1213 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1214 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1216 /* issue a read command */
1217 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1219 /* adjust timeout for emulation/FPGA */
1220 count = NVRAM_TIMEOUT_COUNT;
1221 if (CHIP_REV_IS_SLOW(sc)) {
1225 /* wait for completion */
1228 for (i = 0; i < count; i++) {
1230 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1232 if (val & MCPR_NVM_COMMAND_DONE) {
1233 val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1234 /* we read nvram data in cpu order
1235 * but ethtool sees it as an array of bytes;
1236 * converting to big-endian will do the work */
1238 *ret_val = htobe32(val);
1245 BLOGE(sc, "nvram read timeout expired "
1246 "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1247 offset, cmd_flags, val);
1254 bxe_nvram_read(struct bxe_softc *sc,
1263 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1264 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1269 if ((offset + buf_size) > sc->devinfo.flash_size) {
1270 BLOGE(sc, "Invalid parameter, "
1271 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1272 offset, buf_size, sc->devinfo.flash_size);
1276 /* request access to nvram interface */
1277 rc = bxe_acquire_nvram_lock(sc);
1282 /* enable access to nvram interface */
1283 bxe_enable_nvram_access(sc);
1285 /* read the first word(s) */
1286 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1287 while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1288 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1289 memcpy(ret_buf, &val, 4);
1291 /* advance to the next dword */
1292 offset += sizeof(uint32_t);
1293 ret_buf += sizeof(uint32_t);
1294 buf_size -= sizeof(uint32_t);
1299 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1300 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1301 memcpy(ret_buf, &val, 4);
1304 /* disable access to nvram interface */
1305 bxe_disable_nvram_access(sc);
1306 bxe_release_nvram_lock(sc);
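/*
 * Usage sketch (illustrative values): both the offset and the length passed
 * to bxe_nvram_read() must be 4-byte aligned and fit within the flash.
 */
static inline int
bxe_nvram_read_sketch(struct bxe_softc *sc)
{
    uint8_t buf[16]; /* hypothetical destination; dword-multiple length */

    /* offset 0 is illustrative; real callers use layout-specific offsets */
    return (bxe_nvram_read(sc, 0, buf, sizeof(buf)));
}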
1312 bxe_nvram_write_dword(struct bxe_softc *sc,
1319 /* build the command word */
1320 cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1322 /* need to clear DONE bit separately */
1323 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1325 /* write the data */
1326 REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1328 /* address of the NVRAM to write to */
1329 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1330 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1332 /* issue the write command */
1333 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1335 /* adjust timeout for emulation/FPGA */
1336 count = NVRAM_TIMEOUT_COUNT;
1337 if (CHIP_REV_IS_SLOW(sc)) {
1341 /* wait for completion */
1343 for (i = 0; i < count; i++) {
1345 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1346 if (val & MCPR_NVM_COMMAND_DONE) {
1353 BLOGE(sc, "nvram write timeout expired "
1354 "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1355 offset, cmd_flags, val);
1361 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
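/*
 * Worked example: BYTE_OFFSET(5) == 8 * (5 & 0x03) == 8, i.e. byte 5 lives
 * at bit offset 8 within its containing dword. bxe_nvram_write1() below uses
 * this to splice one byte into a read-modify-write of the aligned dword.
 */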
1364 bxe_nvram_write1(struct bxe_softc *sc,
1370 uint32_t align_offset;
1374 if ((offset + buf_size) > sc->devinfo.flash_size) {
1375 BLOGE(sc, "Invalid parameter, "
1376 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1377 offset, buf_size, sc->devinfo.flash_size);
1381 /* request access to nvram interface */
1382 rc = bxe_acquire_nvram_lock(sc);
1387 /* enable access to nvram interface */
1388 bxe_enable_nvram_access(sc);
1390 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1391 align_offset = (offset & ~0x03);
1392 rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1395 val &= ~(0xff << BYTE_OFFSET(offset));
1396 val |= (*data_buf << BYTE_OFFSET(offset));
1398 /* nvram data is returned as an array of bytes;
1399 * convert it back to cpu order */
1403 rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1406 /* disable access to nvram interface */
1407 bxe_disable_nvram_access(sc);
1408 bxe_release_nvram_lock(sc);
1414 bxe_nvram_write(struct bxe_softc *sc,
1421 uint32_t written_so_far;
1424 if (buf_size == 1) {
1425 return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1428 if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1429 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1434 if (buf_size == 0) {
1435 return (0); /* nothing to do */
1438 if ((offset + buf_size) > sc->devinfo.flash_size) {
1439 BLOGE(sc, "Invalid parameter, "
1440 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1441 offset, buf_size, sc->devinfo.flash_size);
1445 /* request access to nvram interface */
1446 rc = bxe_acquire_nvram_lock(sc);
1451 /* enable access to nvram interface */
1452 bxe_enable_nvram_access(sc);
1455 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1456 while ((written_so_far < buf_size) && (rc == 0)) {
1457 if (written_so_far == (buf_size - sizeof(uint32_t))) {
1458 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1459 } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1460 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1461 } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1462 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1465 memcpy(&val, data_buf, 4);
1467 rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1469 /* advance to the next dword */
1470 offset += sizeof(uint32_t);
1471 data_buf += sizeof(uint32_t);
1472 written_so_far += sizeof(uint32_t);
1476 /* disable access to nvram interface */
1477 bxe_disable_nvram_access(sc);
1478 bxe_release_nvram_lock(sc);
1483 /* copy command into DMAE command memory and set DMAE command Go */
1485 bxe_post_dmae(struct bxe_softc *sc,
1486 struct dmae_cmd *dmae,
1489 uint32_t cmd_offset;
1492 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1493 for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1494 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1497 REG_WR(sc, dmae_reg_go_c[idx], 1);
1501 bxe_dmae_opcode_add_comp(uint32_t opcode,
1504 return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1505 DMAE_CMD_C_TYPE_ENABLE));
1509 bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1511 return (opcode & ~DMAE_CMD_SRC_RESET);
1515 bxe_dmae_opcode(struct bxe_softc *sc,
1521 uint32_t opcode = 0;
1523 opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1524 (dst_type << DMAE_CMD_DST_SHIFT));
1526 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1528 opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1530 opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1531 (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1533 opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1536 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1538 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1542 opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1549 bxe_prep_dmae_with_comp(struct bxe_softc *sc,
1550 struct dmae_cmd *dmae,
1554 memset(dmae, 0, sizeof(struct dmae_cmd));
1556 /* set the opcode */
1557 dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1558 TRUE, DMAE_COMP_PCI);
1560 /* fill in the completion parameters */
1561 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1562 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1563 dmae->comp_val = DMAE_COMP_VAL;
1566 /* issue a DMAE command over the init channel and wait for completion */
1568 bxe_issue_dmae_with_comp(struct bxe_softc *sc,
1569 struct dmae_cmd *dmae)
1571 uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1572 int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1576 /* reset completion */
1579 /* post the command on the channel used for initializations */
1580 bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1582 /* wait for completion */
1585 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1587 (sc->recovery_state != BXE_RECOVERY_DONE &&
1588 sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1589 BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1590 *wb_comp, sc->recovery_state);
1591 BXE_DMAE_UNLOCK(sc);
1592 return (DMAE_TIMEOUT);
1599 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1600 BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1601 *wb_comp, sc->recovery_state);
1602 BXE_DMAE_UNLOCK(sc);
1603 return (DMAE_PCI_ERROR);
1606 BXE_DMAE_UNLOCK(sc);
1611 bxe_read_dmae(struct bxe_softc *sc,
1615 struct dmae_cmd dmae;
1619 DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1621 if (!sc->dmae_ready) {
1622 data = BXE_SP(sc, wb_data[0]);
1624 for (i = 0; i < len32; i++) {
1625 data[i] = (CHIP_IS_E1(sc)) ?
1626 bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1627 REG_RD(sc, (src_addr + (i * 4)));
1633 /* set opcode and fixed command fields */
1634 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1636 /* fill in addresses and len */
1637 dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1638 dmae.src_addr_hi = 0;
1639 dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1640 dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1643 /* issue the command and wait for completion */
1644 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1645 bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1650 bxe_write_dmae(struct bxe_softc *sc,
1651 bus_addr_t dma_addr,
1655 struct dmae_cmd dmae;
1658 if (!sc->dmae_ready) {
1659 DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1661 if (CHIP_IS_E1(sc)) {
1662 ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1664 ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1670 /* set opcode and fixed command fields */
1671 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1673 /* fill in addresses and len */
1674 dmae.src_addr_lo = U64_LO(dma_addr);
1675 dmae.src_addr_hi = U64_HI(dma_addr);
1676 dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1677 dmae.dst_addr_hi = 0;
1680 /* issue the command and wait for completion */
1681 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1682 bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1687 bxe_write_dmae_phys_len(struct bxe_softc *sc,
1688 bus_addr_t phys_addr,
1692 int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1695 while (len > dmae_wr_max) {
1697 (phys_addr + offset), /* src DMA address */
1698 (addr + offset), /* dst GRC address */
1700 offset += (dmae_wr_max * 4);
1705 (phys_addr + offset), /* src DMA address */
1706 (addr + offset), /* dst GRC address */
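/*
 * Worked example: if DMAE_LEN32_WR_MAX(sc) were 0x400 dwords, a 0x500-dword
 * write would be split into one 0x400-dword DMAE command followed by one
 * 0x100-dword command, with offset advancing 4 bytes per dword written.
 * (0x400 is illustrative; the real limit is chip-dependent.)
 */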
1711 bxe_set_ctx_validation(struct bxe_softc *sc,
1712 struct eth_context *cxt,
1715 /* ustorm cxt validation */
1716 cxt->ustorm_ag_context.cdu_usage =
1717 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1718 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1719 /* xcontext validation */
1720 cxt->xstorm_ag_context.cdu_reserved =
1721 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1722 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1726 bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1733 (BAR_CSTRORM_INTMEM +
1734 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1736 REG_WR8(sc, addr, ticks);
1739 "port %d fw_sb_id %d sb_index %d ticks %d\n",
1740 port, fw_sb_id, sb_index, ticks);
1744 bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1750 uint32_t enable_flag =
1751 (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1753 (BAR_CSTRORM_INTMEM +
1754 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1758 flags = REG_RD8(sc, addr);
1759 flags &= ~HC_INDEX_DATA_HC_ENABLED;
1760 flags |= enable_flag;
1761 REG_WR8(sc, addr, flags);
1764 "port %d fw_sb_id %d sb_index %d disable %d\n",
1765 port, fw_sb_id, sb_index, disable);
1769 bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1775 int port = SC_PORT(sc);
1776 uint8_t ticks = (usec / 4); /* XXX ??? */
1778 bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1780 disable = (disable) ? 1 : ((usec) ? 0 : 1);
1781 bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
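/*
 * Worked example: with the default hc_rx_ticks of 25 usecs, ticks becomes
 * 25 / 4 == 6 hardware units, and a usec value of 0 forces disable = 1.
 */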
1785 elink_cb_udelay(struct bxe_softc *sc,
1792 elink_cb_reg_read(struct bxe_softc *sc,
1795 return (REG_RD(sc, reg_addr));
1799 elink_cb_reg_write(struct bxe_softc *sc,
1803 REG_WR(sc, reg_addr, val);
1807 elink_cb_reg_wb_write(struct bxe_softc *sc,
1812 REG_WR_DMAE(sc, offset, wb_write, len);
1816 elink_cb_reg_wb_read(struct bxe_softc *sc,
1821 REG_RD_DMAE(sc, offset, wb_write, len);
1825 elink_cb_path_id(struct bxe_softc *sc)
1827 return (SC_PATH(sc));
1831 elink_cb_event_log(struct bxe_softc *sc,
1832 const elink_log_id_t elink_log_id,
1836 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1840 bxe_set_spio(struct bxe_softc *sc,
1846 /* Only 2 SPIOs are configurable */
1847 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1848 BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1852 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1854 /* read SPIO and mask except the float bits */
1855 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1858 case MISC_SPIO_OUTPUT_LOW:
1859 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1860 /* clear FLOAT and set CLR */
1861 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1862 spio_reg |= (spio << MISC_SPIO_CLR_POS);
1865 case MISC_SPIO_OUTPUT_HIGH:
1866 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1867 /* clear FLOAT and set SET */
1868 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1869 spio_reg |= (spio << MISC_SPIO_SET_POS);
1872 case MISC_SPIO_INPUT_HI_Z:
1873 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1875 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1882 REG_WR(sc, MISC_REG_SPIO, spio_reg);
1883 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
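/*
 * Usage sketch: drive SPIO5 high, e.g. for a board-specific control line:
 *
 *     bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_OUTPUT_HIGH);
 *
 * (The SPIO5/use-case pairing is illustrative; only SPIO4 and SPIO5 are
 * configurable, as checked above.)
 */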
1889 bxe_gpio_read(struct bxe_softc *sc,
1893 /* The GPIO should be swapped if swap register is set and active */
1894 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1895 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1896 int gpio_shift = (gpio_num +
1897 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1898 uint32_t gpio_mask = (1 << gpio_shift);
1901 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1902 BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1903 " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1908 /* read GPIO value */
1909 gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1911 /* get the requested pin value */
1912 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1916 bxe_gpio_write(struct bxe_softc *sc,
1921 /* The GPIO should be swapped if swap register is set and active */
1922 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1923 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1924 int gpio_shift = (gpio_num +
1925 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1926 uint32_t gpio_mask = (1 << gpio_shift);
1929 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1930 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1931 " gpio_shift %d gpio_mask 0x%x\n",
1932 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1936 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1938 /* read GPIO and mask except the float bits */
1939 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1942 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1944 "Set GPIO %d (shift %d) -> output low\n",
1945 gpio_num, gpio_shift);
1946 /* clear FLOAT and set CLR */
1947 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1951 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1953 "Set GPIO %d (shift %d) -> output high\n",
1954 gpio_num, gpio_shift);
1955 /* clear FLOAT and set SET */
1956 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1957 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1960 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1962 "Set GPIO %d (shift %d) -> input\n",
1963 gpio_num, gpio_shift);
1965 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1972 REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1973 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1979 bxe_gpio_mult_write(struct bxe_softc *sc,
1985 /* any port swapping should be handled by caller */
1987 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1989 /* read GPIO and mask except the float bits */
1990 gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1991 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1992 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
1993 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
1996 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1997 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
1999 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2002 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2003 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2005 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2008 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2009 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2011 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2015 BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2016 " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2017 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2021 REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2022 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2028 bxe_gpio_int_write(struct bxe_softc *sc,
2033 /* The GPIO should be swapped if swap register is set and active */
2034 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2035 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2036 int gpio_shift = (gpio_num +
2037 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2038 uint32_t gpio_mask = (1 << gpio_shift);
2041 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2042 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2043 " gpio_shift %d gpio_mask 0x%x\n",
2044 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2048 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2051 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2054 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2056 "Clear GPIO INT %d (shift %d) -> output low\n",
2057 gpio_num, gpio_shift);
2058 /* clear SET and set CLR */
2059 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2060 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2063 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2065 "Set GPIO INT %d (shift %d) -> output high\n",
2066 gpio_num, gpio_shift);
2067 /* clear CLR and set SET */
2068 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2069 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2076 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2077 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2083 elink_cb_gpio_read(struct bxe_softc *sc,
2087 return (bxe_gpio_read(sc, gpio_num, port));
2091 elink_cb_gpio_write(struct bxe_softc *sc,
2093 uint8_t mode, /* 0=low 1=high */
2096 return (bxe_gpio_write(sc, gpio_num, mode, port));
2100 elink_cb_gpio_mult_write(struct bxe_softc *sc,
2102 uint8_t mode) /* 0=low 1=high */
2104 return (bxe_gpio_mult_write(sc, pins, mode));
2108 elink_cb_gpio_int_write(struct bxe_softc *sc,
2110 uint8_t mode, /* 0=low 1=high */
2113 return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2117 elink_cb_notify_link_changed(struct bxe_softc *sc)
2119 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2120 (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2123 /* send the MCP a request, block until there is a reply */
2125 elink_cb_fw_command(struct bxe_softc *sc,
2129 int mb_idx = SC_FW_MB_IDX(sc);
2133 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2138 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2139 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2142 "wrote command 0x%08x to FW MB param 0x%08x\n",
2143 (command | seq), param);
2145 /* Let the FW do its magic. Give it up to 5 seconds... */
2147 DELAY(delay * 1000);
2148 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2149 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2152 "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2153 cnt*delay, rc, seq);
2155 /* is this a reply to our command? */
2156 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2157 rc &= FW_MSG_CODE_MASK;
2160 BLOGE(sc, "FW failed to respond!\n");
2161 // XXX bxe_fw_dump(sc);
2165 BXE_FWMB_UNLOCK(sc);
2170 bxe_fw_command(struct bxe_softc *sc,
2174 return (elink_cb_fw_command(sc, command, param));
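/*
 * Usage sketch: a firmware handshake through this path would look like
 *
 *     rc = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, param);
 *
 * where rc is the FW_MSG_CODE_* reply masked out of the mailbox header
 * (DRV_MSG_CODE_LOAD_REQ and param are assumptions for illustration).
 */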
2178 __storm_memset_dma_mapping(struct bxe_softc *sc,
2182 REG_WR(sc, addr, U64_LO(mapping));
2183 REG_WR(sc, (addr + 4), U64_HI(mapping));
2187 storm_memset_spq_addr(struct bxe_softc *sc,
2191 uint32_t addr = (XSEM_REG_FAST_MEMORY +
2192 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2193 __storm_memset_dma_mapping(sc, addr, mapping);
2197 storm_memset_vf_to_pf(struct bxe_softc *sc,
2201 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2202 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2203 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2204 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2208 storm_memset_func_en(struct bxe_softc *sc,
2212 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2213 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2214 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2215 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2219 storm_memset_eq_data(struct bxe_softc *sc,
2220 struct event_ring_data *eq_data,
2226 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2227 size = sizeof(struct event_ring_data);
2228 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2232 storm_memset_eq_prod(struct bxe_softc *sc,
2236 uint32_t addr = (BAR_CSTRORM_INTMEM +
2237 CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2238 REG_WR16(sc, addr, eq_prod);
2242 * Post a slowpath command.
2244 * A slowpath command is used to propagate a configuration change through
2245 * the controller in a controlled manner, allowing each STORM processor and
2246 * other H/W blocks to phase in the change. The commands sent on the
2247 * slowpath are referred to as ramrods. Depending on the ramrod used, the
2248 * completion of the ramrod will occur in different ways. Here's a
2249 * breakdown of ramrods and how they complete:
2251 * RAMROD_CMD_ID_ETH_PORT_SETUP
2252 * Used to setup the leading connection on a port. Completes on the
2253 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
2255 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2256 * Used to setup an additional connection on a port. Completes on the
2257 * RCQ of the multi-queue/RSS connection being initialized.
2259 * RAMROD_CMD_ID_ETH_STAT_QUERY
2260 * Used to force the storm processors to update the statistics database
2261 * in host memory. This ramrod is sent on the leading connection CID and
2262 * completes as an index increment of the CSTORM on the default status
2265 * RAMROD_CMD_ID_ETH_UPDATE
2266  *   Used to update the state of the leading connection, usually to update
2267 * the RSS indirection table. Completes on the RCQ of the leading
2268 * connection. (Not currently used under FreeBSD until OS support becomes
2271 * RAMROD_CMD_ID_ETH_HALT
2272 * Used when tearing down a connection prior to driver unload. Completes
2273 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2274 * use this on the leading connection.
2276 * RAMROD_CMD_ID_ETH_SET_MAC
2277 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
2278 * the RCQ of the leading connection.
2280 * RAMROD_CMD_ID_ETH_CFC_DEL
2281  *   Used when tearing down a connection prior to driver unload. Completes
2282 * on the RCQ of the leading connection (since the current connection
2283 * has been completely removed from controller memory).
2285 * RAMROD_CMD_ID_ETH_PORT_DEL
2286 * Used to tear down the leading connection prior to driver unload,
2287 * typically fp[0]. Completes as an index increment of the CSTORM on the
2288 * default status block.
2290 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2291 * Used for connection offload. Completes on the RCQ of the multi-queue
2292 * RSS connection that is being offloaded. (Not currently used under
2295 * There can only be one command pending per function.
2298 * 0 = Success, !0 = Failure.
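/*
 * Example (a hedged sketch of a typical ramrod post via bxe_sp_post()
 * below; the exact command/connection-type pairing varies by caller, and
 * ETH_CONNECTION_TYPE is assumed here from the shared HSI definitions):
 *
 *     if (bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
 *                     U64_HI(data_mapping), U64_LO(data_mapping),
 *                     ETH_CONNECTION_TYPE) != 0) {
 *         // the SPQ/EQ ring was full; the caller must back off and retry
 *     }
 */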
2301 /* must be called under the spq lock */
2303 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2305 struct eth_spe *next_spe = sc->spq_prod_bd;
2307 if (sc->spq_prod_bd == sc->spq_last_bd) {
2308 /* wrap back to the first eth_spq */
2309 sc->spq_prod_bd = sc->spq;
2310 sc->spq_prod_idx = 0;
2319 /* must be called under the spq lock */
2321 void bxe_sp_prod_update(struct bxe_softc *sc)
2323 int func = SC_FUNC(sc);
2326 * Make sure that BD data is updated before writing the producer.
2327 * BD data is written to the memory, the producer is read from the
2328 * memory, thus we need a full memory barrier to ensure the ordering.
2332 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2335 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2336 BUS_SPACE_BARRIER_WRITE);
2340 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2342 * @cmd: command to check
2343 * @cmd_type: command type
2346 int bxe_is_contextless_ramrod(int cmd,
2349 if ((cmd_type == NONE_CONNECTION_TYPE) ||
2350 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2351 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2352 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2353 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2354 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2355 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2363 * bxe_sp_post - place a single command on an SP ring
2365 * @sc: driver handle
2366 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2367 * @cid: SW CID the command is related to
2368 * @data_hi: command private data address (high 32 bits)
2369 * @data_lo: command private data address (low 32 bits)
2370 * @cmd_type: command type (e.g. NONE, ETH)
2372 * SP data is handled as if it's always an address pair, thus data fields are
2373 * not swapped to little endian in upper functions. Instead this function swaps
2374 * data as if it's two uint32 fields.
2377 bxe_sp_post(struct bxe_softc *sc,
2384 struct eth_spe *spe;
2388 common = bxe_is_contextless_ramrod(command, cmd_type);
2393 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2394 BLOGE(sc, "EQ ring is full!\n");
2399 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2400 BLOGE(sc, "SPQ ring is full!\n");
2406 spe = bxe_sp_get_next(sc);
2408     /* CID needs the port number to be encoded in it */
2409 spe->hdr.conn_and_cmd_data =
2410 htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2412 type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2414 /* TBD: Check if it works for VFs */
2415 type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2416 SPE_HDR_T_FUNCTION_ID);
2418 spe->hdr.type = htole16(type);
2420 spe->data.update_data_addr.hi = htole32(data_hi);
2421 spe->data.update_data_addr.lo = htole32(data_lo);
2424 * It's ok if the actual decrement is issued towards the memory
2425      * somewhere between the lock and unlock. Thus no more explicit
2426 * memory barrier is needed.
2429 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2431 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2434 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2435 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2436 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2438 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2440 (uint32_t)U64_HI(sc->spq_dma.paddr),
2441 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2448 atomic_load_acq_long(&sc->cq_spq_left),
2449 atomic_load_acq_long(&sc->eq_spq_left));
2451 bxe_sp_prod_update(sc);
2458 * bxe_debug_print_ind_table - prints the indirection table configuration.
2460  * @sc: driver handle
2461 * @p: pointer to rss configuration
2465 * FreeBSD Device probe function.
2467 * Compares the device found to the driver's list of supported devices and
2468  * reports back to the BSD loader whether this is the right driver for the device.
2469 * This is the driver entry function called from the "kldload" command.
2472 * BUS_PROBE_DEFAULT on success, positive value on failure.
2475 bxe_probe(device_t dev)
2477 struct bxe_device_type *t;
2479 uint16_t did, sdid, svid, vid;
2481 /* Find our device structure */
2484 /* Get the data for the device to be probed. */
2485 vid = pci_get_vendor(dev);
2486 did = pci_get_device(dev);
2487 svid = pci_get_subvendor(dev);
2488 sdid = pci_get_subdevice(dev);
2490 /* Look through the list of known devices for a match. */
2491 while (t->bxe_name != NULL) {
2492 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2493 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2494 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2495 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2496 if (descbuf == NULL)
2499 /* Print out the device identity. */
2500 snprintf(descbuf, BXE_DEVDESC_MAX,
2501 "%s (%c%d) BXE v:%s\n", t->bxe_name,
2502 (((pci_read_config(dev, PCIR_REVID, 4) &
2504 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2505 BXE_DRIVER_VERSION);
2507 device_set_desc_copy(dev, descbuf);
2508 free(descbuf, M_TEMP);
2509 return (BUS_PROBE_DEFAULT);
2518 bxe_init_mutexes(struct bxe_softc *sc)
2520 #ifdef BXE_CORE_LOCK_SX
2521 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2522 "bxe%d_core_lock", sc->unit);
2523 sx_init(&sc->core_sx, sc->core_sx_name);
2525 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2526 "bxe%d_core_lock", sc->unit);
2527 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2530 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2531 "bxe%d_sp_lock", sc->unit);
2532 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2534 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2535 "bxe%d_dmae_lock", sc->unit);
2536 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2538 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2539 "bxe%d_phy_lock", sc->unit);
2540 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2542 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2543 "bxe%d_fwmb_lock", sc->unit);
2544 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2546 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2547 "bxe%d_print_lock", sc->unit);
2548 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2550 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2551 "bxe%d_stats_lock", sc->unit);
2552 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2554 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2555 "bxe%d_mcast_lock", sc->unit);
2556 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2560 bxe_release_mutexes(struct bxe_softc *sc)
2562 #ifdef BXE_CORE_LOCK_SX
2563 sx_destroy(&sc->core_sx);
2565 if (mtx_initialized(&sc->core_mtx)) {
2566 mtx_destroy(&sc->core_mtx);
2570 if (mtx_initialized(&sc->sp_mtx)) {
2571 mtx_destroy(&sc->sp_mtx);
2574 if (mtx_initialized(&sc->dmae_mtx)) {
2575 mtx_destroy(&sc->dmae_mtx);
2578 if (mtx_initialized(&sc->port.phy_mtx)) {
2579 mtx_destroy(&sc->port.phy_mtx);
2582 if (mtx_initialized(&sc->fwmb_mtx)) {
2583 mtx_destroy(&sc->fwmb_mtx);
2586 if (mtx_initialized(&sc->print_mtx)) {
2587 mtx_destroy(&sc->print_mtx);
2590 if (mtx_initialized(&sc->stats_mtx)) {
2591 mtx_destroy(&sc->stats_mtx);
2594 if (mtx_initialized(&sc->mcast_mtx)) {
2595 mtx_destroy(&sc->mcast_mtx);
2600 bxe_tx_disable(struct bxe_softc* sc)
2604 /* tell the stack the driver is stopped and TX queue is full */
2606 if_setdrvflags(ifp, 0);
2611 bxe_drv_pulse(struct bxe_softc *sc)
2613 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2614 sc->fw_drv_pulse_wr_seq);
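/*
 * The write above is the driver half of the driver/MCP heartbeat: the
 * driver periodically advances its pulse sequence number in shmem and the
 * MCP echoes it back, letting each side detect that the other is still
 * alive. (A summary of the shmem pulse protocol; the echo field checked
 * on the MCP side is not shown here.)
 */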
2617 static inline uint16_t
2618 bxe_tx_avail(struct bxe_softc *sc,
2619 struct bxe_fastpath *fp)
2625 prod = fp->tx_bd_prod;
2626 cons = fp->tx_bd_cons;
2628 used = SUB_S16(prod, cons);
2630 return (int16_t)(sc->tx_ring_size) - used;
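/*
 * Worked example of the 16-bit modular arithmetic above (illustrative
 * numbers only): with a wrapped producer prod=0x0005 and cons=0xfffb,
 * SUB_S16(prod, cons) yields 10 in-use BDs, so a 4096-entry ring still
 * has 4086 BDs available.
 */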
2634 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2638 mb(); /* status block fields can change */
2639 hw_cons = le16toh(*fp->tx_cons_sb);
2640 return (hw_cons != fp->tx_pkt_cons);
2643 static inline uint8_t
2644 bxe_has_tx_work(struct bxe_fastpath *fp)
2646 /* expand this for multi-cos if ever supported */
2647 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2651 bxe_has_rx_work(struct bxe_fastpath *fp)
2653 uint16_t rx_cq_cons_sb;
2655 mb(); /* status block fields can change */
2656 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2657 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2659 return (fp->rx_cq_cons != rx_cq_cons_sb);
2663 bxe_sp_event(struct bxe_softc *sc,
2664 struct bxe_fastpath *fp,
2665 union eth_rx_cqe *rr_cqe)
2667 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2668 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2669 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2670 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2672 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2673 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2676 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2677 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2678 drv_cmd = ECORE_Q_CMD_UPDATE;
2681 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2682 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2683 drv_cmd = ECORE_Q_CMD_SETUP;
2686 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2687 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2688 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2691 case (RAMROD_CMD_ID_ETH_HALT):
2692 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2693 drv_cmd = ECORE_Q_CMD_HALT;
2696 case (RAMROD_CMD_ID_ETH_TERMINATE):
2697         BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2698 drv_cmd = ECORE_Q_CMD_TERMINATE;
2701 case (RAMROD_CMD_ID_ETH_EMPTY):
2702 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2703 drv_cmd = ECORE_Q_CMD_EMPTY;
2707 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2708 command, fp->index);
2712 if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2713 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2715 * q_obj->complete_cmd() failure means that this was
2716 * an unexpected completion.
2718 * In this case we don't want to increase the sc->spq_left
2719 * because apparently we haven't sent this command the first
2722 // bxe_panic(sc, ("Unexpected SP completion\n"));
2726 atomic_add_acq_long(&sc->cq_spq_left, 1);
2728 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2729 atomic_load_acq_long(&sc->cq_spq_left));
2733 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2734 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2735 * the current aggregation queue as in-progress.
2738 bxe_tpa_start(struct bxe_softc *sc,
2739 struct bxe_fastpath *fp,
2743 struct eth_fast_path_rx_cqe *cqe)
2745 struct bxe_sw_rx_bd tmp_bd;
2746 struct bxe_sw_rx_bd *rx_buf;
2747 struct eth_rx_bd *rx_bd;
2749 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2752 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2753 "cons=%d prod=%d\n",
2754 fp->index, queue, cons, prod);
2756 max_agg_queues = MAX_AGG_QS(sc);
2758 KASSERT((queue < max_agg_queues),
2759 ("fp[%02d] invalid aggr queue (%d >= %d)!",
2760 fp->index, queue, max_agg_queues));
2762 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2763 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2766 /* copy the existing mbuf and mapping from the TPA pool */
2767 tmp_bd = tpa_info->bd;
2769 if (tmp_bd.m == NULL) {
2772 tmp = (uint32_t *)cqe;
2774         BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2775 fp->index, queue, cons, prod);
2776 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2777 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2779 /* XXX Error handling? */
2783 /* change the TPA queue to the start state */
2784 tpa_info->state = BXE_TPA_STATE_START;
2785 tpa_info->placement_offset = cqe->placement_offset;
2786 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2787 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2788 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2790 fp->rx_tpa_queue_used |= (1 << queue);
2793 * If all the buffer descriptors are filled with mbufs then fill in
2794 * the current consumer index with a new BD. Else if a maximum Rx
2795 * buffer limit is imposed then fill in the next producer index.
2797 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2800 /* move the received mbuf and mapping to TPA pool */
2801 tpa_info->bd = fp->rx_mbuf_chain[cons];
2803 /* release any existing RX BD mbuf mappings */
2804 if (cons != index) {
2805 rx_buf = &fp->rx_mbuf_chain[cons];
2807 if (rx_buf->m_map != NULL) {
2808 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2809 BUS_DMASYNC_POSTREAD);
2810 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2814 * We get here when the maximum number of rx buffers is less than
2815 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2816 * it out here without concern of a memory leak.
2818 fp->rx_mbuf_chain[cons].m = NULL;
2821 /* update the Rx SW BD with the mbuf info from the TPA pool */
2822 fp->rx_mbuf_chain[index] = tmp_bd;
2824 /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2825 rx_bd = &fp->rx_chain[index];
2826 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2827 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2831 * When a TPA aggregation is completed, loop through the individual mbufs
2832 * of the aggregation, combining them into a single mbuf which will be sent
2833 * up the stack. Refill all freed SGEs with mbufs as we go along.
2836 bxe_fill_frag_mbuf(struct bxe_softc *sc,
2837 struct bxe_fastpath *fp,
2838 struct bxe_sw_tpa_info *tpa_info,
2842 struct eth_end_agg_rx_cqe *cqe,
2845 struct mbuf *m_frag;
2846 uint32_t frag_len, frag_size, i;
2851 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2854 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2855 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2857 /* make sure the aggregated frame is not too big to handle */
2858 if (pages > 8 * PAGES_PER_SGE) {
2860 uint32_t *tmp = (uint32_t *)cqe;
2862 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2863 "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2864 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2865 tpa_info->len_on_bd, frag_size);
2867 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2868 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2870 bxe_panic(sc, ("sge page count error\n"));
2875 * Scan through the scatter gather list pulling individual mbufs into a
2876 * single mbuf for the host stack.
2878 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2879 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2882 * Firmware gives the indices of the SGE as if the ring is an array
2883 * (meaning that the "next" element will consume 2 indices).
2885 frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2887 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2888 "sge_idx=%d frag_size=%d frag_len=%d\n",
2889 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2891 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2893 /* allocate a new mbuf for the SGE */
2894 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2896 /* Leave all remaining SGEs in the ring! */
2900 /* update the fragment length */
2901 m_frag->m_len = frag_len;
2903 /* concatenate the fragment to the head mbuf */
2905 fp->eth_q_stats.mbuf_alloc_sge--;
2907 /* update the TPA mbuf size and remaining fragment size */
2908 m->m_pkthdr.len += frag_len;
2909 frag_size -= frag_len;
2913 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2914 fp->index, queue, frag_size);
2920 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2924 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2925 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2927 for (j = 0; j < 2; j++) {
2928 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2935 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2937 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2938 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2941      * Clear the last two indices in each page. These are the indices that
2942      * correspond to the "next" element and hence will never be indicated;
2943      * they should be removed from the calculations.
2945 bxe_clear_sge_mask_next_elems(fp);
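/*
 * The SGE mask is a bit vector packed into 64-bit words, one bit per SGE
 * ring entry. A minimal sketch of the indexing implied by
 * BIT_VEC64_ELEM_SHIFT (assuming the conventional macro definitions):
 *
 *     word = idx >> 6;      // BIT_VEC64_ELEM_SHIFT: idx / 64
 *     bit  = idx & 0x3f;    // idx % 64
 *     fp->sge_mask[word] &= ~(1ULL << bit);   // BIT_VEC64_CLEAR_BIT
 */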
2949 bxe_update_last_max_sge(struct bxe_fastpath *fp,
2952 uint16_t last_max = fp->last_max_sge;
2954 if (SUB_S16(idx, last_max) > 0) {
2955 fp->last_max_sge = idx;
2960 bxe_update_sge_prod(struct bxe_softc *sc,
2961 struct bxe_fastpath *fp,
2963 union eth_sgl_or_raw_data *cqe)
2965 uint16_t last_max, last_elem, first_elem;
2973 /* first mark all used pages */
2974 for (i = 0; i < sge_len; i++) {
2975 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2976 RX_SGE(le16toh(cqe->sgl[i])));
2980 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2981 fp->index, sge_len - 1,
2982 le16toh(cqe->sgl[sge_len - 1]));
2984 /* assume that the last SGE index is the biggest */
2985 bxe_update_last_max_sge(fp,
2986 le16toh(cqe->sgl[sge_len - 1]));
2988 last_max = RX_SGE(fp->last_max_sge);
2989 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2990 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2992 /* if ring is not full */
2993 if (last_elem + 1 != first_elem) {
2997 /* now update the prod */
2998 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
2999 if (__predict_true(fp->sge_mask[i])) {
3003 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3004 delta += BIT_VEC64_ELEM_SZ;
3008 fp->rx_sge_prod += delta;
3009 /* clear page-end entries */
3010 bxe_clear_sge_mask_next_elems(fp);
3014 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3015 fp->index, fp->last_max_sge, fp->rx_sge_prod);
3019 * The aggregation on the current TPA queue has completed. Pull the individual
3020 * mbuf fragments together into a single mbuf, perform all necessary checksum
3021  * calculations, and send the resulting mbuf to the stack.
3024 bxe_tpa_stop(struct bxe_softc *sc,
3025 struct bxe_fastpath *fp,
3026 struct bxe_sw_tpa_info *tpa_info,
3029 struct eth_end_agg_rx_cqe *cqe,
3037 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3038 fp->index, queue, tpa_info->placement_offset,
3039 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3043 /* allocate a replacement before modifying existing mbuf */
3044 rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3046 /* drop the frame and log an error */
3047 fp->eth_q_stats.rx_soft_errors++;
3048 goto bxe_tpa_stop_exit;
3051 /* we have a replacement, fixup the current mbuf */
3052 m_adj(m, tpa_info->placement_offset);
3053 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3055 /* mark the checksums valid (taken care of by the firmware) */
3056 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3057 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3058 m->m_pkthdr.csum_data = 0xffff;
3059 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3064 /* aggregate all of the SGEs into a single mbuf */
3065 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3067 /* drop the packet and log an error */
3068 fp->eth_q_stats.rx_soft_errors++;
3071 if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3072 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3073 m->m_flags |= M_VLANTAG;
3076     /* assign the packet to this interface */
3077 if_setrcvif(m, ifp);
3079 #if __FreeBSD_version >= 800000
3080 /* specify what RSS queue was used for this flow */
3081 m->m_pkthdr.flowid = fp->index;
3085 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3086 fp->eth_q_stats.rx_tpa_pkts++;
3088 /* pass the frame to the stack */
3092 /* we passed an mbuf up the stack or dropped the frame */
3093 fp->eth_q_stats.mbuf_alloc_tpa--;
3097 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3098 fp->rx_tpa_queue_used &= ~(1 << queue);
3103 struct bxe_fastpath *fp,
3107 struct eth_fast_path_rx_cqe *cqe_fp)
3109 struct mbuf *m_frag;
3110 uint16_t frags, frag_len;
3111 uint16_t sge_idx = 0;
3116 /* adjust the mbuf */
3119 frag_size = len - lenonbd;
3120 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3122 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3123 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3125 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3126 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3127 m_frag->m_len = frag_len;
3129 /* allocate a new mbuf for the SGE */
3130 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3132 /* Leave all remaining SGEs in the ring! */
3135 fp->eth_q_stats.mbuf_alloc_sge--;
3137 /* concatenate the fragment to the head mbuf */
3140 frag_size -= frag_len;
3143 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3149 bxe_rxeof(struct bxe_softc *sc,
3150 struct bxe_fastpath *fp)
3153 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3154 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3160 /* CQ "next element" is of the size of the regular element */
3161 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3162 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3166 bd_cons = fp->rx_bd_cons;
3167 bd_prod = fp->rx_bd_prod;
3168 bd_prod_fw = bd_prod;
3169 sw_cq_cons = fp->rx_cq_cons;
3170 sw_cq_prod = fp->rx_cq_prod;
3173 * Memory barrier necessary as speculative reads of the rx
3174 * buffer can be ahead of the index in the status block
3179 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3180 fp->index, hw_cq_cons, sw_cq_cons);
3182 while (sw_cq_cons != hw_cq_cons) {
3183 struct bxe_sw_rx_bd *rx_buf = NULL;
3184 union eth_rx_cqe *cqe;
3185 struct eth_fast_path_rx_cqe *cqe_fp;
3186 uint8_t cqe_fp_flags;
3187 enum eth_rx_cqe_type cqe_fp_type;
3188 uint16_t len, lenonbd, pad;
3189 struct mbuf *m = NULL;
3191 comp_ring_cons = RCQ(sw_cq_cons);
3192 bd_prod = RX_BD(bd_prod);
3193 bd_cons = RX_BD(bd_cons);
3195 cqe = &fp->rcq_chain[comp_ring_cons];
3196 cqe_fp = &cqe->fast_path_cqe;
3197 cqe_fp_flags = cqe_fp->type_error_flags;
3198 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3201 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3202 "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3203 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3209 CQE_TYPE(cqe_fp_flags),
3211 cqe_fp->status_flags,
3212 le32toh(cqe_fp->rss_hash_result),
3213 le16toh(cqe_fp->vlan_tag),
3214 le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3215 le16toh(cqe_fp->len_on_bd));
3217 /* is this a slowpath msg? */
3218 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3219 bxe_sp_event(sc, fp, cqe);
3223 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3225 if (!CQE_TYPE_FAST(cqe_fp_type)) {
3226 struct bxe_sw_tpa_info *tpa_info;
3227 uint16_t frag_size, pages;
3230 if (CQE_TYPE_START(cqe_fp_type)) {
3231 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3232 bd_cons, bd_prod, cqe_fp);
3233 m = NULL; /* packet not ready yet */
3237 KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3238 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3240 queue = cqe->end_agg_cqe.queue_index;
3241 tpa_info = &fp->rx_tpa_info[queue];
3243 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3246 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3247 tpa_info->len_on_bd);
3248 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3250 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3251 &cqe->end_agg_cqe, comp_ring_cons);
3253 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3260 /* is this an error packet? */
3261 if (__predict_false(cqe_fp_flags &
3262 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3263 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3264 fp->eth_q_stats.rx_soft_errors++;
3268 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3269 lenonbd = le16toh(cqe_fp->len_on_bd);
3270 pad = cqe_fp->placement_offset;
3274 if (__predict_false(m == NULL)) {
3275 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3276 bd_cons, fp->index);
3280 /* XXX double copy if packet length under a threshold */
3283 * If all the buffer descriptors are filled with mbufs then fill in
3284 * the current consumer index with a new BD. Else if a maximum Rx
3285 * buffer limit is imposed then fill in the next producer index.
3287 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3288 (sc->max_rx_bufs != RX_BD_USABLE) ?
3292 /* we simply reuse the received mbuf and don't post it to the stack */
3295 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3297 fp->eth_q_stats.rx_soft_errors++;
3299 if (sc->max_rx_bufs != RX_BD_USABLE) {
3300 /* copy this consumer index to the producer index */
3301 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3302 sizeof(struct bxe_sw_rx_bd));
3303 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3309 /* current mbuf was detached from the bd */
3310 fp->eth_q_stats.mbuf_alloc_rx--;
3312 /* we allocated a replacement mbuf, fixup the current one */
3314 m->m_pkthdr.len = m->m_len = len;
3316 if ((len > 60) && (len > lenonbd)) {
3317 fp->eth_q_stats.rx_bxe_service_rxsgl++;
3318 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3321 fp->eth_q_stats.rx_jumbo_sge_pkts++;
3322 } else if (lenonbd < len) {
3323 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3326         /* assign the packet to this interface */
3327 if_setrcvif(m, ifp);
3329         /* assume no hardware checksum has been completed */
3330 m->m_pkthdr.csum_flags = 0;
3332 /* validate checksum if offload enabled */
3333 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3334 /* check for a valid IP frame */
3335 if (!(cqe->fast_path_cqe.status_flags &
3336 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3337 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3338 if (__predict_false(cqe_fp_flags &
3339 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3340 fp->eth_q_stats.rx_hw_csum_errors++;
3342 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3343 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3347 /* check for a valid TCP/UDP frame */
3348 if (!(cqe->fast_path_cqe.status_flags &
3349 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3350 if (__predict_false(cqe_fp_flags &
3351 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3352 fp->eth_q_stats.rx_hw_csum_errors++;
3354 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3355 m->m_pkthdr.csum_data = 0xFFFF;
3356 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3362 /* if there is a VLAN tag then flag that info */
3363 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3364 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3365 m->m_flags |= M_VLANTAG;
3368 #if __FreeBSD_version >= 800000
3369 /* specify what RSS queue was used for this flow */
3370 m->m_pkthdr.flowid = fp->index;
3376 bd_cons = RX_BD_NEXT(bd_cons);
3377 bd_prod = RX_BD_NEXT(bd_prod);
3378 bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3380 /* pass the frame to the stack */
3381 if (__predict_true(m != NULL)) {
3382 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3389 sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3390 sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3392 /* limit spinning on the queue */
3396 if (rx_pkts == sc->rx_budget) {
3397 fp->eth_q_stats.rx_budget_reached++;
3400 } /* while work to do */
3402 fp->rx_bd_cons = bd_cons;
3403 fp->rx_bd_prod = bd_prod_fw;
3404 fp->rx_cq_cons = sw_cq_cons;
3405 fp->rx_cq_prod = sw_cq_prod;
3407 /* Update producers */
3408 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3410 fp->eth_q_stats.rx_pkts += rx_pkts;
3411 fp->eth_q_stats.rx_calls++;
3413 BXE_FP_RX_UNLOCK(fp);
3415 return (sw_cq_cons != hw_cq_cons);
3419 bxe_free_tx_pkt(struct bxe_softc *sc,
3420 struct bxe_fastpath *fp,
3423 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3424 struct eth_tx_start_bd *tx_start_bd;
3425 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3429 /* unmap the mbuf from non-paged memory */
3430 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3432 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3433 nbd = le16toh(tx_start_bd->nbd) - 1;
3435 new_cons = (tx_buf->first_bd + nbd);
3438 if (__predict_true(tx_buf->m != NULL)) {
3440 fp->eth_q_stats.mbuf_alloc_tx--;
3442 fp->eth_q_stats.tx_chain_lost_mbuf++;
3446 tx_buf->first_bd = 0;
3451 /* transmit timeout watchdog */
3453 bxe_watchdog(struct bxe_softc *sc,
3454 struct bxe_fastpath *fp)
3458 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3459 BXE_FP_TX_UNLOCK(fp);
3463 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3464 if(sc->trigger_grcdump) {
3465 /* taking grcdump */
3469 BXE_FP_TX_UNLOCK(fp);
3471 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3472 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3477 /* processes transmit completions */
3479 bxe_txeof(struct bxe_softc *sc,
3480 struct bxe_fastpath *fp)
3483 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3484 uint16_t tx_bd_avail;
3486 BXE_FP_TX_LOCK_ASSERT(fp);
3488 bd_cons = fp->tx_bd_cons;
3489 hw_cons = le16toh(*fp->tx_cons_sb);
3490 sw_cons = fp->tx_pkt_cons;
3492 while (sw_cons != hw_cons) {
3493 pkt_cons = TX_BD(sw_cons);
3496 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3497 fp->index, hw_cons, sw_cons, pkt_cons);
3499 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3504 fp->tx_pkt_cons = sw_cons;
3505 fp->tx_bd_cons = bd_cons;
3508 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3509 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3513 tx_bd_avail = bxe_tx_avail(sc, fp);
3515 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3516 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3518 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3521 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3522 /* reset the watchdog timer if there are pending transmits */
3523 fp->watchdog_timer = BXE_TX_TIMEOUT;
3526 /* clear watchdog when there are no pending transmits */
3527 fp->watchdog_timer = 0;
3533 bxe_drain_tx_queues(struct bxe_softc *sc)
3535 struct bxe_fastpath *fp;
3538 /* wait until all TX fastpath tasks have completed */
3539 for (i = 0; i < sc->num_queues; i++) {
3544 while (bxe_has_tx_work(fp)) {
3548 BXE_FP_TX_UNLOCK(fp);
3551 BLOGE(sc, "Timeout waiting for fp[%d] "
3552 "transmits to complete!\n", i);
3553 bxe_panic(sc, ("tx drain failure\n"));
3567 bxe_del_all_macs(struct bxe_softc *sc,
3568 struct ecore_vlan_mac_obj *mac_obj,
3570 uint8_t wait_for_comp)
3572 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3575     /* wait for completion of the requested command */
3576 if (wait_for_comp) {
3577 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3580 /* Set the mac type of addresses we want to clear */
3581 bxe_set_bit(mac_type, &vlan_mac_flags);
3583 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3585 BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3586 rc, mac_type, wait_for_comp);
3593 bxe_fill_accept_flags(struct bxe_softc *sc,
3595 unsigned long *rx_accept_flags,
3596 unsigned long *tx_accept_flags)
3598 /* Clear the flags first */
3599 *rx_accept_flags = 0;
3600 *tx_accept_flags = 0;
3603 case BXE_RX_MODE_NONE:
3605 * 'drop all' supersedes any accept flags that may have been
3606 * passed to the function.
3610 case BXE_RX_MODE_NORMAL:
3611 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3612 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3613 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3615 /* internal switching mode */
3616 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3617 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3618 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3622 case BXE_RX_MODE_ALLMULTI:
3623 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3624 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3625 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3627 /* internal switching mode */
3628 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3629 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3630 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3634 case BXE_RX_MODE_PROMISC:
3636          * According to the definition of SI mode, an iface in promisc mode
3637          * should receive matched and unmatched (in resolution of port)
3640 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3641 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3642 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3643 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3645 /* internal switching mode */
3646 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3647 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3650 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3652 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3658 BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3662 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3663 if (rx_mode != BXE_RX_MODE_NONE) {
3664 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3665 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
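/*
 * Summary of the rx-side mapping built above:
 *
 *     BXE_RX_MODE_NONE     -> (drop everything)
 *     BXE_RX_MODE_NORMAL   -> UNICAST | MULTICAST | BROADCAST
 *     BXE_RX_MODE_ALLMULTI -> UNICAST | ALL_MULTICAST | BROADCAST
 *     BXE_RX_MODE_PROMISC  -> UNMATCHED | UNICAST | ALL_MULTICAST | BROADCAST
 *
 * plus ACCEPT_ANY_VLAN for every mode except NONE.
 */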
3672 bxe_set_q_rx_mode(struct bxe_softc *sc,
3674 unsigned long rx_mode_flags,
3675 unsigned long rx_accept_flags,
3676 unsigned long tx_accept_flags,
3677 unsigned long ramrod_flags)
3679 struct ecore_rx_mode_ramrod_params ramrod_param;
3682 memset(&ramrod_param, 0, sizeof(ramrod_param));
3684 /* Prepare ramrod parameters */
3685 ramrod_param.cid = 0;
3686 ramrod_param.cl_id = cl_id;
3687 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3688 ramrod_param.func_id = SC_FUNC(sc);
3690 ramrod_param.pstate = &sc->sp_state;
3691 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3693 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3694 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3696 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3698 ramrod_param.ramrod_flags = ramrod_flags;
3699 ramrod_param.rx_mode_flags = rx_mode_flags;
3701 ramrod_param.rx_accept_flags = rx_accept_flags;
3702 ramrod_param.tx_accept_flags = tx_accept_flags;
3704 rc = ecore_config_rx_mode(sc, &ramrod_param);
3706 BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3707 "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3708 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3709 (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3710 (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3718 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3720 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3721 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3724 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3730 bxe_set_bit(RAMROD_RX, &ramrod_flags);
3731 bxe_set_bit(RAMROD_TX, &ramrod_flags);
3733 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3734 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3735 rx_accept_flags, tx_accept_flags,
3739 /* returns the "mcp load_code" according to global load_count array */
3741 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3743 int path = SC_PATH(sc);
3744 int port = SC_PORT(sc);
3746 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3747 path, load_count[path][0], load_count[path][1],
3748 load_count[path][2]);
3749 load_count[path][0]++;
3750 load_count[path][1 + port]++;
3751 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3752 path, load_count[path][0], load_count[path][1],
3753 load_count[path][2]);
3754 if (load_count[path][0] == 1) {
3755 return (FW_MSG_CODE_DRV_LOAD_COMMON);
3756 } else if (load_count[path][1 + port] == 1) {
3757 return (FW_MSG_CODE_DRV_LOAD_PORT);
3759 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
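/*
 * Illustrative walk-through of the counters above: on a freshly-reset
 * path the first function to load moves load_count[path] from {0,0,0} to
 * {1,1,0} and gets LOAD_COMMON; a second function on the other port moves
 * it to {2,1,1} and gets LOAD_PORT; any later function on an
 * already-loaded port gets LOAD_FUNCTION.
 */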
3763 /* returns the "mcp load_code" according to global load_count array */
3765 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3767 int port = SC_PORT(sc);
3768 int path = SC_PATH(sc);
3770 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3771 path, load_count[path][0], load_count[path][1],
3772 load_count[path][2]);
3773 load_count[path][0]--;
3774 load_count[path][1 + port]--;
3775 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3776 path, load_count[path][0], load_count[path][1],
3777 load_count[path][2]);
3778 if (load_count[path][0] == 0) {
3779 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3780 } else if (load_count[path][1 + port] == 0) {
3781 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3783 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3787 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3789 bxe_send_unload_req(struct bxe_softc *sc,
3792 uint32_t reset_code = 0;
3794 /* Select the UNLOAD request mode */
3795 if (unload_mode == UNLOAD_NORMAL) {
3796 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3798 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3801 /* Send the request to the MCP */
3802 if (!BXE_NOMCP(sc)) {
3803 reset_code = bxe_fw_command(sc, reset_code, 0);
3805 reset_code = bxe_nic_unload_no_mcp(sc);
3808 return (reset_code);
3811 /* send UNLOAD_DONE command to the MCP */
3813 bxe_send_unload_done(struct bxe_softc *sc,
3816 uint32_t reset_param =
3817 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3819 /* Report UNLOAD_DONE to MCP */
3820 if (!BXE_NOMCP(sc)) {
3821 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3826 bxe_func_wait_started(struct bxe_softc *sc)
3830 if (!sc->port.pmf) {
3835 * (assumption: No Attention from MCP at this stage)
3836      * PMF is probably in the middle of a TX disable/enable transaction.
3837      * 1. Sync ISR for the default SB
3838      * 2. Sync the SP queue - this guarantees us that attention handling has started
3839      * 3. Wait until the TX disable/enable transaction completes
3841      * 1+2 guarantee that if a DCBX attention was scheduled it has already changed
3842      * the pending bit of the transaction from STARTED-->TX_STOPPED; if we have
3843      * already received completion for the transaction the state is TX_STOPPED.
3844      * The state will return to STARTED after completion of TX_STOPPED-->STARTED.
3848 /* XXX make sure default SB ISR is done */
3849 /* need a way to synchronize an irq (intr_mtx?) */
3851 /* XXX flush any work queues */
3853 while (ecore_func_get_state(sc, &sc->func_obj) !=
3854 ECORE_F_STATE_STARTED && tout--) {
3858 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3860          * Failed to complete the transaction in a "good way", so
3861          * force both transactions with the CLR bit.
3863 struct ecore_func_state_params func_params = { NULL };
3865 BLOGE(sc, "Unexpected function state! "
3866 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3868 func_params.f_obj = &sc->func_obj;
3869 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3871 /* STARTED-->TX_STOPPED */
3872 func_params.cmd = ECORE_F_CMD_TX_STOP;
3873 ecore_func_state_change(sc, &func_params);
3875 /* TX_STOPPED-->STARTED */
3876 func_params.cmd = ECORE_F_CMD_TX_START;
3877 return (ecore_func_state_change(sc, &func_params));
3884 bxe_stop_queue(struct bxe_softc *sc,
3887 struct bxe_fastpath *fp = &sc->fp[index];
3888 struct ecore_queue_state_params q_params = { NULL };
3891 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3893 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3894 /* We want to wait for completion in this context */
3895 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3897 /* Stop the primary connection: */
3899 /* ...halt the connection */
3900 q_params.cmd = ECORE_Q_CMD_HALT;
3901 rc = ecore_queue_state_change(sc, &q_params);
3906 /* ...terminate the connection */
3907 q_params.cmd = ECORE_Q_CMD_TERMINATE;
3908 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3909 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3910 rc = ecore_queue_state_change(sc, &q_params);
3915 /* ...delete cfc entry */
3916 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3917 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3918 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3919 return (ecore_queue_state_change(sc, &q_params));
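/*
 * The teardown above is a strict three-step state machine, each step
 * waiting for its ramrod completion (RAMROD_COMP_WAIT) before the next
 * is issued:
 *
 *     ECORE_Q_CMD_HALT       // stop accepting new work
 *     ECORE_Q_CMD_TERMINATE  // flush work already in flight
 *     ECORE_Q_CMD_CFC_DEL    // delete the connection's CFC entry
 */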
3922 /* wait for the outstanding SP commands */
3923 static inline uint8_t
3924 bxe_wait_sp_comp(struct bxe_softc *sc,
3928 int tout = 5000; /* wait for 5 secs tops */
3932 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3941 tmp = atomic_load_acq_long(&sc->sp_state);
3943 BLOGE(sc, "Filtering completion timed out: "
3944 "sp_state 0x%lx, mask 0x%lx\n",
3953 bxe_func_stop(struct bxe_softc *sc)
3955 struct ecore_func_state_params func_params = { NULL };
3958 /* prepare parameters for function state transitions */
3959 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3960 func_params.f_obj = &sc->func_obj;
3961 func_params.cmd = ECORE_F_CMD_STOP;
3964 * Try to stop the function the 'good way'. If it fails (in case
3965 * of a parity error during bxe_chip_cleanup()) and we are
3966 * not in a debug mode, perform a state transaction in order to
3967 * enable further HW_RESET transaction.
3969 rc = ecore_func_state_change(sc, &func_params);
3971 BLOGE(sc, "FUNC_STOP ramrod failed. "
3972 "Running a dry transaction (%d)\n", rc);
3973 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3974 return (ecore_func_state_change(sc, &func_params));
3981 bxe_reset_hw(struct bxe_softc *sc,
3984 struct ecore_func_state_params func_params = { NULL };
3986 /* Prepare parameters for function state transitions */
3987 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3989 func_params.f_obj = &sc->func_obj;
3990 func_params.cmd = ECORE_F_CMD_HW_RESET;
3992 func_params.params.hw_init.load_phase = load_code;
3994 return (ecore_func_state_change(sc, &func_params));
3998 bxe_int_disable_sync(struct bxe_softc *sc,
4002 /* prevent the HW from sending interrupts */
4003 bxe_int_disable(sc);
4006 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4007 /* make sure all ISRs are done */
4009 /* XXX make sure sp_task is not running */
4010 /* cancel and flush work queues */
4014 bxe_chip_cleanup(struct bxe_softc *sc,
4015 uint32_t unload_mode,
4018 int port = SC_PORT(sc);
4019 struct ecore_mcast_ramrod_params rparam = { NULL };
4020 uint32_t reset_code;
4023 bxe_drain_tx_queues(sc);
4025 /* give HW time to discard old tx messages */
4028 /* Clean all ETH MACs */
4029 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4031 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4034 /* Clean up UC list */
4035 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4037 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4041 if (!CHIP_IS_E1(sc)) {
4042 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4045 /* Set "drop all" to stop Rx */
4048 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4049 * a race between the completion code and this code.
4053 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4054 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4056 bxe_set_storm_rx_mode(sc);
4059 /* Clean up multicast configuration */
4060 rparam.mcast_obj = &sc->mcast_obj;
4061 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4063 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4066 BXE_MCAST_UNLOCK(sc);
4068 // XXX bxe_iov_chip_cleanup(sc);
4071 * Send the UNLOAD_REQUEST to the MCP. This will return if
4072 * this function should perform FUNCTION, PORT, or COMMON HW
4075 reset_code = bxe_send_unload_req(sc, unload_mode);
4078 * (assumption: No Attention from MCP at this stage)
4079 * PMF probably in the middle of TX disable/enable transaction
4081 rc = bxe_func_wait_started(sc);
4083 BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4087 * Close multi and leading connections
4088 * Completions for ramrods are collected in a synchronous way
4090 for (i = 0; i < sc->num_queues; i++) {
4091 if (bxe_stop_queue(sc, i)) {
4097      * If the SP settings have not completed by now, something
4098      * has gone very wrong.
4100 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4101 BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4106 rc = bxe_func_stop(sc);
4108 BLOGE(sc, "Function stop failed!(%d)\n", rc);
4111 /* disable HW interrupts */
4112 bxe_int_disable_sync(sc, TRUE);
4114 /* detach interrupts */
4115 bxe_interrupt_detach(sc);
4117 /* Reset the chip */
4118 rc = bxe_reset_hw(sc, reset_code);
4120 BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4123 /* Report UNLOAD_DONE to MCP */
4124 bxe_send_unload_done(sc, keep_link);
4128 bxe_disable_close_the_gate(struct bxe_softc *sc)
4131 int port = SC_PORT(sc);
4134 "Disabling 'close the gates'\n");
4136 if (CHIP_IS_E1(sc)) {
4137 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4138 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4139 val = REG_RD(sc, addr);
4141 REG_WR(sc, addr, val);
4143 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4144 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4145 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4146 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4151  * Cleans the objects that have internal lists without sending
4152  * ramrods. Should be run when interrupts are disabled.
4155 bxe_squeeze_objects(struct bxe_softc *sc)
4157 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4158 struct ecore_mcast_ramrod_params rparam = { NULL };
4159 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4162 /* Cleanup MACs' object first... */
4164     /* Wait for completion of the requested command */
4165 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4166 /* Perform a dry cleanup */
4167 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4169 /* Clean ETH primary MAC */
4170 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4171 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4174 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4177 /* Cleanup UC list */
4179 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4180 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4183 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4186 /* Now clean mcast object... */
4188 rparam.mcast_obj = &sc->mcast_obj;
4189 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4191 /* Add a DEL command... */
4192 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4194 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4197 /* now wait until all pending commands are cleared */
4199 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4202 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4206 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4210 /* stop the controller */
4211 static __noinline int
4212 bxe_nic_unload(struct bxe_softc *sc,
4213 uint32_t unload_mode,
4216 uint8_t global = FALSE;
4220 BXE_CORE_LOCK_ASSERT(sc);
4222 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4224 for (i = 0; i < sc->num_queues; i++) {
4225 struct bxe_fastpath *fp;
4229 BXE_FP_TX_UNLOCK(fp);
4232 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4234 /* mark driver as unloaded in shmem2 */
4235 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4236 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4237 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4238 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4241 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4242 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4244 * We can get here if the driver has been unloaded
4245 * during parity error recovery and is either waiting for a
4246 * leader to complete or for other functions to unload and
4247 * then ifconfig down has been issued. In this case we want to
4248          * unload and let the other functions complete a recovery
4251 sc->recovery_state = BXE_RECOVERY_DONE;
4253 bxe_release_leader_lock(sc);
4256 BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4257 BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4258 " state = 0x%x\n", sc->recovery_state, sc->state);
4263      * Nothing to do during unload if the previous bxe_nic_load()
4264      * did not complete successfully - all resources are released.
4266 if ((sc->state == BXE_STATE_CLOSED) ||
4267 (sc->state == BXE_STATE_ERROR)) {
4271 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4277 sc->rx_mode = BXE_RX_MODE_NONE;
4278 /* XXX set rx mode ??? */
4280 if (IS_PF(sc) && !sc->grcdump_done) {
4281 /* set ALWAYS_ALIVE bit in shmem */
4282 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4286 bxe_stats_handle(sc, STATS_EVENT_STOP);
4287 bxe_save_statistics(sc);
4290 /* wait till consumers catch up with producers in all queues */
4291 bxe_drain_tx_queues(sc);
4293 /* if VF indicate to PF this function is going down (PF will delete sp
4294 * elements and clear initializations
4297 ; /* bxe_vfpf_close_vf(sc); */
4298 } else if (unload_mode != UNLOAD_RECOVERY) {
4299 /* if this is a normal/close unload need to clean up chip */
4300 if (!sc->grcdump_done)
4301 bxe_chip_cleanup(sc, unload_mode, keep_link);
4303 /* Send the UNLOAD_REQUEST to the MCP */
4304 bxe_send_unload_req(sc, unload_mode);
4307          * Prevent transactions to the host from the functions on the
4308          * engine that doesn't reset global blocks in case of global
4309          * attention once global blocks are reset and gates are opened
4310          * (the engine whose leader will perform the recovery
4313 if (!CHIP_IS_E1x(sc)) {
4317 /* disable HW interrupts */
4318 bxe_int_disable_sync(sc, TRUE);
4320 /* detach interrupts */
4321 bxe_interrupt_detach(sc);
4323 /* Report UNLOAD_DONE to MCP */
4324 bxe_send_unload_done(sc, FALSE);
4328      * At this stage no more interrupts will arrive, so we may safely clean
4329      * the queueable objects here in case they failed to get cleaned so far.
4332 bxe_squeeze_objects(sc);
4335 /* There should be no more pending SP commands at this stage */
4340 bxe_free_fp_buffers(sc);
4346 bxe_free_fw_stats_mem(sc);
4348 sc->state = BXE_STATE_CLOSED;
4351      * Check if there are pending parity attentions. If there are, set
4352      * RECOVERY_IN_PROGRESS.
4354 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4355 bxe_set_reset_in_progress(sc);
4357 /* Set RESET_IS_GLOBAL if needed */
4359 bxe_set_reset_global(sc);
4364 * The last driver must disable a "close the gate" if there is no
4365 * parity attention or "process kill" pending.
4367 if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4368 bxe_reset_is_done(sc, SC_PATH(sc))) {
4369 bxe_disable_close_the_gate(sc);
4372 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4378 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4379 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4382 bxe_ifmedia_update(struct ifnet *ifp)
4384 struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4385 struct ifmedia *ifm;
4389 /* We only support Ethernet media type. */
4390 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4394 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4400 case IFM_10G_TWINAX:
4402 /* We don't support changing the media type. */
4403 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4404 IFM_SUBTYPE(ifm->ifm_media));
4412 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4415 bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4417 struct bxe_softc *sc = if_getsoftc(ifp);
4419 /* Report link down if the driver isn't running. */
4420 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4421 ifmr->ifm_active |= IFM_NONE;
4425 /* Setup the default interface info. */
4426 ifmr->ifm_status = IFM_AVALID;
4427 ifmr->ifm_active = IFM_ETHER;
4429 if (sc->link_vars.link_up) {
4430 ifmr->ifm_status |= IFM_ACTIVE;
4432 ifmr->ifm_active |= IFM_NONE;
4436 ifmr->ifm_active |= sc->media;
4438 if (sc->link_vars.duplex == DUPLEX_FULL) {
4439 ifmr->ifm_active |= IFM_FDX;
4441 ifmr->ifm_active |= IFM_HDX;
4446 bxe_handle_chip_tq(void *context,
4449 struct bxe_softc *sc = (struct bxe_softc *)context;
4450 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4455 case CHIP_TQ_REINIT:
4456 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4457 /* restart the interface */
4458 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4459 bxe_periodic_stop(sc);
4461 bxe_stop_locked(sc);
4462 bxe_init_locked(sc);
4463 BXE_CORE_UNLOCK(sc);
4473 * Handles any IOCTL calls from the operating system.
4476 * 0 = Success, >0 Failure
4483 struct bxe_softc *sc = if_getsoftc(ifp);
4484 struct ifreq *ifr = (struct ifreq *)data;
4489 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4490 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4495 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4498 if (sc->mtu == ifr->ifr_mtu) {
4499 /* nothing to change */
4503 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4504 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4505 ifr->ifr_mtu, mtu_min, mtu_max);
4510 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4511 (unsigned long)ifr->ifr_mtu);
4513         /* atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4514                (unsigned long)ifr->ifr_mtu);
4515         XXX - Not sure why it needs to be atomic */
4517 if_setmtu(ifp, ifr->ifr_mtu);
4522 /* toggle the interface state up or down */
4523 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4526 /* check if the interface is up */
4527 if (if_getflags(ifp) & IFF_UP) {
4528 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4529 /* set the receive mode flags */
4530 bxe_set_rx_mode(sc);
4531 } else if(sc->state != BXE_STATE_DISABLED) {
4532 bxe_init_locked(sc);
4535 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4536 bxe_periodic_stop(sc);
4537 bxe_stop_locked(sc);
4540 BXE_CORE_UNLOCK(sc);
4546 /* add/delete multicast addresses */
4547 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4549 /* check if the interface is up */
4550 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4551 /* set the receive mode flags */
4553 bxe_set_rx_mode(sc);
4554 BXE_CORE_UNLOCK(sc);
4560 /* find out which capabilities have changed */
4561 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4563 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4566         /* toggle the LRO capabilities enable flag */
4567 if (mask & IFCAP_LRO) {
4568 if_togglecapenable(ifp, IFCAP_LRO);
4569 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4570 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4574         /* toggle the TXCSUM checksum capabilities enable flag */
4575 if (mask & IFCAP_TXCSUM) {
4576 if_togglecapenable(ifp, IFCAP_TXCSUM);
4577 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4578 (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4579 if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4580 if_sethwassistbits(ifp, (CSUM_IP |
4587 if_clearhwassist(ifp); /* XXX */
4591 /* toggle the RXCSUM checksum capabilities enable flag */
4592 if (mask & IFCAP_RXCSUM) {
4593 if_togglecapenable(ifp, IFCAP_RXCSUM);
4594 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4595 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4596 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4597 if_sethwassistbits(ifp, (CSUM_IP |
4604 if_clearhwassist(ifp); /* XXX */
4608 /* toggle TSO4 capabilities enabled flag */
4609 if (mask & IFCAP_TSO4) {
4610 if_togglecapenable(ifp, IFCAP_TSO4);
4611 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4612 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4615 /* toggle TSO6 capabilities enabled flag */
4616 if (mask & IFCAP_TSO6) {
4617 if_togglecapenable(ifp, IFCAP_TSO6);
4618 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4619 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4622 /* toggle VLAN_HWTSO capabilities enabled flag */
4623 if (mask & IFCAP_VLAN_HWTSO) {
4625 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4626 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4627 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4630 /* toggle VLAN_HWCSUM capabilities enabled flag */
4631 if (mask & IFCAP_VLAN_HWCSUM) {
4632 /* XXX investigate this... */
4633 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4637 /* toggle VLAN_MTU capabilities enable flag */
4638 if (mask & IFCAP_VLAN_MTU) {
4639 /* XXX investigate this... */
4640 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4644 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4645 if (mask & IFCAP_VLAN_HWTAGGING) {
4646 /* XXX investigate this... */
4647 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4651 /* toggle VLAN_HWFILTER capabilities enabled flag */
4652 if (mask & IFCAP_VLAN_HWFILTER) {
4653 /* XXX investigate this... */
4654 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4666 /* set/get interface media */
4667 BLOGD(sc, DBG_IOCTL,
4668 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4670 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4674 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4676 error = ether_ioctl(ifp, command, data);
4680 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4681 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4682 "Re-initializing hardware from IOCTL change\n");
4683 bxe_periodic_stop(sc);
4685 bxe_stop_locked(sc);
4686 bxe_init_locked(sc);
4687 BXE_CORE_UNLOCK(sc);
4693 static __noinline void
4694 bxe_dump_mbuf(struct bxe_softc *sc,
4701 if (!(sc->debug & DBG_MBUF)) {
4706 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4712 #if __FreeBSD_version >= 1000000
4714 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4715 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4717 if (m->m_flags & M_PKTHDR) {
4719 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4720 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4721 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4725 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4726 i, m, m->m_len, m->m_flags,
4727 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4729 if (m->m_flags & M_PKTHDR) {
4731 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4732 i, m->m_pkthdr.len, m->m_flags,
4733 "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4734 "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4735 "\22M_PROMISC\23M_NOFREE",
4736 (int)m->m_pkthdr.csum_flags,
4737 "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4738 "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4739 "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4740 "\14CSUM_PSEUDO_HDR");
4742 #endif /* #if __FreeBSD_version >= 1000000 */
4744 if (m->m_flags & M_EXT) {
4745 switch (m->m_ext.ext_type) {
4746 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
4747 case EXT_SFBUF: type = "EXT_SFBUF"; break;
4748 case EXT_JUMBOP: type = "EXT_JUMBOP"; break;
4749 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
4750 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
4751 case EXT_PACKET: type = "EXT_PACKET"; break;
4752 case EXT_MBUF: type = "EXT_MBUF"; break;
4753 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
4754 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
4755 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4756 case EXT_EXTREF: type = "EXT_EXTREF"; break;
4757 default: type = "UNKNOWN"; break;
4761 "%02d: - m_ext: %p ext_size=%d type=%s\n",
4762 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4766 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4775 * Checks to ensure the 13 BD sliding window is >= MSS for TSO.
4776 * Check that (13 total BDs - 3 reserved BDs) = 10 BD window >= MSS.
4777 * The 3 reserved BDs are: 1 for the headers BD plus 2 for the parse BD
4778 * and the last BD. The header comes in a separate BD in FreeBSD, so 13 - 3 = 10.
4779 * Returns: 0 if OK to send, 1 if the packet needs further defragmentation
4782 bxe_chktso_window(struct bxe_softc *sc,
4784 bus_dma_segment_t *segs,
4787 uint32_t num_wnds, wnd_size, wnd_sum;
4788 int32_t frag_idx, wnd_idx;
4789 unsigned short lso_mss;
4795 num_wnds = nsegs - wnd_size;
4796 lso_mss = htole16(m->m_pkthdr.tso_segsz);
4799 * The total Eth+IP+TCP header length is in the first FreeBSD mbuf, so
4800 * calculate the first window's sum of data while skipping the first
4801 * segment, assuming it holds only the header.
4803 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4804 wnd_sum += htole16(segs[frag_idx].ds_len);
4807 /* check the first 10 bd window size */
4808 if (wnd_sum < lso_mss) {
4812 /* run through the windows */
4813 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4814 /* subtract the first mbuf->m_len of the last wndw(-header) */
4815 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4816 /* add the next mbuf len to the len of our new window */
4817 wnd_sum += htole16(segs[frag_idx].ds_len);
4818 if (wnd_sum < lso_mss) {
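/*
 * Editorial sketch (not driver code): the same sliding-window check
 * expressed over a plain array of segment lengths, to make the arithmetic
 * above concrete. The window size of 10 and the header-in-segment-0
 * layout come from the comment above; the function and parameter names
 * are hypothetical. Returns 1 if the chain needs defragmentation, 0 if
 * it is OK to send, mirroring bxe_chktso_window().
 */
static int
tso_window_needs_defrag(const uint32_t *seg_len, int nsegs, uint32_t mss)
{
    const int wnd_size = 10;   /* 13 BDs minus 3 reserved BDs */
    uint32_t wnd_sum = 0;
    int idx;

    /* sum the first window, skipping segment 0 (the headers) */
    for (idx = 1; idx <= wnd_size && idx < nsegs; idx++) {
        wnd_sum += seg_len[idx];
    }
    if (wnd_sum < mss) {
        return (1);
    }

    /* slide the window one segment at a time across the rest */
    for (; idx < nsegs; idx++) {
        wnd_sum -= seg_len[idx - wnd_size];   /* drop the oldest segment */
        wnd_sum += seg_len[idx];              /* add the newest segment */
        if (wnd_sum < mss) {
            return (1);
        }
    }

    return (0);   /* every 10-segment window covers at least one MSS */
}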
4827 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4829 uint32_t *parsing_data)
4831 struct ether_vlan_header *eh = NULL;
4832 struct ip *ip4 = NULL;
4833 struct ip6_hdr *ip6 = NULL;
4835 struct tcphdr *th = NULL;
4836 int e_hlen, ip_hlen, l4_off;
4839 if (m->m_pkthdr.csum_flags == CSUM_IP) {
4840 /* no L4 checksum offload needed */
4844 /* get the Ethernet header */
4845 eh = mtod(m, struct ether_vlan_header *);
4847 /* handle VLAN encapsulation if present */
4848 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4849 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4850 proto = ntohs(eh->evl_proto);
4852 e_hlen = ETHER_HDR_LEN;
4853 proto = ntohs(eh->evl_encap_proto);
4858 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4859 ip4 = (m->m_len < sizeof(struct ip)) ?
4860 (struct ip *)m->m_next->m_data :
4861 (struct ip *)(m->m_data + e_hlen);
4862 /* ip_hl is number of 32-bit words */
4863 ip_hlen = (ip4->ip_hl << 2);
4866 case ETHERTYPE_IPV6:
4867 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4868 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4869 (struct ip6_hdr *)m->m_next->m_data :
4870 (struct ip6_hdr *)(m->m_data + e_hlen);
4871 /* XXX cannot support offload with IPv6 extensions */
4872 ip_hlen = sizeof(struct ip6_hdr);
4876 /* We can't offload in this case... */
4877 /* XXX error stat ??? */
4881 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4882 l4_off = (e_hlen + ip_hlen);
4885 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4886 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4888 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4891 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4892 th = (struct tcphdr *)(ip + ip_hlen);
4893 /* th_off is number of 32-bit words */
4894 *parsing_data |= ((th->th_off <<
4895 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4896 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4897 return (l4_off + (th->th_off << 2)); /* entire header length */
4898 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4900 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4901 return (l4_off + sizeof(struct udphdr)); /* entire header length */
4903 /* XXX error stat ??? */
4909 bxe_set_pbd_csum(struct bxe_fastpath *fp,
4911 struct eth_tx_parse_bd_e1x *pbd)
4913 struct ether_vlan_header *eh = NULL;
4914 struct ip *ip4 = NULL;
4915 struct ip6_hdr *ip6 = NULL;
4917 struct tcphdr *th = NULL;
4918 struct udphdr *uh = NULL;
4919 int e_hlen, ip_hlen;
4925 /* get the Ethernet header */
4926 eh = mtod(m, struct ether_vlan_header *);
4928 /* handle VLAN encapsulation if present */
4929 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4930 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4931 proto = ntohs(eh->evl_proto);
4933 e_hlen = ETHER_HDR_LEN;
4934 proto = ntohs(eh->evl_encap_proto);
4939 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4940 ip4 = (m->m_len < sizeof(struct ip)) ?
4941 (struct ip *)m->m_next->m_data :
4942 (struct ip *)(m->m_data + e_hlen);
4943 /* ip_hl is the number of 32-bit words; << 1 converts to 16-bit words */
4944 ip_hlen = (ip4->ip_hl << 1);
4947 case ETHERTYPE_IPV6:
4948 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4949 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4950 (struct ip6_hdr *)m->m_next->m_data :
4951 (struct ip6_hdr *)(m->m_data + e_hlen);
4952 /* XXX cannot support offload with IPv6 extensions */
4953 ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4957 /* We can't offload in this case... */
4958 /* XXX error stat ??? */
4962 hlen = (e_hlen >> 1);
4964 /* note that rest of global_data is indirectly zeroed here */
4965 if (m->m_flags & M_VLANTAG) {
4967 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4969 pbd->global_data = htole16(hlen);
4972 pbd->ip_hlen_w = ip_hlen;
4974 hlen += pbd->ip_hlen_w;
4976 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4978 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4981 th = (struct tcphdr *)(ip + (ip_hlen << 1));
4982 /* th_off is the number of 32-bit words; << 1 converts to 16-bit words */
4983 hlen += (uint16_t)(th->th_off << 1);
4984 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4986 uh = (struct udphdr *)(ip + (ip_hlen << 1));
4987 hlen += (sizeof(struct udphdr) / 2);
4989 /* valid case as only CSUM_IP was set */
4993 pbd->total_hlen_w = htole16(hlen);
4995 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4998 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4999 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
5000 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5002 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5005 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5006 * checksums and does not know anything about the UDP header and where
5007 * the checksum field is located. It only knows about TCP. Therefore
5008 * we "lie" to the hardware for outgoing UDP packets w/ checksum
5009 * offload. Since the checksum field offset for TCP is 16 bytes and
5010 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5011 * bytes less than the start of the UDP header. This allows the
5012 * hardware to write the checksum in the correct spot. But the
5013 * hardware will compute a checksum which includes the last 10 bytes
5014 * of the IP header. To correct this we tweak the stack computed
5015 * pseudo checksum by folding in the calculation of the inverse
5016 * checksum for those final 10 bytes of the IP header. This allows
5017 * the correct checksum to be computed by the hardware.
5020 /* set pointer 10 bytes before UDP header */
5021 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5023 /* calculate a pseudo header checksum over the first 10 bytes */
5024 tmp_csum = in_pseudo(*tmp_uh,
5026 *(uint16_t *)(tmp_uh + 2));
5028 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5031 return (hlen * 2); /* entire header length, number of bytes */
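/*
 * Worked arithmetic for the UDP pseudo-checksum trick above (editorial):
 * the TCP checksum field sits at offset 16 within the TCP header while
 * UDP's sits at offset 6 within the UDP header, so handing the hardware
 * a "TCP header" pointer 10 bytes before the real UDP header makes it
 * write the result exactly onto uh_sum. The extra 10 bytes the hardware
 * then sums (the tail of the IP header) are cancelled by folding their
 * inverse into the stack-computed pseudo checksum via in_pseudo() and
 * in_addword() above.
 */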
5035 bxe_set_pbd_lso_e2(struct mbuf *m,
5036 uint32_t *parsing_data)
5038 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5039 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5040 ETH_TX_PARSE_BD_E2_LSO_MSS);
5042 /* XXX test for IPv6 with extension header... */
5046 bxe_set_pbd_lso(struct mbuf *m,
5047 struct eth_tx_parse_bd_e1x *pbd)
5049 struct ether_vlan_header *eh = NULL;
5050 struct ip *ip = NULL;
5051 struct tcphdr *th = NULL;
5054 /* get the Ethernet header */
5055 eh = mtod(m, struct ether_vlan_header *);
5057 /* handle VLAN encapsulation if present */
5058 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5059 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5061 /* get the IP and TCP header, with LSO entire header in first mbuf */
5062 /* XXX assuming IPv4 */
5063 ip = (struct ip *)(m->m_data + e_hlen);
5064 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5066 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5067 pbd->tcp_send_seq = ntohl(th->th_seq);
5068 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5072 pbd->ip_id = ntohs(ip->ip_id);
5073 pbd->tcp_pseudo_csum =
5074 ntohs(in_pseudo(ip->ip_src.s_addr,
5076 htons(IPPROTO_TCP)));
5079 pbd->tcp_pseudo_csum =
5080 ntohs(in_pseudo(&ip6->ip6_src,
5082 htons(IPPROTO_TCP)));
5086 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5090 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
5091 * visible to the controller.
5093 * If an mbuf is submitted to this routine and cannot be given to the
5094 * controller (e.g. it has too many fragments) then the function may free
5095 * the mbuf and return to the caller.
5098 * 0 = Success, !0 = Failure
5099 * Note the side effect that an mbuf may be freed if it causes a problem.
5102 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5104 bus_dma_segment_t segs[32];
5106 struct bxe_sw_tx_bd *tx_buf;
5107 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5108 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5109 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5110 struct eth_tx_bd *tx_data_bd;
5111 struct eth_tx_bd *tx_total_pkt_size_bd;
5112 struct eth_tx_start_bd *tx_start_bd;
5113 uint16_t bd_prod, pkt_prod, total_pkt_size;
5115 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5116 struct bxe_softc *sc;
5117 uint16_t tx_bd_avail;
5118 struct ether_vlan_header *eh;
5119 uint32_t pbd_e2_parsing_data = 0;
5126 #if __FreeBSD_version >= 800000
5127 M_ASSERTPKTHDR(*m_head);
5128 #endif /* #if __FreeBSD_version >= 800000 */
5131 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5134 tx_total_pkt_size_bd = NULL;
5136 /* get the H/W pointer for packets and BDs */
5137 pkt_prod = fp->tx_pkt_prod;
5138 bd_prod = fp->tx_bd_prod;
5140 mac_type = UNICAST_ADDRESS;
5142 /* map the mbuf into the next open DMAable memory */
5143 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5144 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5146 segs, &nsegs, BUS_DMA_NOWAIT);
5148 /* mapping errors */
5149 if (__predict_false(error != 0)) {
5150 fp->eth_q_stats.tx_dma_mapping_failure++;
5151 if (error == ENOMEM) {
5152 /* resource issue, try again later */
5154 } else if (error == EFBIG) {
5155 /* possibly recoverable with defragmentation */
5156 fp->eth_q_stats.mbuf_defrag_attempts++;
5157 m0 = m_defrag(*m_head, M_NOWAIT);
5159 fp->eth_q_stats.mbuf_defrag_failures++;
5162 /* defrag successful, try mapping again */
5164 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5166 segs, &nsegs, BUS_DMA_NOWAIT);
5168 fp->eth_q_stats.tx_dma_mapping_failure++;
5173 /* unknown, unrecoverable mapping error */
5174 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5175 bxe_dump_mbuf(sc, m0, FALSE);
5179 goto bxe_tx_encap_continue;
5182 tx_bd_avail = bxe_tx_avail(sc, fp);
5184 /* make sure there is enough room in the send queue */
5185 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5186 /* Recoverable, try again later. */
5187 fp->eth_q_stats.tx_hw_queue_full++;
5188 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5190 goto bxe_tx_encap_continue;
5193 /* capture the current H/W TX chain high watermark */
5194 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5195 (TX_BD_USABLE - tx_bd_avail))) {
5196 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5199 /* make sure it fits in the packet window */
5200 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5202 * The mbuf may be too big for the controller to handle. If the frame
5203 * is a TSO frame, we'll need to do an additional check.
5205 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5206 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5207 goto bxe_tx_encap_continue; /* OK to send */
5209 fp->eth_q_stats.tx_window_violation_tso++;
5212 fp->eth_q_stats.tx_window_violation_std++;
5215 /* lets try to defragment this mbuf and remap it */
5216 fp->eth_q_stats.mbuf_defrag_attempts++;
5217 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5219 m0 = m_defrag(*m_head, M_NOWAIT);
5221 fp->eth_q_stats.mbuf_defrag_failures++;
5222 /* Ugh, just drop the frame... :( */
5225 /* defrag successful, try mapping again */
5227 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5229 segs, &nsegs, BUS_DMA_NOWAIT);
5231 fp->eth_q_stats.tx_dma_mapping_failure++;
5232 /* No sense in trying to defrag/copy chain, drop it. :( */
5236 /* if the chain is still too long then drop it */
5237 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5238 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5245 bxe_tx_encap_continue:
5247 /* Check for errors */
5250 /* recoverable try again later */
5252 fp->eth_q_stats.tx_soft_errors++;
5253 fp->eth_q_stats.mbuf_alloc_tx--;
5261 /* set flag according to packet type (UNICAST_ADDRESS is default) */
5262 if (m0->m_flags & M_BCAST) {
5263 mac_type = BROADCAST_ADDRESS;
5264 } else if (m0->m_flags & M_MCAST) {
5265 mac_type = MULTICAST_ADDRESS;
5268 /* store the mbuf into the mbuf ring */
5270 tx_buf->first_bd = fp->tx_bd_prod;
5273 /* prepare the first transmit (start) BD for the mbuf */
5274 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5277 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5278 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5280 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5281 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5282 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5283 total_pkt_size += tx_start_bd->nbytes;
5284 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5286 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5288 /* all frames have at least Start BD + Parsing BD */
5290 tx_start_bd->nbd = htole16(nbds);
5292 if (m0->m_flags & M_VLANTAG) {
5293 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5294 tx_start_bd->bd_flags.as_bitfield |=
5295 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5297 /* vf tx, start bd must hold the ethertype for fw to enforce it */
5299 /* map ethernet header to find type and header length */
5300 eh = mtod(m0, struct ether_vlan_header *);
5301 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5303 /* used by FW for packet accounting */
5304 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5309 * add a parsing BD from the chain. The parsing BD is always added,
5310 * though it is only used for TSO and checksum offload
5312 bd_prod = TX_BD_NEXT(bd_prod);
5314 if (m0->m_pkthdr.csum_flags) {
5315 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5316 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5317 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5320 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5321 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5322 ETH_TX_BD_FLAGS_L4_CSUM);
5323 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5324 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5325 ETH_TX_BD_FLAGS_IS_UDP |
5326 ETH_TX_BD_FLAGS_L4_CSUM);
5327 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5328 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5329 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5330 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5331 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5332 ETH_TX_BD_FLAGS_IS_UDP);
5336 if (!CHIP_IS_E1x(sc)) {
5337 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5338 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5340 if (m0->m_pkthdr.csum_flags) {
5341 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5344 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5347 uint16_t global_data = 0;
5349 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5350 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5352 if (m0->m_pkthdr.csum_flags) {
5353 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5356 SET_FLAG(global_data,
5357 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5358 pbd_e1x->global_data |= htole16(global_data);
5361 /* setup the parsing BD with TSO specific info */
5362 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5363 fp->eth_q_stats.tx_ofld_frames_lso++;
5364 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5366 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5367 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5369 /* split the first BD into header/data making the fw job easy */
5371 tx_start_bd->nbd = htole16(nbds);
5372 tx_start_bd->nbytes = htole16(hlen);
5374 bd_prod = TX_BD_NEXT(bd_prod);
5376 /* new transmit BD after the tx_parse_bd */
5377 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5378 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5379 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5380 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5381 if (tx_total_pkt_size_bd == NULL) {
5382 tx_total_pkt_size_bd = tx_data_bd;
5386 "TSO split header size is %d (%x:%x) nbds %d\n",
5387 le16toh(tx_start_bd->nbytes),
5388 le32toh(tx_start_bd->addr_hi),
5389 le32toh(tx_start_bd->addr_lo),
5393 if (!CHIP_IS_E1x(sc)) {
5394 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5396 bxe_set_pbd_lso(m0, pbd_e1x);
5400 if (pbd_e2_parsing_data) {
5401 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5404 /* prepare remaining BDs, start tx bd contains first seg/frag */
5405 for (i = 1; i < nsegs ; i++) {
5406 bd_prod = TX_BD_NEXT(bd_prod);
5407 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5408 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5409 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5410 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5411 if (tx_total_pkt_size_bd == NULL) {
5412 tx_total_pkt_size_bd = tx_data_bd;
5414 total_pkt_size += tx_data_bd->nbytes;
5417 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5419 if (tx_total_pkt_size_bd != NULL) {
5420 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5423 if (__predict_false(sc->debug & DBG_TX)) {
5424 tmp_bd = tx_buf->first_bd;
5425 for (i = 0; i < nbds; i++)
5429 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5430 "bd_flags=0x%x hdr_nbds=%d\n",
5433 le16toh(tx_start_bd->nbd),
5434 le16toh(tx_start_bd->vlan_or_ethertype),
5435 tx_start_bd->bd_flags.as_bitfield,
5436 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5437 } else if (i == 1) {
5440 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5441 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5442 "tcp_seq=%u total_hlen_w=%u\n",
5445 pbd_e1x->global_data,
5450 pbd_e1x->tcp_pseudo_csum,
5451 pbd_e1x->tcp_send_seq,
5452 le16toh(pbd_e1x->total_hlen_w));
5453 } else { /* if (pbd_e2) */
5455 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5456 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5459 pbd_e2->data.mac_addr.dst_hi,
5460 pbd_e2->data.mac_addr.dst_mid,
5461 pbd_e2->data.mac_addr.dst_lo,
5462 pbd_e2->data.mac_addr.src_hi,
5463 pbd_e2->data.mac_addr.src_mid,
5464 pbd_e2->data.mac_addr.src_lo,
5465 pbd_e2->parsing_data);
5469 if (i != 1) { /* skip the parse bd as it doesn't hold data */
5470 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5472 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5475 le16toh(tx_data_bd->nbytes),
5476 le32toh(tx_data_bd->addr_hi),
5477 le32toh(tx_data_bd->addr_lo));
5480 tmp_bd = TX_BD_NEXT(tmp_bd);
5484 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5486 /* update TX BD producer index value for next TX */
5487 bd_prod = TX_BD_NEXT(bd_prod);
5490 * If the chain of tx_bd's describing this frame is adjacent to or spans
5491 * an eth_tx_next_bd element then we need to increment the nbds value.
5493 if (TX_BD_IDX(bd_prod) < nbds) {
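/*
 * Worked example (editorial): with nbds = 3, if the producer index within
 * the current BD page is now 0, 1, or 2, the frame's BDs must have crossed
 * the page's trailing eth_tx_next_bd element, which also consumes a slot,
 * so nbds is bumped by one to account for it.
 */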
5497 /* don't allow reordering of writes for nbd and packets */
5500 fp->tx_db.data.prod += nbds;
5502 /* producer points to the next free tx_bd at this point */
5504 fp->tx_bd_prod = bd_prod;
5506 DOORBELL(sc, fp->index, fp->tx_db.raw);
5508 fp->eth_q_stats.tx_pkts++;
5510 /* Prevent speculative reads from getting ahead of the status block. */
5511 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5512 0, 0, BUS_SPACE_BARRIER_READ);
5514 /* Prevent speculative reads from getting ahead of the doorbell. */
5515 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5516 0, 0, BUS_SPACE_BARRIER_READ);
5522 bxe_tx_start_locked(struct bxe_softc *sc,
5524 struct bxe_fastpath *fp)
5526 struct mbuf *m = NULL;
5528 uint16_t tx_bd_avail;
5530 BXE_FP_TX_LOCK_ASSERT(fp);
5532 /* keep adding entries while there are frames to send */
5533 while (!if_sendq_empty(ifp)) {
5536 * check for any frames to send
5537 * dequeue can still be NULL even if queue is not empty
5539 m = if_dequeue(ifp);
5540 if (__predict_false(m == NULL)) {
5544 /* the mbuf now belongs to us */
5545 fp->eth_q_stats.mbuf_alloc_tx++;
5548 * Put the frame into the transmit ring. If we don't have room,
5549 * place the mbuf back at the head of the TX queue, set the
5550 * OACTIVE flag, and wait for the NIC to drain the chain.
5552 if (__predict_false(bxe_tx_encap(fp, &m))) {
5553 fp->eth_q_stats.tx_encap_failures++;
5555 /* mark the TX queue as full and return the frame */
5556 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5557 if_sendq_prepend(ifp, m);
5558 fp->eth_q_stats.mbuf_alloc_tx--;
5559 fp->eth_q_stats.tx_queue_xoff++;
5562 /* stop looking for more work */
5566 /* the frame was enqueued successfully */
5569 /* send a copy of the frame to any BPF listeners. */
5570 if_etherbpfmtap(ifp, m);
5572 tx_bd_avail = bxe_tx_avail(sc, fp);
5574 /* handle any completions if we're running low */
5575 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5576 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5578 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5584 /* all TX packets were dequeued and/or the tx ring is full */
5586 /* reset the TX watchdog timeout timer */
5587 fp->watchdog_timer = BXE_TX_TIMEOUT;
5591 /* Legacy (non-RSS) dispatch routine */
5593 bxe_tx_start(if_t ifp)
5595 struct bxe_softc *sc;
5596 struct bxe_fastpath *fp;
5598 sc = if_getsoftc(ifp);
5600 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5601 BLOGW(sc, "Interface not running, ignoring transmit request\n");
5605 if (!sc->link_vars.link_up) {
5606 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5612 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5613 fp->eth_q_stats.tx_queue_full_return++;
5618 bxe_tx_start_locked(sc, ifp, fp);
5619 BXE_FP_TX_UNLOCK(fp);
5622 #if __FreeBSD_version >= 901504
5625 bxe_tx_mq_start_locked(struct bxe_softc *sc,
5627 struct bxe_fastpath *fp,
5630 struct buf_ring *tx_br = fp->tx_br;
5632 int depth, rc, tx_count;
5633 uint16_t tx_bd_avail;
5637 BXE_FP_TX_LOCK_ASSERT(fp);
5640 BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5645 rc = drbr_enqueue(ifp, tx_br, m);
5647 fp->eth_q_stats.tx_soft_errors++;
5648 goto bxe_tx_mq_start_locked_exit;
5652 if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5653 fp->eth_q_stats.tx_request_link_down_failures++;
5654 goto bxe_tx_mq_start_locked_exit;
5657 /* fetch the depth of the driver queue */
5658 depth = drbr_inuse_drv(ifp, tx_br);
5659 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5660 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5663 /* keep adding entries while there are frames to send */
5664 while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5665 /* handle any completions if we're running low */
5666 tx_bd_avail = bxe_tx_avail(sc, fp);
5667 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5668 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5670 tx_bd_avail = bxe_tx_avail(sc, fp);
5671 if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5672 fp->eth_q_stats.bd_avail_too_less_failures++;
5674 drbr_advance(ifp, tx_br);
5680 /* the mbuf now belongs to us */
5681 fp->eth_q_stats.mbuf_alloc_tx++;
5684 * Put the frame into the transmit ring. If we don't have room,
5685 * place the mbuf back at the head of the TX queue, set the
5686 * OACTIVE flag, and wait for the NIC to drain the chain.
5688 rc = bxe_tx_encap(fp, &next);
5689 if (__predict_false(rc != 0)) {
5690 fp->eth_q_stats.tx_encap_failures++;
5692 /* mark the TX queue as full and save the frame */
5693 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5694 drbr_putback(ifp, tx_br, next);
5695 fp->eth_q_stats.mbuf_alloc_tx--;
5696 fp->eth_q_stats.tx_frames_deferred++;
5698 drbr_advance(ifp, tx_br);
5700 /* stop looking for more work */
5704 /* the transmit frame was enqueued successfully */
5707 /* send a copy of the frame to any BPF listeners */
5708 if_etherbpfmtap(ifp, next);
5710 drbr_advance(ifp, tx_br);
5713 /* all TX packets were dequeued and/or the tx ring is full */
5715 /* reset the TX watchdog timeout timer */
5716 fp->watchdog_timer = BXE_TX_TIMEOUT;
5719 bxe_tx_mq_start_locked_exit:
5720 /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5721 if (!drbr_empty(ifp, tx_br)) {
5722 fp->eth_q_stats.tx_mq_not_empty++;
5723 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5730 bxe_tx_mq_start_deferred(void *arg,
5733 struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5734 struct bxe_softc *sc = fp->sc;
5738 bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5739 BXE_FP_TX_UNLOCK(fp);
5742 /* Multiqueue (TSS) dispatch routine. */
5744 bxe_tx_mq_start(struct ifnet *ifp,
5747 struct bxe_softc *sc = if_getsoftc(ifp);
5748 struct bxe_fastpath *fp;
5751 fp_index = 0; /* default is the first queue */
5753 /* check if flowid is set */
5755 if (BXE_VALID_FLOWID(m))
5756 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5758 fp = &sc->fp[fp_index];
5760 if (BXE_FP_TX_TRYLOCK(fp)) {
5761 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5762 BXE_FP_TX_UNLOCK(fp);
5764 rc = drbr_enqueue(ifp, fp->tx_br, m);
5765 taskqueue_enqueue(fp->tq, &fp->tx_task);
5772 bxe_mq_flush(struct ifnet *ifp)
5774 struct bxe_softc *sc = if_getsoftc(ifp);
5775 struct bxe_fastpath *fp;
5779 for (i = 0; i < sc->num_queues; i++) {
5782 if (fp->state != BXE_FP_STATE_OPEN) {
5783 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5784 fp->index, fp->state);
5788 if (fp->tx_br != NULL) {
5789 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5791 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5794 BXE_FP_TX_UNLOCK(fp);
5801 #endif /* FreeBSD_version >= 901504 */
5804 bxe_cid_ilt_lines(struct bxe_softc *sc)
5807 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5809 return (L2_ILT_LINES(sc));
5813 bxe_ilt_set_info(struct bxe_softc *sc)
5815 struct ilt_client_info *ilt_client;
5816 struct ecore_ilt *ilt = sc->ilt;
5819 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5820 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5823 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5824 ilt_client->client_num = ILT_CLIENT_CDU;
5825 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5826 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5827 ilt_client->start = line;
5828 line += bxe_cid_ilt_lines(sc);
5830 if (CNIC_SUPPORT(sc)) {
5831 line += CNIC_ILT_LINES;
5834 ilt_client->end = (line - 1);
5837 "ilt client[CDU]: start %d, end %d, "
5838 "psz 0x%x, flags 0x%x, hw psz %d\n",
5839 ilt_client->start, ilt_client->end,
5840 ilt_client->page_size,
5842 ilog2(ilt_client->page_size >> 12));
5845 if (QM_INIT(sc->qm_cid_count)) {
5846 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5847 ilt_client->client_num = ILT_CLIENT_QM;
5848 ilt_client->page_size = QM_ILT_PAGE_SZ;
5849 ilt_client->flags = 0;
5850 ilt_client->start = line;
5852 /* 4 bytes for each cid */
5853 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5856 ilt_client->end = (line - 1);
5859 "ilt client[QM]: start %d, end %d, "
5860 "psz 0x%x, flags 0x%x, hw psz %d\n",
5861 ilt_client->start, ilt_client->end,
5862 ilt_client->page_size, ilt_client->flags,
5863 ilog2(ilt_client->page_size >> 12));
5866 if (CNIC_SUPPORT(sc)) {
5868 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5869 ilt_client->client_num = ILT_CLIENT_SRC;
5870 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5871 ilt_client->flags = 0;
5872 ilt_client->start = line;
5873 line += SRC_ILT_LINES;
5874 ilt_client->end = (line - 1);
5877 "ilt client[SRC]: start %d, end %d, "
5878 "psz 0x%x, flags 0x%x, hw psz %d\n",
5879 ilt_client->start, ilt_client->end,
5880 ilt_client->page_size, ilt_client->flags,
5881 ilog2(ilt_client->page_size >> 12));
5884 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5885 ilt_client->client_num = ILT_CLIENT_TM;
5886 ilt_client->page_size = TM_ILT_PAGE_SZ;
5887 ilt_client->flags = 0;
5888 ilt_client->start = line;
5889 line += TM_ILT_LINES;
5890 ilt_client->end = (line - 1);
5893 "ilt client[TM]: start %d, end %d, "
5894 "psz 0x%x, flags 0x%x, hw psz %d\n",
5895 ilt_client->start, ilt_client->end,
5896 ilt_client->page_size, ilt_client->flags,
5897 ilog2(ilt_client->page_size >> 12));
5900 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5904 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5907 uint32_t rx_buf_size;
5909 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5911 for (i = 0; i < sc->num_queues; i++) {
5912 if (rx_buf_size <= MCLBYTES) {
5913 sc->fp[i].rx_buf_size = rx_buf_size;
5914 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5915 } else if (rx_buf_size <= MJUMPAGESIZE) {
5916 sc->fp[i].rx_buf_size = rx_buf_size;
5917 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5918 } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5919 sc->fp[i].rx_buf_size = MCLBYTES;
5920 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5921 } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5922 sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5923 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5924 } else {
5925 sc->fp[i].rx_buf_size = MCLBYTES;
5926 sc->fp[i].mbuf_alloc_size = MCLBYTES;
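/*
 * Worked example (editorial, assuming the common values MCLBYTES = 2048
 * and MJUMPAGESIZE = PAGE_SIZE = 4096): an MTU of 1500 yields an
 * rx_buf_size that fits in a standard 2KB cluster, while an MTU of 3000
 * overflows 2KB and lands in a single 4KB jumbo page. Frames larger than
 * two jumbo pages fall into the final bucket, where each RX buffer is
 * capped at one standard cluster and a received frame presumably spans
 * multiple RX BDs.
 */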
5932 bxe_alloc_ilt_mem(struct bxe_softc *sc)
5937 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5939 (M_NOWAIT | M_ZERO))) == NULL) {
5947 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5951 if ((sc->ilt->lines =
5952 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5954 (M_NOWAIT | M_ZERO))) == NULL) {
5962 bxe_free_ilt_mem(struct bxe_softc *sc)
5964 if (sc->ilt != NULL) {
5965 free(sc->ilt, M_BXE_ILT);
5971 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5973 if (sc->ilt->lines != NULL) {
5974 free(sc->ilt->lines, M_BXE_ILT);
5975 sc->ilt->lines = NULL;
5980 bxe_free_mem(struct bxe_softc *sc)
5984 for (i = 0; i < L2_ILT_LINES(sc); i++) {
5985 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
5986 sc->context[i].vcxt = NULL;
5987 sc->context[i].size = 0;
5990 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
5992 bxe_free_ilt_lines_mem(sc);
5997 bxe_alloc_mem(struct bxe_softc *sc)
6004 * Allocate memory for CDU context:
6005 * This memory is allocated separately and not in the generic ILT
6006 * functions because CDU differs in a few aspects:
6007 * 1. There can be multiple entities allocating memory for context -
6008 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6009 * its own ILT lines.
6010 * 2. Since CDU page-size is not a single 4KB page (which is the case
6011 * for the other ILT clients), to be efficient we want to support
6012 * allocation of sub-page-size in the last entry.
6013 * 3. Context pointers are used by the driver to pass to FW / update
6014 * the context (for the other ILT clients the pointers are used just to
6015 * free the memory during unload).
6017 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6018 for (i = 0, allocated = 0; allocated < context_size; i++) {
6019 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6020 (context_size - allocated));
6022 if (bxe_dma_alloc(sc, sc->context[i].size,
6023 &sc->context[i].vcxt_dma,
6024 "cdu context") != 0) {
6029 sc->context[i].vcxt =
6030 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6032 allocated += sc->context[i].size;
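/*
 * Worked example (editorial): if context_size were 3.5 * CDU_ILT_PAGE_SZ,
 * the loop above would perform four allocations -- three full ILT pages
 * and a final sub-page chunk of 0.5 * CDU_ILT_PAGE_SZ -- matching point 2
 * of the comment above.
 */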
6035 bxe_alloc_ilt_lines_mem(sc);
6037 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6038 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6040 for (i = 0; i < 4; i++) {
6042 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6044 sc->ilt->clients[i].page_size,
6045 sc->ilt->clients[i].start,
6046 sc->ilt->clients[i].end,
6047 sc->ilt->clients[i].client_num,
6048 sc->ilt->clients[i].flags);
6051 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6052 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6061 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6063 struct bxe_softc *sc;
6068 if (fp->rx_mbuf_tag == NULL) {
6072 /* free all mbufs and unload all maps */
6073 for (i = 0; i < RX_BD_TOTAL; i++) {
6074 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6075 bus_dmamap_sync(fp->rx_mbuf_tag,
6076 fp->rx_mbuf_chain[i].m_map,
6077 BUS_DMASYNC_POSTREAD);
6078 bus_dmamap_unload(fp->rx_mbuf_tag,
6079 fp->rx_mbuf_chain[i].m_map);
6082 if (fp->rx_mbuf_chain[i].m != NULL) {
6083 m_freem(fp->rx_mbuf_chain[i].m);
6084 fp->rx_mbuf_chain[i].m = NULL;
6085 fp->eth_q_stats.mbuf_alloc_rx--;
6091 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6093 struct bxe_softc *sc;
6094 int i, max_agg_queues;
6098 if (fp->rx_mbuf_tag == NULL) {
6102 max_agg_queues = MAX_AGG_QS(sc);
6104 /* release all mbufs and unload all DMA maps in the TPA pool */
6105 for (i = 0; i < max_agg_queues; i++) {
6106 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6107 bus_dmamap_sync(fp->rx_mbuf_tag,
6108 fp->rx_tpa_info[i].bd.m_map,
6109 BUS_DMASYNC_POSTREAD);
6110 bus_dmamap_unload(fp->rx_mbuf_tag,
6111 fp->rx_tpa_info[i].bd.m_map);
6114 if (fp->rx_tpa_info[i].bd.m != NULL) {
6115 m_freem(fp->rx_tpa_info[i].bd.m);
6116 fp->rx_tpa_info[i].bd.m = NULL;
6117 fp->eth_q_stats.mbuf_alloc_tpa--;
6123 bxe_free_sge_chain(struct bxe_fastpath *fp)
6125 struct bxe_softc *sc;
6130 if (fp->rx_sge_mbuf_tag == NULL) {
6134 /* free all mbufs and unload all maps */
6135 for (i = 0; i < RX_SGE_TOTAL; i++) {
6136 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6137 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6138 fp->rx_sge_mbuf_chain[i].m_map,
6139 BUS_DMASYNC_POSTREAD);
6140 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6141 fp->rx_sge_mbuf_chain[i].m_map);
6144 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6145 m_freem(fp->rx_sge_mbuf_chain[i].m);
6146 fp->rx_sge_mbuf_chain[i].m = NULL;
6147 fp->eth_q_stats.mbuf_alloc_sge--;
6153 bxe_free_fp_buffers(struct bxe_softc *sc)
6155 struct bxe_fastpath *fp;
6158 for (i = 0; i < sc->num_queues; i++) {
6161 #if __FreeBSD_version >= 901504
6162 if (fp->tx_br != NULL) {
6163 /* just in case bxe_mq_flush() wasn't called */
6164 if (mtx_initialized(&fp->tx_mtx)) {
6168 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6170 BXE_FP_TX_UNLOCK(fp);
6175 /* free all RX buffers */
6176 bxe_free_rx_bd_chain(fp);
6177 bxe_free_tpa_pool(fp);
6178 bxe_free_sge_chain(fp);
6180 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6181 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6182 fp->eth_q_stats.mbuf_alloc_rx);
6185 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6186 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6187 fp->eth_q_stats.mbuf_alloc_sge);
6190 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6191 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6192 fp->eth_q_stats.mbuf_alloc_tpa);
6195 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6196 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6197 fp->eth_q_stats.mbuf_alloc_tx);
6200 /* XXX verify all mbufs were reclaimed */
6205 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6206 uint16_t prev_index,
6209 struct bxe_sw_rx_bd *rx_buf;
6210 struct eth_rx_bd *rx_bd;
6211 bus_dma_segment_t segs[1];
6218 /* allocate the new RX BD mbuf */
6219 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6220 if (__predict_false(m == NULL)) {
6221 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6225 fp->eth_q_stats.mbuf_alloc_rx++;
6227 /* initialize the mbuf buffer length */
6228 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6230 /* map the mbuf into non-paged pool */
6231 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6232 fp->rx_mbuf_spare_map,
6233 m, segs, &nsegs, BUS_DMA_NOWAIT);
6234 if (__predict_false(rc != 0)) {
6235 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6237 fp->eth_q_stats.mbuf_alloc_rx--;
6241 /* all mbufs must map to a single segment */
6242 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6244 /* release any existing RX BD mbuf mappings */
6246 if (prev_index != index) {
6247 rx_buf = &fp->rx_mbuf_chain[prev_index];
6249 if (rx_buf->m_map != NULL) {
6250 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6251 BUS_DMASYNC_POSTREAD);
6252 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6256 * We only get here from bxe_rxeof() when the maximum number
6257 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6258 * holds the mbuf in the prev_index so it's OK to NULL it out
6259 * here without concern of a memory leak.
6261 fp->rx_mbuf_chain[prev_index].m = NULL;
6264 rx_buf = &fp->rx_mbuf_chain[index];
6266 if (rx_buf->m_map != NULL) {
6267 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6268 BUS_DMASYNC_POSTREAD);
6269 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6272 /* save the mbuf and mapping info for a future packet */
6273 map = (prev_index != index) ?
6274 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6275 rx_buf->m_map = fp->rx_mbuf_spare_map;
6276 fp->rx_mbuf_spare_map = map;
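/*
 * Note (editorial): the new mbuf was loaded into the spare map up front,
 * so a mapping failure earlier in this function leaves the ring entry
 * untouched; on success the old map simply becomes the next spare. This
 * double-buffering is why the bus_dmamap_load_mbuf_sg() call above
 * targets rx_mbuf_spare_map rather than the ring entry's own map.
 */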
6277 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6278 BUS_DMASYNC_PREREAD);
6281 rx_bd = &fp->rx_chain[index];
6282 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6283 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6289 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6292 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6293 bus_dma_segment_t segs[1];
6299 /* allocate the new TPA mbuf */
6300 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6301 if (__predict_false(m == NULL)) {
6302 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6306 fp->eth_q_stats.mbuf_alloc_tpa++;
6308 /* initialize the mbuf buffer length */
6309 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6311 /* map the mbuf into non-paged pool */
6312 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6313 fp->rx_tpa_info_mbuf_spare_map,
6314 m, segs, &nsegs, BUS_DMA_NOWAIT);
6315 if (__predict_false(rc != 0)) {
6316 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6318 fp->eth_q_stats.mbuf_alloc_tpa--;
6322 /* all mbufs must map to a single segment */
6323 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6325 /* release any existing TPA mbuf mapping */
6326 if (tpa_info->bd.m_map != NULL) {
6327 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6328 BUS_DMASYNC_POSTREAD);
6329 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6332 /* save the mbuf and mapping info for the TPA mbuf */
6333 map = tpa_info->bd.m_map;
6334 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6335 fp->rx_tpa_info_mbuf_spare_map = map;
6336 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6337 BUS_DMASYNC_PREREAD);
6339 tpa_info->seg = segs[0];
6345 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6346 * caller must take care to save a copy of the existing mbuf in the SG mbuf chain.
6350 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6353 struct bxe_sw_rx_bd *sge_buf;
6354 struct eth_rx_sge *sge;
6355 bus_dma_segment_t segs[1];
6361 /* allocate a new SGE mbuf */
6362 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6363 if (__predict_false(m == NULL)) {
6364 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6368 fp->eth_q_stats.mbuf_alloc_sge++;
6370 /* initialize the mbuf buffer length */
6371 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6373 /* map the SGE mbuf into non-paged pool */
6374 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6375 fp->rx_sge_mbuf_spare_map,
6376 m, segs, &nsegs, BUS_DMA_NOWAIT);
6377 if (__predict_false(rc != 0)) {
6378 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6380 fp->eth_q_stats.mbuf_alloc_sge--;
6384 /* all mbufs must map to a single segment */
6385 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6387 sge_buf = &fp->rx_sge_mbuf_chain[index];
6389 /* release any existing SGE mbuf mapping */
6390 if (sge_buf->m_map != NULL) {
6391 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6392 BUS_DMASYNC_POSTREAD);
6393 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6396 /* save the mbuf and mapping info for a future packet */
6397 map = sge_buf->m_map;
6398 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6399 fp->rx_sge_mbuf_spare_map = map;
6400 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6401 BUS_DMASYNC_PREREAD);
6404 sge = &fp->rx_sge_chain[index];
6405 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6406 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6411 static __noinline int
6412 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6414 struct bxe_fastpath *fp;
6416 int ring_prod, cqe_ring_prod;
6419 for (i = 0; i < sc->num_queues; i++) {
6422 ring_prod = cqe_ring_prod = 0;
6426 /* allocate buffers for the RX BDs in RX BD chain */
6427 for (j = 0; j < sc->max_rx_bufs; j++) {
6428 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6430 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6432 goto bxe_alloc_fp_buffers_error;
6435 ring_prod = RX_BD_NEXT(ring_prod);
6436 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6439 fp->rx_bd_prod = ring_prod;
6440 fp->rx_cq_prod = cqe_ring_prod;
6441 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6443 max_agg_queues = MAX_AGG_QS(sc);
6445 fp->tpa_enable = TRUE;
6447 /* fill the TPA pool */
6448 for (j = 0; j < max_agg_queues; j++) {
6449 rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6451 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6453 fp->tpa_enable = FALSE;
6454 goto bxe_alloc_fp_buffers_error;
6457 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6460 if (fp->tpa_enable) {
6461 /* fill the RX SGE chain */
6463 for (j = 0; j < RX_SGE_USABLE; j++) {
6464 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6466 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6468 fp->tpa_enable = FALSE;
6470 goto bxe_alloc_fp_buffers_error;
6473 ring_prod = RX_SGE_NEXT(ring_prod);
6476 fp->rx_sge_prod = ring_prod;
6482 bxe_alloc_fp_buffers_error:
6484 /* unwind what was already allocated */
6485 bxe_free_rx_bd_chain(fp);
6486 bxe_free_tpa_pool(fp);
6487 bxe_free_sge_chain(fp);
6493 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6495 bxe_dma_free(sc, &sc->fw_stats_dma);
6497 sc->fw_stats_num = 0;
6499 sc->fw_stats_req_size = 0;
6500 sc->fw_stats_req = NULL;
6501 sc->fw_stats_req_mapping = 0;
6503 sc->fw_stats_data_size = 0;
6504 sc->fw_stats_data = NULL;
6505 sc->fw_stats_data_mapping = 0;
6509 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6511 uint8_t num_queue_stats;
6514 /* number of queues for statistics is number of eth queues */
6515 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6518 * Total number of FW statistics requests =
6519 * 1 for port stats + 1 for PF stats + num of queues
6521 sc->fw_stats_num = (2 + num_queue_stats);
6524 * Request is built from stats_query_header and an array of
6525 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6526 * rules. The real number of requests is configured in the
6527 * stats_query_header.
6530 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6531 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6533 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6534 sc->fw_stats_num, num_groups);
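/*
 * Worked example (editorial, assuming STATS_QUERY_CMD_COUNT is 16 as in
 * the firmware HSI): with 4 eth queues, fw_stats_num = 2 + 4 = 6, so
 * num_groups = (6 / 16) + 1 = 1 and a single stats_query_cmd_group
 * carries all six requests.
 */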
6536 sc->fw_stats_req_size =
6537 (sizeof(struct stats_query_header) +
6538 (num_groups * sizeof(struct stats_query_cmd_group)));
6541 * Data for statistics requests + stats_counter.
6542 * stats_counter holds per-STORM counters that are incremented when
6543 * STORM has finished with the current request. Memory for FCoE
6544 * offloaded statistics is counted anyway, even if it will not be sent.
6545 * VF stats are not accounted for here as the data of VF stats is stored
6546 * in memory allocated by the VF, not here.
6548 sc->fw_stats_data_size =
6549 (sizeof(struct stats_counter) +
6550 sizeof(struct per_port_stats) +
6551 sizeof(struct per_pf_stats) +
6552 /* sizeof(struct fcoe_statistics_params) + */
6553 (sizeof(struct per_queue_stats) * num_queue_stats));
6555 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6556 &sc->fw_stats_dma, "fw stats") != 0) {
6557 bxe_free_fw_stats_mem(sc);
6561 /* set up the shortcuts */
6564 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6565 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6568 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6569 sc->fw_stats_req_size);
6570 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6571 sc->fw_stats_req_size);
6573 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6574 (uintmax_t)sc->fw_stats_req_mapping);
6576 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6577 (uintmax_t)sc->fw_stats_data_mapping);
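/*
 * Resulting DMA buffer layout (editorial sketch, from the sizes computed
 * above):
 *
 *   fw_stats_dma.vaddr
 *   +-- stats_query_header + num_groups cmd groups   (fw_stats_req_size)
 *   +-- stats_counter
 *   +-- per_port_stats
 *   +-- per_pf_stats
 *   +-- per_queue_stats * num_queue_stats            (fw_stats_data_size)
 */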
6584 * 0-7 - Engine0 load counter.
6585 * 8-15 - Engine1 load counter.
6586 * 16 - Engine0 RESET_IN_PROGRESS bit.
6587 * 17 - Engine1 RESET_IN_PROGRESS bit.
6588 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6589 * function on the engine
6590 * 19 - Engine1 ONE_IS_LOADED.
6591 * 20 - Chip reset flow bit. When set, a non-leader must wait for both
6592 * engines' leaders to complete (check for both RESET_IN_PROGRESS bits,
6593 * not just the one belonging to its engine).
6595 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
6596 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
6597 #define BXE_PATH0_LOAD_CNT_SHIFT 0
6598 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
6599 #define BXE_PATH1_LOAD_CNT_SHIFT 8
6600 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6601 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6602 #define BXE_GLOBAL_RESET_BIT 0x00040000
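/*
 * Worked example (editorial): a register value of 0x00050201 decodes,
 * per the masks above, as engine0 load counter = 0x01, engine1 load
 * counter = 0x02, and bits 16 (engine0 RESET_IN_PROGRESS) and 18
 * (engine0 ONE_IS_LOADED) set.
 */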
6604 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6606 bxe_set_reset_global(struct bxe_softc *sc)
6609 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6610 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6611 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6612 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6615 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6617 bxe_clear_reset_global(struct bxe_softc *sc)
6620 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6621 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6622 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6623 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6626 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6628 bxe_reset_is_global(struct bxe_softc *sc)
6630 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6631 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6632 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6635 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6637 bxe_set_reset_done(struct bxe_softc *sc)
6640 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6641 BXE_PATH0_RST_IN_PROG_BIT;
6643 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6645 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6648 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6650 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6653 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6655 bxe_set_reset_in_progress(struct bxe_softc *sc)
6658 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6659 BXE_PATH0_RST_IN_PROG_BIT;
6661 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6663 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6666 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6668 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6671 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6673 bxe_reset_is_done(struct bxe_softc *sc,
6676 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6677 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6678 BXE_PATH0_RST_IN_PROG_BIT;
6680 /* return false if bit is set */
6681 return (val & bit) ? FALSE : TRUE;
6684 /* get the load status for an engine, should be run under rtnl lock */
6686 bxe_get_load_status(struct bxe_softc *sc,
6689 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6690 BXE_PATH0_LOAD_CNT_MASK;
6691 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6692 BXE_PATH0_LOAD_CNT_SHIFT;
6693 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6695 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6697 val = ((val & mask) >> shift);
6699 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6704 /* set pf load mark */
6705 /* XXX needs to be under rtnl lock */
6707 bxe_set_pf_load(struct bxe_softc *sc)
6711 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6712 BXE_PATH0_LOAD_CNT_MASK;
6713 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6714 BXE_PATH0_LOAD_CNT_SHIFT;
6716 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6718 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6719 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6721 /* get the current counter value */
6722 val1 = ((val & mask) >> shift);
6724 /* set bit of this PF */
6725 val1 |= (1 << SC_ABS_FUNC(sc));
6727 /* clear the old value */
6730 /* set the new one */
6731 val |= ((val1 << shift) & mask);
6733 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6735 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6738 /* clear pf load mark */
6739 /* XXX needs to be under rtnl lock */
6741 bxe_clear_pf_load(struct bxe_softc *sc)
6744 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6745 BXE_PATH0_LOAD_CNT_MASK;
6746 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6747 BXE_PATH0_LOAD_CNT_SHIFT;
6749 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6750 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6751 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6753 /* get the current counter value */
6754 val1 = (val & mask) >> shift;
6756 /* clear bit of that PF */
6757 val1 &= ~(1 << SC_ABS_FUNC(sc));
6759 /* clear the old value */
6762 /* set the new one */
6763 val |= ((val1 << shift) & mask);
6765 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6766 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6770 /* send a load request to the MCP and analyze the response */
6772 bxe_nic_load_request(struct bxe_softc *sc,
6773 uint32_t *load_code)
6777 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6778 DRV_MSG_SEQ_NUMBER_MASK);
6780 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6782 /* get the current FW pulse sequence */
6783 sc->fw_drv_pulse_wr_seq =
6784 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6785 DRV_PULSE_SEQ_MASK);
6787 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6788 sc->fw_drv_pulse_wr_seq);
6791 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6792 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6794 /* if the MCP fails to respond we must abort */
6795 if (!(*load_code)) {
6796 BLOGE(sc, "MCP response failure!\n");
6800 /* if MCP refused then must abort */
6801 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6802 BLOGE(sc, "MCP refused load request\n");
6810 * Check whether another PF has already loaded FW to the chip. In virtualized
6811 * environments a PF from another VM may have already initialized the device,
6812 * including loading the FW.
6815 bxe_nic_load_analyze_req(struct bxe_softc *sc,
6818 uint32_t my_fw, loaded_fw;
6820 /* is another pf loaded on this engine? */
6821 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6822 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6823 /* build my FW version dword */
6824 my_fw = (BCM_5710_FW_MAJOR_VERSION +
6825 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6826 (BCM_5710_FW_REVISION_VERSION << 16) +
6827 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6829 /* read loaded FW from chip */
6830 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6831 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6834 /* abort nic load if version mismatch */
6835 if (my_fw != loaded_fw) {
6836 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6845 /* mark PMF if applicable */
6847 bxe_nic_load_pmf(struct bxe_softc *sc,
6850 uint32_t ncsi_oem_data_addr;
6852 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6853 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6854 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6856 * Barrier here for ordering between the writing to sc->port.pmf here
6857 * and reading it from the periodic task.
6865 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6868 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6869 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6870 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6871 if (ncsi_oem_data_addr) {
6873 (ncsi_oem_data_addr +
6874 offsetof(struct glob_ncsi_oem_data, driver_version)),
6882 bxe_read_mf_cfg(struct bxe_softc *sc)
6884 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6888 if (BXE_NOMCP(sc)) {
6889 return; /* what should be the default value in this case */
6893 * The formula for computing the absolute function number is...
6894 * For 2 port configuration (4 functions per port):
6895 * abs_func = 2 * vn + SC_PORT + SC_PATH
6896 * For 4 port configuration (2 functions per port):
6897 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6899 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6900 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6901 if (abs_func >= E1H_FUNC_MAX) {
6904 sc->devinfo.mf_info.mf_config[vn] =
6905 MFCFG_RD(sc, func_mf_config[abs_func].config);
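/*
 * Worked example (editorial): in 2-port mode (n = 1), vn = 2 on port 1,
 * path 0 gives abs_func = 1 * (2*2 + 1) + 0 = 5; in 4-port mode (n = 2),
 * vn = 1 on port 1, path 1 gives abs_func = 2 * (2*1 + 1) + 1 = 7, i.e.
 * 4*vn + 2*port + path as the comment above states.
 */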
6908 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6909 FUNC_MF_CFG_FUNC_DISABLED) {
6910 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6911 sc->flags |= BXE_MF_FUNC_DIS;
6913 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6914 sc->flags &= ~BXE_MF_FUNC_DIS;
6918 /* acquire split MCP access lock register */
6919 static int bxe_acquire_alr(struct bxe_softc *sc)
6923 for (j = 0; j < 1000; j++) {
6925 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6926 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6927 if (val & (1L << 31))
6933 if (!(val & (1L << 31))) {
6934 BLOGE(sc, "Cannot acquire MCP access lock register\n");
/* release split MCP access lock register */
static void bxe_release_alr(struct bxe_softc *sc)
{
    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
}

static void
bxe_fan_failure(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t ext_phy_config;

    /* mark the failure */
    ext_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);

    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
             ext_phy_config);

    /* log the failure */
    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
              "the card to prevent permanent damage. "
              "Please contact OEM Support for assistance\n");

#if 1
    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
#else
    /*
     * Schedule a device reset (unload). Some boards draw enough power
     * while the driver is up to overheat if the fan fails.
     */
    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
    schedule_delayed_work(&sc->sp_rtnl_task, 0);
#endif
}
/* this function is called upon a link interrupt */
static void
bxe_link_attn(struct bxe_softc *sc)
{
    uint32_t pause_enabled = 0;
    struct host_port_stats *pstats;
    int cmng_fns;
    struct bxe_fastpath *fp;
    int i;

    /* Make sure that we are synced with the current statistics */
    bxe_stats_handle(sc, STATS_EVENT_STOP);

    elink_link_update(&sc->link_params, &sc->link_vars);

    if (sc->link_vars.link_up) {

        /* dropless flow control */
        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
                pause_enabled = 1;
            }

            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
                   pause_enabled);
        }

        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
            pstats = BXE_SP(sc, port_stats);
            /* reset old mac stats */
            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
        }

        if (sc->state == BXE_STATE_OPEN) {
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        }

        /* Restart tx when the link comes back. */
        FOR_EACH_ETH_QUEUE(sc, i) {
            fp = &sc->fp[i];
            taskqueue_enqueue(fp->tq, &fp->tx_task);
        }
    }

    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
        cmng_fns = bxe_get_cmng_fns_mode(sc);

        if (cmng_fns != CMNG_FNS_NONE) {
            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
        } else {
            /* rate shaping and fairness are disabled */
            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
        }
    }

    bxe_link_report_locked(sc);

    ; // XXX bxe_link_sync_notify(sc);
static void
bxe_attn_int_asserted(struct bxe_softc *sc,
                      uint32_t         asserted)
{
    int port = SC_PORT(sc);
    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
                                        NIG_REG_MASK_INTERRUPT_PORT0;
    uint32_t aeu_mask;
    uint32_t nig_mask = 0;
    uint32_t reg_addr;
    uint32_t igu_acked;
    uint32_t cnt;

    if (sc->attn_state & asserted) {
        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, aeu_addr);

    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
          aeu_mask, asserted);

    aeu_mask &= ~(asserted & 0x3ff);

    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);

    REG_WR(sc, aeu_addr, aeu_mask);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
    sc->attn_state |= asserted;
    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);

    if (asserted & ATTN_HARD_WIRED_MASK) {
        if (asserted & ATTN_NIG_FOR_FUNC) {

            bxe_acquire_phy_lock(sc);

            /* save nig interrupt mask */
            nig_mask = REG_RD(sc, nig_int_mask_addr);

            /* If nig_mask is not set, no need to call the update function */
            if (nig_mask) {
                REG_WR(sc, nig_int_mask_addr, 0);

                bxe_link_attn(sc);
            }

            /* handle unicore attn? */
        }

        if (asserted & ATTN_SW_TIMER_4_FUNC) {
            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
        }

        if (asserted & GPIO_2_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
        }

        if (asserted & GPIO_3_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
        }

        if (asserted & GPIO_4_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
        }

        if (asserted & ATTN_GENERAL_ATTN_1) {
            BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
        }

        if (asserted & ATTN_GENERAL_ATTN_2) {
            BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
        }

        if (asserted & ATTN_GENERAL_ATTN_3) {
            BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
        }

        if (asserted & ATTN_GENERAL_ATTN_4) {
            BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
        }

        if (asserted & ATTN_GENERAL_ATTN_5) {
            BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
        }

        if (asserted & ATTN_GENERAL_ATTN_6) {
            BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
        }
    } /* hardwired */

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
    }

    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
          asserted,
          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(sc, reg_addr, asserted);

    /* now set back the mask */
    if (asserted & ATTN_NIG_FOR_FUNC) {
        /*
         * Verify that IGU ack through BAR was written before restoring
         * NIG mask. This loop should exit after 2-3 iterations max.
         */
        if (sc->devinfo.int_block != INT_BLOCK_HC) {
            cnt = 0;

            do {
                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
                     (++cnt < MAX_IGU_ATTN_ACK_TO));

            if (!igu_acked) {
                BLOGE(sc, "Failed to verify IGU ack on time\n");
            }

            mb();
        }

        REG_WR(sc, nig_int_mask_addr, nig_mask);

        bxe_release_phy_lock(sc);
    }
}

static void
bxe_print_next_block(struct bxe_softc *sc,
                     int              idx,
                     const char       *blk)
{
    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
}
static int
bxe_check_blocks_with_parity0(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          print)
{
    uint32_t cur_bit = 0;
    int i = 0;
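    /*
     * The loop below walks the signal word one bit at a time; each set
     * bit names the HW block whose parity error fired, and the handled
     * bit is cleared from sig at the bottom of the loop so that the
     * "for (i = 0; sig; i++)" condition eventually goes false.
     */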
    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "BRB");
                break;
            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "PARSER");
                break;
            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "TSDM");
                break;
            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "SEARCHER");
                break;
            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "TCM");
                break;
            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "TSEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "XPB");
                break;
            }

            /* clear the bit */
            sig &= ~cur_bit;
        }
    }

    return (par_num);
}

static int
bxe_check_blocks_with_parity1(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          *global,
                              uint8_t          print)
{
    uint32_t cur_bit = 0;
    int i = 0;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "PBF");
                break;
            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "QM");
                break;
            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "TM");
                break;
            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "XSDM");
                break;
            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "XCM");
                break;
            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "XSEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
                break;
            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "NIG");
                break;
            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
                break;
            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "DEBUG");
                break;
            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "USDM");
                break;
            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "UCM");
                break;
            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "USEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "UPB");
                break;
            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "CSDM");
                break;
            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "CCM");
                break;
            }

            /* clear the bit */
            sig &= ~cur_bit;
        }
    }

    return (par_num);
}

static int
bxe_check_blocks_with_parity2(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          print)
{
    uint32_t cur_bit = 0;
    int i = 0;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "CSEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "PXP");
                break;
            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
                break;
            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "CFC");
                break;
            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "CDU");
                break;
            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "DMAE");
                break;
            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "IGU");
                break;
            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "MISC");
                break;
            }

            /* clear the bit */
            sig &= ~cur_bit;
        }
    }

    return (par_num);
}

static int
bxe_check_blocks_with_parity3(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          *global,
                              uint8_t          print)
{
    uint32_t cur_bit = 0;
    int i = 0;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
                if (print)
                    bxe_print_next_block(sc, par_num++, "MCP ROM");
                *global = TRUE;
                break;
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
                if (print)
                    bxe_print_next_block(sc, par_num++,
                                         "MCP UMP RX");
                *global = TRUE;
                break;
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
                if (print)
                    bxe_print_next_block(sc, par_num++,
                                         "MCP UMP TX");
                *global = TRUE;
                break;
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
                if (print)
                    bxe_print_next_block(sc, par_num++,
                                         "MCP SCPAD");
                *global = TRUE;
                break;
            }

            /* clear the bit */
            sig &= ~cur_bit;
        }
    }

    return (par_num);
}
static int
bxe_check_blocks_with_parity4(struct bxe_softc *sc,
                              uint32_t         sig,
                              int              par_num,
                              uint8_t          print)
{
    uint32_t cur_bit = 0;
    int i = 0;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
                break;
            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
                if (print)
                    bxe_print_next_block(sc, par_num++, "ATC");
                break;
            }

            /* clear the bit */
            sig &= ~cur_bit;
        }
    }

    return (par_num);
}

static uint8_t
bxe_parity_attn(struct bxe_softc *sc,
                uint8_t          *global,
                uint8_t          print,
                uint32_t         *sig)
{
    int par_num = 0;

    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
        BLOGE(sc, "Parity error: HW block parity attention:\n"
              "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));

        if (print)
            BLOGI(sc, "Parity errors detected in blocks: ");

        par_num =
            bxe_check_blocks_with_parity0(sc, sig[0] &
                                          HW_PRTY_ASSERT_SET_0,
                                          par_num, print);
        par_num =
            bxe_check_blocks_with_parity1(sc, sig[1] &
                                          HW_PRTY_ASSERT_SET_1,
                                          par_num, global, print);
        par_num =
            bxe_check_blocks_with_parity2(sc, sig[2] &
                                          HW_PRTY_ASSERT_SET_2,
                                          par_num, print);
        par_num =
            bxe_check_blocks_with_parity3(sc, sig[3] &
                                          HW_PRTY_ASSERT_SET_3,
                                          par_num, global, print);
        par_num =
            bxe_check_blocks_with_parity4(sc, sig[4] &
                                          HW_PRTY_ASSERT_SET_4,
                                          par_num, print);

        if (print)
            BLOGI(sc, "\n");

        return (TRUE);
    }

    return (FALSE);
}
static uint8_t
bxe_chk_parity_attn(struct bxe_softc *sc,
                    uint8_t          *global,
                    uint8_t          print)
{
    struct attn_route attn = { {0} };
    int port = SC_PORT(sc);

    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);

    /*
     * Since MCP attentions can't be disabled inside the block, we need to
     * read AEU registers to see whether they're currently disabled.
     */
    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
                     MISC_AEU_ENABLE_MCP_PRTY_BITS) |
                    ~MISC_AEU_ENABLE_MCP_PRTY_BITS);

    if (!CHIP_IS_E1x(sc))
        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);

    return (bxe_parity_attn(sc, global, print, attn.sig));
}
static void
bxe_attn_int_deasserted4(struct bxe_softc *sc,
                         uint32_t         attn)
{
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
    }

    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
    }

    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
    }
}
static void
bxe_e1h_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    bxe_tx_disable(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void
bxe_e1h_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);

    // XXX bxe_tx_enable(sc);
}

/*
 * called due to MCP event (on pmf):
 *   reread new bandwidth configuration
 *   configure FW
 *   notify other functions about the change
 */
static void
bxe_config_mf_bw(struct bxe_softc *sc)
{
    if (sc->link_vars.link_up) {
        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
        // XXX bxe_link_sync_notify(sc);
    }

    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
}

static void
bxe_set_mf_bw(struct bxe_softc *sc)
{
    bxe_config_mf_bw(sc);
    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}

static void
bxe_handle_eee_event(struct bxe_softc *sc)
{
    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

static void
bxe_drv_info_ether_stat(struct bxe_softc *sc)
{
    struct eth_stats_info *ether_stat =
        &sc->sp->drv_info_to_mcp.ether_stat;

    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
            ETH_STAT_INFO_VERSION_LEN);

    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
                                          ether_stat->mac_local + MAC_PAD,
                                          MAC_PAD, ETH_ALEN);

    ether_stat->mtu_size = sc->mtu;

    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
    }

    // XXX ether_stat->feature_flags |= ???;

    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;

    ether_stat->txq_size = sc->tx_ring_size;
    ether_stat->rxq_size = sc->rx_ring_size;
}
static void
bxe_handle_drv_info_req(struct bxe_softc *sc)
{
    enum drv_info_opcode op_code;
    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);

    /* if drv_info version supported by MFW doesn't match - send NACK */
    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
        return;
    }

    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
               DRV_INFO_CONTROL_OP_CODE_SHIFT);

    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));

    switch (op_code) {
    case ETH_STATS_OPCODE:
        bxe_drv_info_ether_stat(sc);
        break;
    case FCOE_STATS_OPCODE:
    case ISCSI_STATS_OPCODE:
    default:
        /* if op code isn't supported - send NACK */
        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
        return;
    }

    /*
     * If we got drv_info attn from MFW then these fields are defined in
     * shmem2 for sure.
     */
    SHMEM2_WR(sc, drv_info_host_addr_lo,
              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
    SHMEM2_WR(sc, drv_info_host_addr_hi,
              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));

    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
}
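/*
 * Ordering note (my reading of the exchange above): the driver first
 * publishes the DMA address of its reply buffer via shmem2, and only
 * then sends DRV_MSG_CODE_DRV_INFO_ACK, at which point the MFW is free
 * to DMA the requested stats structure out of host memory.
 */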
static void
bxe_dcc_event(struct bxe_softc *sc,
              uint32_t         dcc_event)
{
    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);

    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
        /*
         * This is the only place besides the function initialization
         * where the sc->flags can change so it is done without any
         * locks.
         */
        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
            sc->flags |= BXE_MF_FUNC_DIS;
            bxe_e1h_disable(sc);
        } else {
            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
            sc->flags &= ~BXE_MF_FUNC_DIS;
            bxe_e1h_enable(sc);
        }

        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
    }

    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
        bxe_config_mf_bw(sc);
        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
    }

    /* Report results to MCP */
    if (dcc_event)
        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
    else
        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
}
static void
bxe_pmf_update(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    sc->port.pmf = 1;
    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);

    /*
     * We need the mb() to ensure the ordering between the writing to
     * sc->port.pmf here and reading it from the bxe_periodic_task().
     */
    mb();

    /* queue a periodic task */
    // XXX schedule task...

    // XXX bxe_dcbx_pmf_update(sc);

    /* enable nig attention */
    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
    } else if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
    }

    bxe_stats_handle(sc, STATS_EVENT_PMF);
}
static int
bxe_mc_assert(struct bxe_softc *sc)
{
    char last_idx;
    int i, rc = 0;
    uint32_t row0, row1, row2, row3;
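    /*
     * Each STORM processor keeps a small list of assert entries in its
     * internal memory; every entry is four dwords wide (row0..row3,
     * printed most-significant first below) and the list terminates at
     * the first entry whose opcode reads back as
     * COMMON_ASM_INVALID_ASSERT_OPCODE.
     */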
    /* XSTORM */
    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {

        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                  i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    /* TSTORM */
    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {

        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                  i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    /* CSTORM */
    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {

        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                  i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    /* USTORM */
    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {

        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                  i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    return (rc);
}
static void
bxe_attn_int_deasserted3(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int func = SC_FUNC(sc);
    uint32_t val;

    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

        if (attn & BXE_PMF_LINK_ASSERT(sc)) {

            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
            bxe_read_mf_cfg(sc);
            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);

            if (val & DRV_STATUS_DCC_EVENT_MASK)
                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));

            if (val & DRV_STATUS_SET_MF_BW)
                bxe_set_mf_bw(sc);

            if (val & DRV_STATUS_DRV_INFO_REQ)
                bxe_handle_drv_info_req(sc);

            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
                bxe_pmf_update(sc);

            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
                bxe_handle_eee_event(sc);

            if (sc->link_vars.periodic_flags &
                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
                /* sync with link */
                bxe_acquire_phy_lock(sc);
                sc->link_vars.periodic_flags &=
                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
                bxe_release_phy_lock(sc);
                if (IS_MF(sc))
                    ; // XXX bxe_link_sync_notify(sc);
                bxe_link_report(sc);
            }

            /*
             * Always call it here: bxe_link_report() will
             * prevent the link indication duplication.
             */
            bxe_link_status_update(sc);

        } else if (attn & BXE_MC_ASSERT_BITS) {

            BLOGE(sc, "MC assert!\n");
            bxe_mc_assert(sc);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
            bxe_panic(sc, ("MC assert!\n"));

        } else if (attn & BXE_MCP_ASSERT) {

            BLOGE(sc, "MCP assert!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
            // XXX bxe_fw_dump(sc);

        } else {
            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
        }
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
        if (attn & BXE_GRC_TIMEOUT) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
            BLOGE(sc, "GRC time-out 0x%08x\n", val);
        }
        if (attn & BXE_GRC_RSV) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
            BLOGE(sc, "GRC reserved 0x%08x\n", val);
        }
        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}
static void
bxe_attn_int_deasserted2(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    uint32_t val;
    uint32_t val0, mask0, val1, mask1;
    uint32_t reg_offset;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
        /* CFC error attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from CFC\n");
        }
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000) {
            BLOGE(sc, "FATAL error from PXP\n");
        }

        if (!CHIP_IS_E1x(sc)) {
            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
        }
    }

#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT

    if (attn & AEU_PXP2_HW_INT_BIT) {
        /*
         * CQ47854 workaround: do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
         */
        if (!CHIP_IS_E1x(sc)) {
            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
            /*
             * If the only PXP2_EOP_ERROR_BIT is set in
             * STS0 and STS1 - clear it.
             *
             * Probably we lose additional attentions between
             * STS0 and STS_CLR0; in this case the user will not
             * be notified about them.
             */
            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
                !(val1 & mask1))
                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

            /* print the register, since no one can restore it */
            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);

            /*
             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
             * then notify
             */
            if (val0 & PXP2_EOP_ERROR_BIT) {
                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");

                /*
                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
                 * set then clear attention from PXP2 block without panic
                 */
                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
                    ((val1 & mask1) == 0))
                    attn &= ~AEU_PXP2_HW_INT_BIT;
            }
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_2) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        bxe_panic(sc, ("HW block attention set2\n"));
    }
}
static void
bxe_attn_int_deasserted1(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    uint32_t val;
    uint32_t reg_offset;

    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
        BLOGE(sc, "DB hw attention 0x%08x\n", val);
        /* DORQ discard attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from DORQ\n");
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_1) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
        bxe_panic(sc, ("HW block attention set1\n"));
    }
}
static void
bxe_attn_int_deasserted0(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;

    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;

    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
        val = REG_RD(sc, reg_offset);
        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_offset, val);

        BLOGW(sc, "SPIO5 hw attention\n");

        /* Fan failure attention */
        elink_hw_reset_phy(&sc->link_params);
        bxe_fan_failure(sc);
    }

    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
        bxe_acquire_phy_lock(sc);
        elink_handle_module_detect_int(&sc->link_params);
        bxe_release_phy_lock(sc);
    }

    if (attn & HW_INTERRUT_ASSERT_SET_0) {
        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
        REG_WR(sc, reg_offset, val);

        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
    }
}
static void
bxe_attn_int_deasserted(struct bxe_softc *sc,
                        uint32_t         deasserted)
{
    struct attn_route attn;
    struct attn_route *group_mask;
    int port = SC_PORT(sc);
    int index;
    uint32_t reg_addr;
    uint32_t val;
    uint32_t aeu_mask;
    uint8_t global = FALSE;

    /*
     * Need to take HW lock because MCP or other port might also
     * try to handle this event.
     */
    bxe_acquire_alr(sc);

    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
        /*
         * In case of parity errors don't handle attentions so that
         * other function would "see" parity errors.
         */
        sc->recovery_state = BXE_RECOVERY_INIT;
        // XXX schedule a recovery task...
        /* disable HW interrupts */
        bxe_int_disable(sc);
        bxe_release_alr(sc);
        return;
    }

    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
    if (!CHIP_IS_E1x(sc)) {
        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
    } else {
        attn.sig[4] = 0;
    }

    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = &sc->attn_group[index];

            BLOGD(sc, DBG_INTR,
                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
                  group_mask->sig[0], group_mask->sig[1],
                  group_mask->sig[2], group_mask->sig[3],
                  group_mask->sig[4]);

            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
        }
    }

    bxe_release_alr(sc);

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port*32 +
                    COMMAND_REG_ATTN_BITS_CLR);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
    }

    val = ~deasserted;
    BLOGD(sc, DBG_INTR,
          "about to mask 0x%08x at %s addr 0x%08x\n", val,
          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(sc, reg_addr, val);

    if (~sc->attn_state & deasserted) {
        BLOGE(sc, "IGU error\n");
    }

    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                      MISC_REG_AEU_MASK_ATTN_FUNC_0;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, reg_addr);

    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
          aeu_mask, deasserted);
    aeu_mask |= (deasserted & 0x3ff);
    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);

    REG_WR(sc, reg_addr, aeu_mask);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
    sc->attn_state &= ~deasserted;
    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
}
static void
bxe_attn_int(struct bxe_softc *sc)
{
    /* read local copy of bits */
    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
    uint32_t attn_state = sc->attn_state;

    /* look for changed bits */
    uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
    uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

    BLOGD(sc, DBG_INTR,
          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
          attn_bits, attn_ack, asserted, deasserted);
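    /*
     * A concrete example of the edge detection above: with
     * attn_bits = 0x5, attn_ack = 0x4 and attn_state = 0x4, bit 0 is
     * newly raised (set in attn_bits, clear in both ack and state) so
     * asserted = 0x1, while nothing satisfies the deasserted test
     * (clear in attn_bits but still set in both ack and state).
     */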
    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
        BLOGE(sc, "BAD attention state\n");
    }

    /* handle bits that were raised */
    if (asserted) {
        bxe_attn_int_asserted(sc, asserted);
    }

    if (deasserted) {
        bxe_attn_int_deasserted(sc, deasserted);
    }
}

static uint16_t
bxe_update_dsb_idx(struct bxe_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    uint16_t rc = 0;

    mb(); /* status block is written to by the chip */

    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
        rc |= BXE_DEF_SB_ATT_IDX;
    }

    if (sc->def_idx != def_sb->sp_sb.running_index) {
        sc->def_idx = def_sb->sp_sb.running_index;
        rc |= BXE_DEF_SB_IDX;
    }

    mb();

    return (rc);
}
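/*
 * The bitmask returned above (BXE_DEF_SB_ATT_IDX and/or BXE_DEF_SB_IDX)
 * is consumed by bxe_handle_sp_tq() below: the attention bit routes to
 * bxe_attn_int() and the index bit to the event-queue handler, so a
 * single status-block update can trigger either or both paths.
 */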
static inline struct ecore_queue_sp_obj *
bxe_cid_to_q_obj(struct bxe_softc *sc,
                 uint32_t         cid)
{
    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
}

static void
bxe_handle_mcast_eqe(struct bxe_softc *sc)
{
    struct ecore_mcast_ramrod_params rparam;
    int rc;

    memset(&rparam, 0, sizeof(rparam));

    rparam.mcast_obj = &sc->mcast_obj;

    BXE_MCAST_LOCK(sc);

    /* clear pending state for the last command */
    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);

    /* if there are pending mcast commands - send them */
    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
        if (rc < 0) {
            BLOGE(sc,
                  "ERROR: Failed to send pending mcast commands (%d)\n", rc);
        }
    }

    BXE_MCAST_UNLOCK(sc);
}
static void
bxe_handle_classification_eqe(struct bxe_softc      *sc,
                              union event_ring_elem *elem)
{
    unsigned long ramrod_flags = 0;
    int rc = 0;
    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
    struct ecore_vlan_mac_obj *vlan_mac_obj;

    /* always push next commands out, don't wait here */
    bit_set(&ramrod_flags, RAMROD_CONT);

    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
    case ECORE_FILTER_MAC_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
        break;

    case ECORE_FILTER_MCAST_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
        /*
         * This is only relevant for 57710 where multicast MACs are
         * configured as unicast MACs using the same ramrod.
         */
        bxe_handle_mcast_eqe(sc);
        return;

    default:
        BLOGE(sc, "Unsupported classification command: %d\n",
              elem->message.data.eth_event.echo);
        return;
    }

    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);

    if (rc < 0) {
        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
    } else if (rc > 0) {
        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
    }
}

static void
bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
                       union event_ring_elem *elem)
{
    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    /* send rx_mode command again if was requested */
    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
                               &sc->sp_state)) {
        bxe_set_storm_rx_mode(sc);
    }
}

static void
bxe_update_eq_prod(struct bxe_softc *sc,
                   uint16_t         prod)
{
    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
    wmb(); /* keep prod updates ordered */
}
static void
bxe_eq_int(struct bxe_softc *sc)
{
    uint16_t hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    uint8_t echo;
    uint32_t cid;
    uint8_t opcode;
    int spqe_cnt = 0;
    struct ecore_queue_sp_obj *q_obj;
    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;

    hw_cons = le16toh(*sc->eq_cons_sb);

    /*
     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
     * When we get to the next-page we need to adjust so the loop
     * condition below will be met. The next element is the size of a
     * regular element and hence incrementing by 1.
     */
    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
        hw_cons++;
    }

    /*
     * This function may never run in parallel with itself for a
     * specific sc and no need for a read memory barrier here.
     */
    sw_cons = sc->eq_cons;
    sw_prod = sc->eq_prod;

    BLOGD(sc, DBG_SP, "EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));

    for (;
         sw_cons != hw_cons;
         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

        elem = &sc->eq[EQ_DESC(sw_cons)];

        /* elem CID originates from FW, actually LE */
        cid = SW_CID(elem->message.data.cfc_del_event.cid);
        opcode = elem->message.opcode;

        /* handle eq element */
        switch (opcode) {
        case EVENT_RING_OPCODE_STAT_QUERY:
            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
                  sc->stats_comp++);
            /* nothing to do with stats comp */
            goto next_spqe;

        case EVENT_RING_OPCODE_CFC_DEL:
            /* handle according to cid range */
            /* we may want to verify here that the sc state is HALTING */
            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
            q_obj = bxe_cid_to_q_obj(sc, cid);
            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_STOP_TRAFFIC:
            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
                break;
            }
            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
            goto next_spqe;

        case EVENT_RING_OPCODE_START_TRAFFIC:
            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
                break;
            }
            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
            echo = elem->message.data.function_update_event.echo;
            if (echo == SWITCH_UPDATE) {
                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
                if (f_obj->complete_cmd(sc, f_obj,
                                        ECORE_F_CMD_SWITCH_UPDATE)) {
                    break;
                }
            } else {
                BLOGD(sc, DBG_SP,
                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FORWARD_SETUP:
            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
            if (q_obj->complete_cmd(sc, q_obj,
                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_START:
            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_STOP:
            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
                break;
            }
            goto next_spqe;
        }

        switch (opcode | sc->state) {
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
            rss_raw->clear_pending(rss_raw);
            goto next_spqe;

        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
            bxe_handle_classification_eqe(sc, elem);
            goto next_spqe;

        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
            bxe_handle_mcast_eqe(sc);
            goto next_spqe;

        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
            bxe_handle_rx_mode_eqe(sc, elem);
            goto next_spqe;

        default:
            /* unknown event log error and continue */
            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
                  elem->message.opcode, sc->state);
        }

next_spqe:
        spqe_cnt++;
    } /* for */

    mb();
    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);

    sc->eq_cons = sw_cons;
    sc->eq_prod = sw_prod;

    /* make sure that above mem writes were issued towards the memory */
    wmb();

    /* update producer */
    bxe_update_eq_prod(sc, sc->eq_prod);
}
static void
bxe_handle_sp_tq(void *context,
                 int  pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)context;
    uint16_t status;

    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");

    /* what work needs to be performed? */
    status = bxe_update_dsb_idx(sc);

    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);

    /* HW attentions */
    if (status & BXE_DEF_SB_ATT_IDX) {
        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
        bxe_attn_int(sc);
        status &= ~BXE_DEF_SB_ATT_IDX;
    }

    /* SP events: STAT_QUERY and others */
    if (status & BXE_DEF_SB_IDX) {
        /* handle EQ completions */
        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
        bxe_eq_int(sc);
        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
        status &= ~BXE_DEF_SB_IDX;
    }

    /* if status is non zero then something went wrong */
    if (__predict_false(status)) {
        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
    }

    /* ack status block only if something was actually handled */
    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);

    /*
     * Must be called after the EQ processing (since eq leads to sriov
     * ramrod completion flows).
     * This flow may have been scheduled by the arrival of a ramrod
     * completion, or by the sriov code rescheduling itself.
     */
    // XXX bxe_iov_sp_task(sc);
}
static void
bxe_handle_fp_tq(void *context,
                 int  pending)
{
    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
    struct bxe_softc *sc = fp->sc;
    uint8_t more_tx = FALSE;
    uint8_t more_rx = FALSE;

    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);

    /*
     * IFF_DRV_RUNNING state can't be checked here since we process
     * slowpath events on a client queue during setup. Instead
     * we need to add a "process/continue" flag here that the driver
     * can use to tell the task here not to do anything.
     */
    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
        return;
    }

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add loop here if ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        more_tx = bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        more_rx = bxe_rxeof(sc, fp);
    }

    if (more_rx /*|| more_tx*/) {
        /* still more work to do */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
static void
bxe_task_fp(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc = fp->sc;
    uint8_t more_tx = FALSE;
    uint8_t more_rx = FALSE;

    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add loop here if ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        more_tx = bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        more_rx = bxe_rxeof(sc, fp);
    }

    if (more_rx /*|| more_tx*/) {
        /* still more work to do, bail out of this ISR and process later */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    /*
     * Here we write the fastpath index taken before doing any tx or rx work.
     * It is very well possible other hw events occurred up to this point and
     * they were actually processed accordingly above. Since we're going to
     * write an older fastpath index, an interrupt is coming which we might
     * not do any work in.
     */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
/*
 * Legacy interrupt entry point.
 *
 * Verifies that the controller generated the interrupt and
 * then calls a separate routine to handle the various
 * interrupt causes: link, RX, and TX.
 */
static void
bxe_intr_legacy(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;
    struct bxe_fastpath *fp;
    uint16_t status, mask;
    int i;

    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");

    /*
     * 0 for ustorm, 1 for cstorm
     * the bits returned from ack_int() are 0-15
     * bit 0 = attention status block
     * bit 1 = fast path status block
     * a mask of 0x2 or more = tx/rx event
     * a mask of 1 = slow path event
     */
    status = bxe_ack_int(sc);

    /* the interrupt is not for us */
    if (__predict_false(status == 0)) {
        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
        return;
    }

    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
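        /*
         * Example of the mask layout described above: with no CNIC
         * support, queue 0 tests bit 1 (mask 0x2), queue 1 tests bit 2
         * (mask 0x4), and so on; bit 0 stays reserved for the slowpath
         * event handled after this loop.
         */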
        if (status & mask) {
            /* acknowledge and disable further fastpath interrupts */
            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
            bxe_task_fp(fp);
            status &= ~mask;
        }
    }

    if (__predict_false(status & 0x1)) {
        /* acknowledge and disable further slowpath interrupts */
        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

        /* schedule slowpath handler */
        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);

        status &= ~0x1;
    }

    if (__predict_false(status)) {
        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
    }
}
/* slowpath interrupt entry point */
static void
bxe_intr_sp(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;

    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");

    /* acknowledge and disable further slowpath interrupts */
    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

    /* schedule slowpath handler */
    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
}

/* fastpath interrupt entry point */
static void
bxe_intr_fp(void *xfp)
{
    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
    struct bxe_softc *sc = fp->sc;

    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);

    BLOGD(sc, DBG_INTR,
          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);

    /* acknowledge and disable further fastpath interrupts */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

    bxe_task_fp(fp);
}
/* Release all interrupts allocated by the driver. */
static void
bxe_interrupt_free(struct bxe_softc *sc)
{
    int i;

    switch (sc->interrupt_mode) {
    case INTR_MODE_INTX:
        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
        if (sc->intr[0].resource != NULL) {
            bus_release_resource(sc->dev,
                                 SYS_RES_IRQ,
                                 sc->intr[0].rid,
                                 sc->intr[0].resource);
        }
        break;
    case INTR_MODE_MSI:
        for (i = 0; i < sc->intr_count; i++) {
            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
            if (sc->intr[i].resource && sc->intr[i].rid) {
                bus_release_resource(sc->dev,
                                     SYS_RES_IRQ,
                                     sc->intr[i].rid,
                                     sc->intr[i].resource);
            }
        }
        pci_release_msi(sc->dev);
        break;
    case INTR_MODE_MSIX:
        for (i = 0; i < sc->intr_count; i++) {
            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
            if (sc->intr[i].resource && sc->intr[i].rid) {
                bus_release_resource(sc->dev,
                                     SYS_RES_IRQ,
                                     sc->intr[i].rid,
                                     sc->intr[i].resource);
            }
        }
        pci_release_msi(sc->dev);
        break;
    default:
        /* nothing to do as initial allocation failed */
        break;
    }
}
/*
 * This function determines and allocates the appropriate
 * interrupt based on system capabilities and user request.
 *
 * The user may force a particular interrupt mode, specify
 * the number of receive queues, specify the method for
 * distributing received frames to receive queues, or use
 * the default settings which will automatically select the
 * best supported combination. In addition, the OS may or
 * may not support certain combinations of these settings.
 * This routine attempts to reconcile the settings requested
 * by the user with the capabilities available from the system
 * to select the optimal combination of features.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */
static int
bxe_interrupt_alloc(struct bxe_softc *sc)
{
    int msix_count = 0;
    int msi_count = 0;
    int num_requested = 0;
    int num_allocated = 0;
    int rc = 0;
    int i, j, rid;

    /* get the number of available MSI/MSI-X interrupts from the OS */
    if (sc->interrupt_mode > 0) {
        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
            msix_count = pci_msix_count(sc->dev);
        }

        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
            msi_count = pci_msi_count(sc->dev);
        }

        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
              msi_count, msix_count);
    }
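    /*
     * The three do/while(0) blocks below form a fallback ladder: MSI-X
     * is tried first, and on any failure interrupt_mode is bumped so
     * the next block tries MSI, then legacy INTx. Only if all three
     * fail does the function report an allocation error.
     */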
    do { /* try allocating MSI-X interrupt resources (at least 2) */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
            (msix_count < 2)) {
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        /* ask for the necessary number of MSI-X vectors */
        num_requested = min((sc->num_queues + 1), msix_count);

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            break;
        }

        if (num_allocated < 2) { /* possible? */
            BLOGE(sc, "MSI-X allocation less than 2!\n");
            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated - 1;
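        /*
         * One vector is consumed by the slowpath, so e.g. an allocation
         * of 4 MSI-X vectors yields 3 fastpath RX/TX queues. This is
         * why num_queues is num_allocated - 1 here but num_allocated in
         * the single-vector MSI case below.
         */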
        rid = 1; /* initial resource identifier */

        /* allocate the MSI-X vectors */
        for (i = 0; i < num_allocated; i++) {
            sc->intr[i].rid = (rid + i);

            if ((sc->intr[i].resource =
                 bus_alloc_resource_any(sc->dev,
                                        SYS_RES_IRQ,
                                        &sc->intr[i].rid,
                                        RF_ACTIVE)) == NULL) {
                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
                      i, (rid + i));

                for (j = (i - 1); j >= 0; j--) {
                    bus_release_resource(sc->dev,
                                         SYS_RES_IRQ,
                                         sc->intr[j].rid,
                                         sc->intr[j].resource);
                }

                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
                pci_release_msi(sc->dev);
                break;
            }

            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
        }
    } while (0);

    do { /* try allocating MSI vector resources (at least 2) */
        if (sc->interrupt_mode != INTR_MODE_MSI) {
            break;
        }

        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
            (msi_count < 1)) {
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            break;
        }

        /* ask for a single MSI vector */
        num_requested = 1;

        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);

        num_allocated = num_requested;
        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            break;
        }

        if (num_allocated != 1) { /* possible? */
            BLOGE(sc, "MSI allocation is not 1!\n");
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated;

        rid = 1; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_IRQ,
                                    &sc->intr[0].rid,
                                    RF_ACTIVE)) == NULL) {
            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
    } while (0);

    do { /* try allocating INTx vector resources */
        if (sc->interrupt_mode != INTR_MODE_INTX) {
            break;
        }

        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");

        /* only one vector for INTx */
        sc->intr_count = 1;
        sc->num_queues = 1;

        rid = 0; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_IRQ,
                                    &sc->intr[0].rid,
                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
            sc->interrupt_mode = -1; /* Failed! */
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
    } while (0);

    if (sc->interrupt_mode == -1) {
        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
        rc = 1;
    } else {
        BLOGD(sc, DBG_LOAD,
              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
              sc->interrupt_mode, sc->num_queues);
        rc = 0;
    }

    return (rc);
}
static void
bxe_interrupt_detach(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    /* release interrupt resources */
    for (i = 0; i < sc->intr_count; i++) {
        if (sc->intr[i].resource && sc->intr[i].tag) {
            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
        }
    }

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq) {
            taskqueue_drain(fp->tq, &fp->tq_task);
            taskqueue_drain(fp->tq, &fp->tx_task);
            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
                                            NULL))
                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
            taskqueue_free(fp->tq);
            fp->tq = NULL;
        }
    }

    if (sc->sp_tq) {
        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
        taskqueue_free(sc->sp_tq);
        sc->sp_tq = NULL;
    }
}
/*
 * Enables interrupts and attaches them to the ISR.
 *
 * When using multiple MSI/MSI-X vectors the first vector
 * is used for slowpath operations while all remaining
 * vectors are used for fastpath operations. If only a
 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
 * ISR must look for both slowpath and fastpath completions.
 */
static int
bxe_interrupt_attach(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int rc = 0;
    int i;

    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
             "bxe%d_sp_tq", sc->unit);
    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
                                 taskqueue_thread_enqueue,
                                 &sc->sp_tq);
    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
                            "%s", sc->sp_tq_name);

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        snprintf(fp->tq_name, sizeof(fp->tq_name),
                 "bxe%d_fp%d_tq", sc->unit, i);
        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
                                  taskqueue_thread_enqueue,
                                  &fp->tq);
        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
                          bxe_tx_mq_start_deferred, fp);
        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
                                "%s", fp->tq_name);
    }

    /* setup interrupt handlers */
    if (sc->interrupt_mode == INTR_MODE_MSIX) {
        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");

        /*
         * Setup the interrupt handler. Note that we pass the driver instance
         * to the interrupt handler for the slowpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_sp, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }

        bus_describe_intr(sc->dev, sc->intr[0].resource,
                          sc->intr[0].tag, "sp");

        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */

        /* initialize the fastpath vectors (note the first was used for sp) */
        for (i = 0; i < sc->num_queues; i++) {
            fp = &sc->fp[i];
            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));

            /*
             * Setup the interrupt handler. Note that we pass the
             * fastpath context to the interrupt handler in this
             * case.
             */
            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
                                     (INTR_TYPE_NET | INTR_MPSAFE),
                                     NULL, bxe_intr_fp, fp,
                                     &sc->intr[i + 1].tag)) != 0) {
                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
                      (i + 1), rc);
                goto bxe_interrupt_attach_exit;
            }

            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
                              sc->intr[i + 1].tag, "fp%02d", i);

            /* bind the fastpath instance to a cpu */
            if (sc->num_queues > 1) {
                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
            }

            fp->state = BXE_FP_STATE_IRQ;
        }
    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");

        /*
         * Setup the interrupt handler. Note that we pass the
         * driver instance to the interrupt handler which
         * will handle both the slowpath and fastpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_legacy, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }
    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");

        /*
         * Setup the interrupt handler. Note that we pass the
         * driver instance to the interrupt handler which
         * will handle both the slowpath and fastpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_legacy, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }
    }

bxe_interrupt_attach_exit:

    return (rc);
}
static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
static int  bxe_init_hw_common(struct bxe_softc *sc);
static int  bxe_init_hw_port(struct bxe_softc *sc);
static int  bxe_init_hw_func(struct bxe_softc *sc);
static void bxe_reset_common(struct bxe_softc *sc);
static void bxe_reset_port(struct bxe_softc *sc);
static void bxe_reset_func(struct bxe_softc *sc);
static int  bxe_gunzip_init(struct bxe_softc *sc);
static void bxe_gunzip_end(struct bxe_softc *sc);
static int  bxe_init_firmware(struct bxe_softc *sc);
static void bxe_release_firmware(struct bxe_softc *sc);
9237 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9238 .init_hw_cmn_chip = bxe_init_hw_common_chip,
9239 .init_hw_cmn = bxe_init_hw_common,
9240 .init_hw_port = bxe_init_hw_port,
9241 .init_hw_func = bxe_init_hw_func,
9243 .reset_hw_cmn = bxe_reset_common,
9244 .reset_hw_port = bxe_reset_port,
9245 .reset_hw_func = bxe_reset_func,
9247 .gunzip_init = bxe_gunzip_init,
9248 .gunzip_end = bxe_gunzip_end,
9250 .init_fw = bxe_init_firmware,
9251 .release_fw = bxe_release_firmware,
9255 bxe_init_func_obj(struct bxe_softc *sc)
9259 ecore_init_func_obj(sc,
9261 BXE_SP(sc, func_rdata),
9262 BXE_SP_MAPPING(sc, func_rdata),
9263 BXE_SP(sc, func_afex_rdata),
9264 BXE_SP_MAPPING(sc, func_afex_rdata),
9269 bxe_init_hw(struct bxe_softc *sc,
9272 struct ecore_func_state_params func_params = { NULL };
9275 /* prepare the parameters for function state transitions */
9276 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9278 func_params.f_obj = &sc->func_obj;
9279 func_params.cmd = ECORE_F_CMD_HW_INIT;
9281 func_params.params.hw_init.load_phase = load_code;
9284 * Via a plethora of function pointers, we will eventually reach
9285 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9287 rc = ecore_func_state_change(sc, &func_params);
9293 bxe_fill(struct bxe_softc *sc,
9300 if (!(len % 4) && !(addr % 4)) {
9301 for (i = 0; i < len; i += 4) {
9302 REG_WR(sc, (addr + i), fill);
9305 for (i = 0; i < len; i++) {
9306 REG_WR8(sc, (addr + i), fill);
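/*
 * Illustrative sketch (not driver code) of the fill-width dispatch above,
 * assuming REG_WR/REG_WR8 behave as 32-bit/8-bit stores: the fill uses
 * dword writes only when both the length and the address are 4-byte
 * aligned, otherwise it falls back to byte writes.
 */
#if 0
static int
use_dword_fill(uint32_t addr, uint32_t len)
{
    return (!(len % 4) && !(addr % 4));
}
/* use_dword_fill(0x1000, 16) -> 1 (4 REG_WR calls)   */
/* use_dword_fill(0x1001, 16) -> 0 (16 REG_WR8 calls) */
#endif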
9311 /* writes FP SP data to FW - data_size in dwords */
9313 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9315 uint32_t *sb_data_p,
9320 for (index = 0; index < data_size; index++) {
9322 (BAR_CSTRORM_INTMEM +
9323 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9324 (sizeof(uint32_t) * index)),
9325 *(sb_data_p + index));
9330 bxe_zero_fp_sb(struct bxe_softc *sc,
9333 struct hc_status_block_data_e2 sb_data_e2;
9334 struct hc_status_block_data_e1x sb_data_e1x;
9335 uint32_t *sb_data_p;
9336 uint32_t data_size = 0;
9338 if (!CHIP_IS_E1x(sc)) {
9339 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9340 sb_data_e2.common.state = SB_DISABLED;
9341 sb_data_e2.common.p_func.vf_valid = FALSE;
9342 sb_data_p = (uint32_t *)&sb_data_e2;
9343 data_size = (sizeof(struct hc_status_block_data_e2) /
9346 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9347 sb_data_e1x.common.state = SB_DISABLED;
9348 sb_data_e1x.common.p_func.vf_valid = FALSE;
9349 sb_data_p = (uint32_t *)&sb_data_e1x;
9350 data_size = (sizeof(struct hc_status_block_data_e1x) /
9354 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9356 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9357 0, CSTORM_STATUS_BLOCK_SIZE);
9358 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9359 0, CSTORM_SYNC_BLOCK_SIZE);
9363 bxe_wr_sp_sb_data(struct bxe_softc *sc,
9364 struct hc_sp_status_block_data *sp_sb_data)
9369 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9372 (BAR_CSTRORM_INTMEM +
9373 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9374 (i * sizeof(uint32_t))),
9375 *((uint32_t *)sp_sb_data + i));
9380 bxe_zero_sp_sb(struct bxe_softc *sc)
9382 struct hc_sp_status_block_data sp_sb_data;
9384 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9386 sp_sb_data.state = SB_DISABLED;
9387 sp_sb_data.p_func.vf_valid = FALSE;
9389 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9392 (BAR_CSTRORM_INTMEM +
9393 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9394 0, CSTORM_SP_STATUS_BLOCK_SIZE);
9396 (BAR_CSTRORM_INTMEM +
9397 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9398 0, CSTORM_SP_SYNC_BLOCK_SIZE);
9402 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9406 hc_sm->igu_sb_id = igu_sb_id;
9407 hc_sm->igu_seg_id = igu_seg_id;
9408 hc_sm->timer_value = 0xFF;
9409 hc_sm->time_to_expire = 0xFFFFFFFF;
9413 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9415 /* zero out state machine indices */
9418 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9421 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9422 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9423 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9424 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9429 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9430 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9433 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9434 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9435 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9436 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9437 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9438 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9439 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9440 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9444 bxe_init_sb(struct bxe_softc *sc,
9451 struct hc_status_block_data_e2 sb_data_e2;
9452 struct hc_status_block_data_e1x sb_data_e1x;
9453 struct hc_status_block_sm *hc_sm_p;
9454 uint32_t *sb_data_p;
9458 if (CHIP_INT_MODE_IS_BC(sc)) {
9459 igu_seg_id = HC_SEG_ACCESS_NORM;
9461 igu_seg_id = IGU_SEG_ACCESS_NORM;
9464 bxe_zero_fp_sb(sc, fw_sb_id);
9466 if (!CHIP_IS_E1x(sc)) {
9467 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9468 sb_data_e2.common.state = SB_ENABLED;
9469 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9470 sb_data_e2.common.p_func.vf_id = vfid;
9471 sb_data_e2.common.p_func.vf_valid = vf_valid;
9472 sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9473 sb_data_e2.common.same_igu_sb_1b = TRUE;
9474 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9475 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9476 hc_sm_p = sb_data_e2.common.state_machine;
9477 sb_data_p = (uint32_t *)&sb_data_e2;
9478 data_size = (sizeof(struct hc_status_block_data_e2) /
9480 bxe_map_sb_state_machines(sb_data_e2.index_data);
9482 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9483 sb_data_e1x.common.state = SB_ENABLED;
9484 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9485 sb_data_e1x.common.p_func.vf_id = 0xff;
9486 sb_data_e1x.common.p_func.vf_valid = FALSE;
9487 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9488 sb_data_e1x.common.same_igu_sb_1b = TRUE;
9489 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9490 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9491 hc_sm_p = sb_data_e1x.common.state_machine;
9492 sb_data_p = (uint32_t *)&sb_data_e1x;
9493 data_size = (sizeof(struct hc_status_block_data_e1x) /
9495 bxe_map_sb_state_machines(sb_data_e1x.index_data);
9498 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9499 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9501 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9503 /* write indices to HW - PCI guarantees endianness of regpairs */
9504 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9507 static inline uint8_t
9508 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9510 if (CHIP_IS_E1x(fp->sc)) {
9511 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9517 static inline uint32_t
9518 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
9519 struct bxe_fastpath *fp)
9521 uint32_t offset = BAR_USTRORM_INTMEM;
9523 if (!CHIP_IS_E1x(sc)) {
9524 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9526 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9533 bxe_init_eth_fp(struct bxe_softc *sc,
9536 struct bxe_fastpath *fp = &sc->fp[idx];
9537 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9538 unsigned long q_type = 0;
9544 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9545 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9547 fp->cl_id = (CHIP_IS_E1x(sc)) ?
9548 (SC_L_ID(sc) + idx) :
9549 /* want client ID same as IGU SB ID for non-E1 */
9551 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9553 /* setup sb indices */
9554 if (!CHIP_IS_E1x(sc)) {
9555 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
9556 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9558 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
9559 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9563 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9565 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9568 * XXX If multiple CoS is ever supported then each fastpath structure
9569 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9571 for (cos = 0; cos < sc->max_cos; cos++) {
9574 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9576 /* nothing more for a VF to do */
9581 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9582 fp->fw_sb_id, fp->igu_sb_id);
9584 bxe_update_fp_sb_idx(fp);
9586 /* Configure Queue State object */
9587 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9588 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9590 ecore_init_queue_obj(sc,
9591 &sc->sp_objs[idx].q_obj,
9596 BXE_SP(sc, q_rdata),
9597 BXE_SP_MAPPING(sc, q_rdata),
9600 /* configure classification DBs */
9601 ecore_init_mac_obj(sc,
9602 &sc->sp_objs[idx].mac_obj,
9606 BXE_SP(sc, mac_rdata),
9607 BXE_SP_MAPPING(sc, mac_rdata),
9608 ECORE_FILTER_MAC_PENDING,
9610 ECORE_OBJ_TYPE_RX_TX,
9613 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9614 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9618 bxe_update_rx_prod(struct bxe_softc *sc,
9619 struct bxe_fastpath *fp,
9620 uint16_t rx_bd_prod,
9621 uint16_t rx_cq_prod,
9622 uint16_t rx_sge_prod)
9624 struct ustorm_eth_rx_producers rx_prods = { 0 };
9627 /* update producers */
9628 rx_prods.bd_prod = rx_bd_prod;
9629 rx_prods.cqe_prod = rx_cq_prod;
9630 rx_prods.sge_prod = rx_sge_prod;
9633 * Make sure that the BD and SGE data is updated before updating the
9634 * producers since FW might read the BD/SGE right after the producer
9636 * This is only applicable for weak-ordered memory model archs such
9637 * as IA-64. The following barrier is also mandatory since the FW
9638 * assumes that BDs have buffers.
9642 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9644 (fp->ustorm_rx_prods_offset + (i * 4)),
9645 ((uint32_t *)&rx_prods)[i]);
9648 wmb(); /* keep prod updates ordered */
9651 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9652 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
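/*
 * Minimal sketch (hypothetical ring, not driver code) of the barrier
 * discipline used above: BD/SGE contents must be globally visible before
 * the producer index that advertises them, and a trailing barrier keeps
 * successive producer updates ordered.
 */
#if 0
static void
ring_post(volatile uint32_t *prod_reg, uint32_t new_prod)
{
    wmb();                /* data visible before the producer */
    *prod_reg = new_prod; /* FW may fetch right after this store */
    wmb();                /* keep producer updates ordered */
}
#endif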
9656 bxe_init_rx_rings(struct bxe_softc *sc)
9658 struct bxe_fastpath *fp;
9661 for (i = 0; i < sc->num_queues; i++) {
9667 * Activate the BD ring...
9668 * Warning, this will generate an interrupt (to the TSTORM)
9669 * so this can only be done after the chip is initialized
9671 bxe_update_rx_prod(sc, fp,
9680 if (CHIP_IS_E1(sc)) {
9682 (BAR_USTRORM_INTMEM +
9683 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9684 U64_LO(fp->rcq_dma.paddr));
9686 (BAR_USTRORM_INTMEM +
9687 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9688 U64_HI(fp->rcq_dma.paddr));
9694 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9696 SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9697 fp->tx_db.data.zero_fill1 = 0;
9698 fp->tx_db.data.prod = 0;
9700 fp->tx_pkt_prod = 0;
9701 fp->tx_pkt_cons = 0;
9704 fp->eth_q_stats.tx_pkts = 0;
9708 bxe_init_tx_rings(struct bxe_softc *sc)
9712 for (i = 0; i < sc->num_queues; i++) {
9713 bxe_init_tx_ring_one(&sc->fp[i]);
9718 bxe_init_def_sb(struct bxe_softc *sc)
9720 struct host_sp_status_block *def_sb = sc->def_sb;
9721 bus_addr_t mapping = sc->def_sb_dma.paddr;
9722 int igu_sp_sb_index;
9724 int port = SC_PORT(sc);
9725 int func = SC_FUNC(sc);
9726 int reg_offset, reg_offset_en5;
9729 struct hc_sp_status_block_data sp_sb_data;
9731 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9733 if (CHIP_INT_MODE_IS_BC(sc)) {
9734 igu_sp_sb_index = DEF_SB_IGU_ID;
9735 igu_seg_id = HC_SEG_ACCESS_DEF;
9737 igu_sp_sb_index = sc->igu_dsb_id;
9738 igu_seg_id = IGU_SEG_ACCESS_DEF;
9742 section = ((uint64_t)mapping +
9743 offsetof(struct host_sp_status_block, atten_status_block));
9744 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9747 reg_offset = (port) ?
9748 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9749 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9750 reg_offset_en5 = (port) ?
9751 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9752 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9754 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9755 /* take care of sig[0]..sig[4] */
9756 for (sindex = 0; sindex < 4; sindex++) {
9757 sc->attn_group[index].sig[sindex] =
9758 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9761 if (!CHIP_IS_E1x(sc)) {
9763 * enable5 is separate from the rest of the registers,
9764 * and the address skip is 4 and not 16 between the different groups.
9767 sc->attn_group[index].sig[4] =
9768 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9770 sc->attn_group[index].sig[4] = 0;
9774 if (sc->devinfo.int_block == INT_BLOCK_HC) {
9775 reg_offset = (port) ?
9776 HC_REG_ATTN_MSG1_ADDR_L :
9777 HC_REG_ATTN_MSG0_ADDR_L;
9778 REG_WR(sc, reg_offset, U64_LO(section));
9779 REG_WR(sc, (reg_offset + 4), U64_HI(section));
9780 } else if (!CHIP_IS_E1x(sc)) {
9781 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9782 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9785 section = ((uint64_t)mapping +
9786 offsetof(struct host_sp_status_block, sp_sb));
9790 /* PCI guarantees endianness of regpairs */
9791 sp_sb_data.state = SB_ENABLED;
9792 sp_sb_data.host_sb_addr.lo = U64_LO(section);
9793 sp_sb_data.host_sb_addr.hi = U64_HI(section);
9794 sp_sb_data.igu_sb_id = igu_sp_sb_index;
9795 sp_sb_data.igu_seg_id = igu_seg_id;
9796 sp_sb_data.p_func.pf_id = func;
9797 sp_sb_data.p_func.vnic_id = SC_VN(sc);
9798 sp_sb_data.p_func.vf_id = 0xff;
9800 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9802 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9806 bxe_init_sp_ring(struct bxe_softc *sc)
9808 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9809 sc->spq_prod_idx = 0;
9810 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9811 sc->spq_prod_bd = sc->spq;
9812 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9816 bxe_init_eq_ring(struct bxe_softc *sc)
9818 union event_ring_elem *elem;
9821 for (i = 1; i <= NUM_EQ_PAGES; i++) {
9822 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9824 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9826 (i % NUM_EQ_PAGES)));
9827 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9829 (i % NUM_EQ_PAGES)));
9833 sc->eq_prod = NUM_EQ_DESC;
9834 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9836 atomic_store_rel_long(&sc->eq_spq_left,
9837 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
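/*
 * Sketch of the next-page chaining set up above: the last descriptor of
 * page i (i = 1..NUM_EQ_PAGES) points at page (i % NUM_EQ_PAGES), so the
 * final page wraps back to page 0 and the event ring forms a cycle.
 */
#if 0
static uint32_t
eq_next_page(uint32_t i, uint32_t num_pages)
{
    return (i % num_pages); /* e.g. i == num_pages -> 0 (wrap) */
}
#endif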
9842 bxe_init_internal_common(struct bxe_softc *sc)
9847 * Zero this manually as its initialization is currently missing
9850 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9852 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9856 if (!CHIP_IS_E1x(sc)) {
9857 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9858 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9863 bxe_init_internal(struct bxe_softc *sc,
9866 switch (load_code) {
9867 case FW_MSG_CODE_DRV_LOAD_COMMON:
9868 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9869 bxe_init_internal_common(sc);
9872 case FW_MSG_CODE_DRV_LOAD_PORT:
9876 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9877 /* internal memory per function is initialized inside bxe_pf_init */
9881 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9887 storm_memset_func_cfg(struct bxe_softc *sc,
9888 struct tstorm_eth_function_common_config *tcfg,
9894 addr = (BAR_TSTRORM_INTMEM +
9895 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9896 size = sizeof(struct tstorm_eth_function_common_config);
9897 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9901 bxe_func_init(struct bxe_softc *sc,
9902 struct bxe_func_init_params *p)
9904 struct tstorm_eth_function_common_config tcfg = { 0 };
9906 if (CHIP_IS_E1x(sc)) {
9907 storm_memset_func_cfg(sc, &tcfg, p->func_id);
9910 /* Enable the function in the FW */
9911 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9912 storm_memset_func_en(sc, p->func_id, 1);
9915 if (p->func_flgs & FUNC_FLG_SPQ) {
9916 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9918 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9924 * Calculates the sum of vn_min_rates.
9925 * It's needed for further normalizing of the min_rates.
9927 * Returns the sum of vn_min_rates, or
9929 * 0 - if all the min_rates are 0.
9930 * In the latter case the fairness algorithm should be deactivated.
9931 * If not all min rates are zero, then those that are zero will be set to 1.
9934 bxe_calc_vn_min(struct bxe_softc *sc,
9935 struct cmng_init_input *input)
9938 uint32_t vn_min_rate;
9942 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9943 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9944 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9945 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
9947 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9948 /* skip hidden VNs */
9950 } else if (!vn_min_rate) {
9951 /* If min rate is zero - set it to 100 */
9952 vn_min_rate = DEF_MIN_RATE;
9957 input->vnic_min_rate[vn] = vn_min_rate;
9960 /* if ETS or all min rates are zeros - disable fairness */
9961 if (BXE_IS_ETS_ENABLED(sc)) {
9962 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9963 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
9964 } else if (all_zero) {
9965 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9967 "Fariness disabled (all MIN values are zeroes)\n");
9969 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
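/*
 * Worked example for the MIN BW handling above (field values
 * hypothetical): a MIN BW field of 25 yields vn_min_rate = 25 * 100 =
 * 2500, while a field of 0 is bumped to DEF_MIN_RATE (100) so the
 * fairness algorithm never sees a zero weight.
 */
#if 0
static uint32_t
vn_min_rate_sketch(uint32_t bw_field) /* field already mask/shift extracted */
{
    uint32_t rate = bw_field * 100;   /* 100Mb units -> rate units */
    return (rate ? rate : 100 /* DEF_MIN_RATE */);
}
#endif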
9973 static inline uint16_t
9974 bxe_extract_max_cfg(struct bxe_softc *sc,
9977 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
9978 FUNC_MF_CFG_MAX_BW_SHIFT);
9981 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
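/* e.g. a MAX BW field of 75 yields max_cfg 75; a field of 0 falls back to 100 */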
9989 bxe_calc_vn_max(struct bxe_softc *sc,
9991 struct cmng_init_input *input)
9993 uint16_t vn_max_rate;
9994 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9997 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10000 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10002 if (IS_MF_SI(sc)) {
10003 /* max_cfg in percents of linkspeed */
10004 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10005 } else { /* SD modes */
10006 /* max_cfg is absolute in 100Mb units */
10007 vn_max_rate = (max_cfg * 100);
10011 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10013 input->vnic_max_rate[vn] = vn_max_rate;
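/*
 * Worked example for the two MAX BW interpretations above (numbers
 * hypothetical): with max_cfg = 30, an SI function on a 20000 Mbps link
 * is capped at (20000 * 30) / 100 = 6000 Mbps, while an SD function is
 * capped at the absolute 30 * 100 = 3000 Mbps regardless of link speed.
 */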
10017 bxe_cmng_fns_init(struct bxe_softc *sc,
10021 struct cmng_init_input input;
10024 memset(&input, 0, sizeof(struct cmng_init_input));
10026 input.port_rate = sc->link_vars.line_speed;
10028 if (cmng_type == CMNG_FNS_MINMAX) {
10029 /* read mf conf from shmem */
10031 bxe_read_mf_cfg(sc);
10034 /* get VN min rate and enable fairness if not 0 */
10035 bxe_calc_vn_min(sc, &input);
10037 /* get VN max rate */
10038 if (sc->port.pmf) {
10039 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10040 bxe_calc_vn_max(sc, vn, &input);
10044 /* always enable rate shaping and fairness */
10045 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10047 ecore_init_cmng(&input, &sc->cmng);
10051 /* rate shaping and fairness are disabled */
10052 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10056 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10058 if (CHIP_REV_IS_SLOW(sc)) {
10059 return (CMNG_FNS_NONE);
10063 return (CMNG_FNS_MINMAX);
10066 return (CMNG_FNS_NONE);
10070 storm_memset_cmng(struct bxe_softc *sc,
10071 struct cmng_init *cmng,
10079 addr = (BAR_XSTRORM_INTMEM +
10080 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10081 size = sizeof(struct cmng_struct_per_port);
10082 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10084 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10085 func = func_by_vn(sc, vn);
10087 addr = (BAR_XSTRORM_INTMEM +
10088 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10089 size = sizeof(struct rate_shaping_vars_per_vn);
10090 ecore_storm_memset_struct(sc, addr, size,
10091 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10093 addr = (BAR_XSTRORM_INTMEM +
10094 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10095 size = sizeof(struct fairness_vars_per_vn);
10096 ecore_storm_memset_struct(sc, addr, size,
10097 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10102 bxe_pf_init(struct bxe_softc *sc)
10104 struct bxe_func_init_params func_init = { 0 };
10105 struct event_ring_data eq_data = { { 0 } };
10108 if (!CHIP_IS_E1x(sc)) {
10109 /* reset IGU PF statistics: MSIX + ATTN */
10112 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10113 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10114 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10118 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10119 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10120 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10121 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10125 /* function setup flags */
10126 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10129 * This flag is relevant for E1x only.
10130 * E2 doesn't have a TPA configuration at the function level.
10132 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10134 func_init.func_flgs = flags;
10135 func_init.pf_id = SC_FUNC(sc);
10136 func_init.func_id = SC_FUNC(sc);
10137 func_init.spq_map = sc->spq_dma.paddr;
10138 func_init.spq_prod = sc->spq_prod_idx;
10140 bxe_func_init(sc, &func_init);
10142 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10145 * Congestion management values depend on the link rate.
10146 * There is no active link so initial link rate is set to 10Gbps.
10147 * When the link comes up the congestion management values are
10148 * re-calculated according to the actual link rate.
10150 sc->link_vars.line_speed = SPEED_10000;
10151 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10153 /* Only the PMF sets the HW */
10154 if (sc->port.pmf) {
10155 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10158 /* init Event Queue - PCI bus guarantees correct endianness */
10159 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10160 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10161 eq_data.producer = sc->eq_prod;
10162 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10163 eq_data.sb_id = DEF_SB_ID;
10164 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10168 bxe_hc_int_enable(struct bxe_softc *sc)
10170 int port = SC_PORT(sc);
10171 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10172 uint32_t val = REG_RD(sc, addr);
10173 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10174 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10175 (sc->intr_count == 1)) ? TRUE : FALSE;
10176 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10179 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10180 HC_CONFIG_0_REG_INT_LINE_EN_0);
10181 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10182 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10184 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10187 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10188 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10189 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10190 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10192 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10193 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10194 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10195 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10197 if (!CHIP_IS_E1(sc)) {
10198 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10201 REG_WR(sc, addr, val);
10203 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10207 if (CHIP_IS_E1(sc)) {
10208 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10211 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10212 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10214 REG_WR(sc, addr, val);
10216 /* ensure that HC_CONFIG is written before leading/trailing edge config */
10219 if (!CHIP_IS_E1(sc)) {
10220 /* init leading/trailing edge */
10222 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10223 if (sc->port.pmf) {
10224 /* enable nig and gpio3 attention */
10231 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10232 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10235 /* make sure that interrupts are indeed enabled from here on */
10240 bxe_igu_int_enable(struct bxe_softc *sc)
10243 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10244 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10245 (sc->intr_count == 1)) ? TRUE : FALSE;
10246 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10248 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10251 val &= ~(IGU_PF_CONF_INT_LINE_EN |
10252 IGU_PF_CONF_SINGLE_ISR_EN);
10253 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10254 IGU_PF_CONF_ATTN_BIT_EN);
10256 val |= IGU_PF_CONF_SINGLE_ISR_EN;
10259 val &= ~IGU_PF_CONF_INT_LINE_EN;
10260 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10261 IGU_PF_CONF_ATTN_BIT_EN |
10262 IGU_PF_CONF_SINGLE_ISR_EN);
10264 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10265 val |= (IGU_PF_CONF_INT_LINE_EN |
10266 IGU_PF_CONF_ATTN_BIT_EN |
10267 IGU_PF_CONF_SINGLE_ISR_EN);
10270 /* clean previous status - need to configure IGU prior to ack */
10271 if ((!msix) || single_msix) {
10272 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10276 val |= IGU_PF_CONF_FUNC_EN;
10278 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10279 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10281 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10285 /* init leading/trailing edge */
10287 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10288 if (sc->port.pmf) {
10289 /* enable nig and gpio3 attention */
10296 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10297 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10299 /* make sure that interrupts are indeed enabled from here on */
10304 bxe_int_enable(struct bxe_softc *sc)
10306 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10307 bxe_hc_int_enable(sc);
10309 bxe_igu_int_enable(sc);
10314 bxe_hc_int_disable(struct bxe_softc *sc)
10316 int port = SC_PORT(sc);
10317 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10318 uint32_t val = REG_RD(sc, addr);
10321 * In E1 we must use only PCI configuration space to disable MSI/MSIX
10322 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10325 if (CHIP_IS_E1(sc)) {
10327 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10328 * to prevent the HC from sending interrupts after we exit the function
10330 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10332 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10333 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10334 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10336 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10337 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10338 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10339 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10342 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10344 /* flush all outstanding writes */
10347 REG_WR(sc, addr, val);
10348 if (REG_RD(sc, addr) != val) {
10349 BLOGE(sc, "proper val not read from HC IGU!\n");
10354 bxe_igu_int_disable(struct bxe_softc *sc)
10356 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10358 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10359 IGU_PF_CONF_INT_LINE_EN |
10360 IGU_PF_CONF_ATTN_BIT_EN);
10362 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10364 /* flush all outstanding writes */
10367 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10368 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10369 BLOGE(sc, "proper val not read from IGU!\n");
10374 bxe_int_disable(struct bxe_softc *sc)
10376 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10377 bxe_hc_int_disable(sc);
10379 bxe_igu_int_disable(sc);
10384 bxe_nic_init(struct bxe_softc *sc,
10389 for (i = 0; i < sc->num_queues; i++) {
10390 bxe_init_eth_fp(sc, i);
10393 rmb(); /* ensure status block indices were read */
10395 bxe_init_rx_rings(sc);
10396 bxe_init_tx_rings(sc);
10402 /* initialize MOD_ABS interrupts */
10403 elink_init_mod_abs_int(sc, &sc->link_vars,
10404 sc->devinfo.chip_id,
10405 sc->devinfo.shmem_base,
10406 sc->devinfo.shmem2_base,
10409 bxe_init_def_sb(sc);
10410 bxe_update_dsb_idx(sc);
10411 bxe_init_sp_ring(sc);
10412 bxe_init_eq_ring(sc);
10413 bxe_init_internal(sc, load_code);
10415 bxe_stats_init(sc);
10417 /* flush all before enabling interrupts */
10420 bxe_int_enable(sc);
10422 /* check for SPIO5 */
10423 bxe_attn_int_deasserted0(sc,
10425 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10427 AEU_INPUTS_ATTN_BITS_SPIO5);
10431 bxe_init_objs(struct bxe_softc *sc)
10433 /* mcast rules must be added to tx if tx switching is enabled */
10434 ecore_obj_type o_type =
10435 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10438 /* RX_MODE controlling object */
10439 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10441 /* multicast configuration controlling object */
10442 ecore_init_mcast_obj(sc,
10448 BXE_SP(sc, mcast_rdata),
10449 BXE_SP_MAPPING(sc, mcast_rdata),
10450 ECORE_FILTER_MCAST_PENDING,
10454 /* Setup CAM credit pools */
10455 ecore_init_mac_credit_pool(sc,
10458 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10459 VNICS_PER_PATH(sc));
10461 ecore_init_vlan_credit_pool(sc,
10463 SC_ABS_FUNC(sc) >> 1,
10464 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10465 VNICS_PER_PATH(sc));
10467 /* RSS configuration object */
10468 ecore_init_rss_config_obj(sc,
10474 BXE_SP(sc, rss_rdata),
10475 BXE_SP_MAPPING(sc, rss_rdata),
10476 ECORE_FILTER_RSS_CONF_PENDING,
10477 &sc->sp_state, ECORE_OBJ_TYPE_RX);
10481 * Initialize the function. This must be called before sending CLIENT_SETUP
10482 * for the first client.
10485 bxe_func_start(struct bxe_softc *sc)
10487 struct ecore_func_state_params func_params = { NULL };
10488 struct ecore_func_start_params *start_params = &func_params.params.start;
10490 /* Prepare parameters for function state transitions */
10491 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10493 func_params.f_obj = &sc->func_obj;
10494 func_params.cmd = ECORE_F_CMD_START;
10496 /* Function parameters */
10497 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
10498 start_params->sd_vlan_tag = OVLAN(sc);
10500 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10501 start_params->network_cos_mode = STATIC_COS;
10502 } else { /* CHIP_IS_E1X */
10503 start_params->network_cos_mode = FW_WRR;
10506 //start_params->gre_tunnel_mode = 0;
10507 //start_params->gre_tunnel_rss = 0;
10509 return (ecore_func_state_change(sc, &func_params));
10513 bxe_set_power_state(struct bxe_softc *sc,
10518 /* If there is no power capability, silently succeed */
10519 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10520 BLOGW(sc, "No power capability\n");
10524 pmcsr = pci_read_config(sc->dev,
10525 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10530 pci_write_config(sc->dev,
10531 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10532 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10534 if (pmcsr & PCIM_PSTAT_DMASK) {
10535 /* delay required during transition out of D3hot */
10542 /* XXX if there are other clients above don't shut down the power */
10544 /* don't shut down the power for emulation and FPGA */
10545 if (CHIP_REV_IS_SLOW(sc)) {
10549 pmcsr &= ~PCIM_PSTAT_DMASK;
10550 pmcsr |= PCIM_PSTAT_D3;
10553 pmcsr |= PCIM_PSTAT_PMEENABLE;
10556 pci_write_config(sc->dev,
10557 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10561 * No more memory access after this point until device is brought back
10567 BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10576 /* returns true if the lock was successfully acquired */
10578 bxe_trylock_hw_lock(struct bxe_softc *sc,
10581 uint32_t lock_status;
10582 uint32_t resource_bit = (1 << resource);
10583 int func = SC_FUNC(sc);
10584 uint32_t hw_lock_control_reg;
10586 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10588 /* Validating that the resource is within range */
10589 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10590 BLOGD(sc, DBG_LOAD,
10591 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10592 resource, HW_LOCK_MAX_RESOURCE_VALUE);
10597 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10599 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10602 /* try to acquire the lock */
10603 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10604 lock_status = REG_RD(sc, hw_lock_control_reg);
10605 if (lock_status & resource_bit) {
10609 BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10610 "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10611 lock_status, resource_bit);
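/*
 * Minimal usage sketch for the trylock above: attempt the lock, do the
 * protected work, then release via the matching bxe_release_hw_lock().
 */
#if 0
if (bxe_trylock_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_LEADER_0)) {
    /* ... this driver instance is now the recovery leader ... */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
}
#endif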
10617 * Get the recovery leader resource id according to the engine this function
10618 * belongs to. Currently only 2 engines are supported.
10621 bxe_get_leader_lock_resource(struct bxe_softc *sc)
10624 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10626 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10630 /* try to acquire a leader lock for current engine */
10632 bxe_trylock_leader_lock(struct bxe_softc *sc)
10634 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10638 bxe_release_leader_lock(struct bxe_softc *sc)
10640 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10643 /* close gates #2, #3 and #4 */
10645 bxe_set_234_gates(struct bxe_softc *sc,
10650 /* gates #2 and #4a are closed/opened for "not E1" only */
10651 if (!CHIP_IS_E1(sc)) {
10653 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10655 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10659 if (CHIP_IS_E1x(sc)) {
10660 /* prevent interrupts from HC on both ports */
10661 val = REG_RD(sc, HC_REG_CONFIG_1);
10662 REG_WR(sc, HC_REG_CONFIG_1,
10663 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10664 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10666 val = REG_RD(sc, HC_REG_CONFIG_0);
10667 REG_WR(sc, HC_REG_CONFIG_0,
10668 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10669 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10671 /* Prevent incoming interrupts in IGU */
10672 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10674 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10676 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10677 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10680 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10681 close ? "closing" : "opening");
10686 /* poll for the pending writes bit; it should be cleared in no more than 1s */
10688 bxe_er_poll_igu_vq(struct bxe_softc *sc)
10690 uint32_t cnt = 1000;
10691 uint32_t pend_bits = 0;
10694 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10696 if (pend_bits == 0) {
10701 } while (--cnt > 0);
10704 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10711 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
10714 bxe_clp_reset_prep(struct bxe_softc *sc,
10715 uint32_t *magic_val)
10717 /* Do some magic... */
10718 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10719 *magic_val = val & SHARED_MF_CLP_MAGIC;
10720 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10723 /* restore the value of the 'magic' bit */
10725 bxe_clp_reset_done(struct bxe_softc *sc,
10726 uint32_t magic_val)
10728 /* Restore the 'magic' bit value... */
10729 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10730 MFCFG_WR(sc, shared_mf_config.clp_mb,
10731 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10734 /* prepare for MCP reset, takes care of CLP configurations */
10736 bxe_reset_mcp_prep(struct bxe_softc *sc,
10737 uint32_t *magic_val)
10740 uint32_t validity_offset;
10742 /* set `magic' bit in order to save MF config */
10743 if (!CHIP_IS_E1(sc)) {
10744 bxe_clp_reset_prep(sc, magic_val);
10747 /* get shmem offset */
10748 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10750 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10752 /* Clear validity map flags */
10754 REG_WR(sc, shmem + validity_offset, 0);
10758 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
10759 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
10762 bxe_mcp_wait_one(struct bxe_softc *sc)
10764 /* special handling for emulation and FPGA (10 times longer) */
10765 if (CHIP_REV_IS_SLOW(sc)) {
10766 DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10768 DELAY((MCP_ONE_TIMEOUT) * 1000);
10772 /* initialize shmem_base and wait for the validity signature to appear */
10774 bxe_init_shmem(struct bxe_softc *sc)
10780 sc->devinfo.shmem_base =
10781 sc->link_params.shmem_base =
10782 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10784 if (sc->devinfo.shmem_base) {
10785 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10786 if (val & SHR_MEM_VALIDITY_MB)
10790 bxe_mcp_wait_one(sc);
10792 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10794 BLOGE(sc, "BAD MCP validity signature\n");
10800 bxe_reset_mcp_comp(struct bxe_softc *sc,
10801 uint32_t magic_val)
10803 int rc = bxe_init_shmem(sc);
10805 /* Restore the `magic' bit value */
10806 if (!CHIP_IS_E1(sc)) {
10807 bxe_clp_reset_done(sc, magic_val);
10814 bxe_pxp_prep(struct bxe_softc *sc)
10816 if (!CHIP_IS_E1(sc)) {
10817 REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10818 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10824 * Reset the whole chip except for:
10826 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10828 * - MISC (including AEU)
10833 bxe_process_kill_chip_reset(struct bxe_softc *sc,
10836 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10837 uint32_t global_bits2, stay_reset2;
10840 * Bits that have to be set in reset_mask2 if we want to reset 'global'
10841 * (per chip) blocks.
10844 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10845 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10848 * Don't reset the following blocks.
10849 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10850 * reset, as in 4 port device they might still be owned
10851 * by the MCP (there is only one leader per path).
10854 MISC_REGISTERS_RESET_REG_1_RST_HC |
10855 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10856 MISC_REGISTERS_RESET_REG_1_RST_PXP;
10859 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10860 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10861 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10862 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10863 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10864 MISC_REGISTERS_RESET_REG_2_RST_GRC |
10865 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10866 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10867 MISC_REGISTERS_RESET_REG_2_RST_ATC |
10868 MISC_REGISTERS_RESET_REG_2_PGLC |
10869 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10870 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10871 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10872 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10873 MISC_REGISTERS_RESET_REG_2_UMAC0 |
10874 MISC_REGISTERS_RESET_REG_2_UMAC1;
10877 * Keep the following blocks in reset:
10878 * - all xxMACs are handled by the elink code.
10881 MISC_REGISTERS_RESET_REG_2_XMAC |
10882 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10884 /* Full reset masks according to the chip */
10885 reset_mask1 = 0xffffffff;
10887 if (CHIP_IS_E1(sc))
10888 reset_mask2 = 0xffff;
10889 else if (CHIP_IS_E1H(sc))
10890 reset_mask2 = 0x1ffff;
10891 else if (CHIP_IS_E2(sc))
10892 reset_mask2 = 0xfffff;
10893 else /* CHIP_IS_E3 */
10894 reset_mask2 = 0x3ffffff;
10896 /* Don't reset global blocks unless we need to */
10898 reset_mask2 &= ~global_bits2;
10901 * In case of attention in the QM, we need to reset PXP
10902 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10903 * because otherwise QM reset would release 'close the gates' shortly
10904 * before resetting the PXP, then the PSWRQ would send a write
10905 * request to PGLUE. Then when PXP is reset, PGLUE would try to
10906 * read the payload data from PSWWR, but PSWWR would not
10907 * respond. The write queue in PGLUE would get stuck, and DMAE commands
10908 * would not return. Therefore it's important to reset the second
10909 * reset register (containing the
10910 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10911 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10914 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10915 reset_mask2 & (~not_reset_mask2));
10917 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10918 reset_mask1 & (~not_reset_mask1));
10923 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10924 reset_mask2 & (~stay_reset2));
10929 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10934 bxe_process_kill(struct bxe_softc *sc,
10939 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10940 uint32_t tags_63_32 = 0;
10942 /* Empty the Tetris buffer, wait for 1s */
10944 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10945 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
10946 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
10947 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
10948 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
10949 if (CHIP_IS_E3(sc)) {
10950 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
10953 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
10954 ((port_is_idle_0 & 0x1) == 0x1) &&
10955 ((port_is_idle_1 & 0x1) == 0x1) &&
10956 (pgl_exp_rom2 == 0xffffffff) &&
10957 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
10960 } while (cnt-- > 0);
10963 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
10964 "are still outstanding read requests after 1s! "
10965 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
10966 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
10967 sr_cnt, blk_cnt, port_is_idle_0,
10968 port_is_idle_1, pgl_exp_rom2);
10974 /* Close gates #2, #3 and #4 */
10975 bxe_set_234_gates(sc, TRUE);
10977 /* Poll for IGU VQs for 57712 and newer chips */
10978 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
10982 /* XXX indicate that "process kill" is in progress to MCP */
10984 /* clear "unprepared" bit */
10985 REG_WR(sc, MISC_REG_UNPREPARED, 0);
10988 /* Make sure all is written to the chip before the reset */
10992 * Wait for 1ms to empty GLUE and PCI-E core queues,
10993 * PSWHST, GRC and PSWRD Tetris buffer.
10997 /* Prepare for chip reset: */
11000 bxe_reset_mcp_prep(sc, &val);
11007 /* reset the chip */
11008 bxe_process_kill_chip_reset(sc, global);
11011 /* clear errors in PGB */
11012 if (!CHIP_IS_E1(sc))
11013 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11015 /* Recover after reset: */
11017 if (global && bxe_reset_mcp_comp(sc, val)) {
11021 /* XXX add resetting the NO_MCP mode DB here */
11023 /* Open the gates #2, #3 and #4 */
11024 bxe_set_234_gates(sc, FALSE);
11027 * IGU/AEU preparation: bring the AEU/IGU back to a reset state and
11028 * re-enable attentions
11035 bxe_leader_reset(struct bxe_softc *sc)
11038 uint8_t global = bxe_reset_is_global(sc);
11039 uint32_t load_code;
11042 * If we are not going to reset the MCP, load a "fake" driver to reset
11043 * the HW while the driver is the owner of the HW.
11045 if (!global && !BXE_NOMCP(sc)) {
11046 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11047 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11049 BLOGE(sc, "MCP response failure, aborting\n");
11051 goto exit_leader_reset;
11054 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11055 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11056 BLOGE(sc, "MCP unexpected response, aborting\n");
11058 goto exit_leader_reset2;
11061 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11063 BLOGE(sc, "MCP response failure, aborting\n");
11065 goto exit_leader_reset2;
11069 /* try to recover after the failure */
11070 if (bxe_process_kill(sc, global)) {
11071 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11073 goto exit_leader_reset2;
11077 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11080 bxe_set_reset_done(sc);
11082 bxe_clear_reset_global(sc);
11085 exit_leader_reset2:
11087 /* unload "fake driver" if it was loaded */
11088 if (!global && !BXE_NOMCP(sc)) {
11089 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11090 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11096 bxe_release_leader_lock(sc);
11103 * prepare INIT transition, parameters configured:
11104 * - HC configuration
11105 * - Queue's CDU context
11108 bxe_pf_q_prep_init(struct bxe_softc *sc,
11109 struct bxe_fastpath *fp,
11110 struct ecore_queue_init_params *init_params)
11113 int cxt_index, cxt_offset;
11115 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11116 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11118 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11119 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11122 init_params->rx.hc_rate =
11123 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11124 init_params->tx.hc_rate =
11125 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
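/* e.g. hc_rx_ticks == 25 (usec) -> rx.hc_rate == 1000000 / 25 == 40000 per second */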
11128 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11130 /* CQ index among the SB indices */
11131 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11132 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11134 /* set maximum number of COSs supported by this queue */
11135 init_params->max_cos = sc->max_cos;
11137 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11138 fp->index, init_params->max_cos);
11140 /* set the context pointers queue object */
11141 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11142 /* XXX change index/cid here if ever support multiple tx CoS */
11143 /* fp->txdata[cos]->cid */
11144 cxt_index = fp->index / ILT_PAGE_CIDS;
11145 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11146 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
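/*
 * Worked example of the CID-to-context mapping above (ILT_PAGE_CIDS value
 * hypothetical): with ILT_PAGE_CIDS == 64, fp->index 70 yields
 * cxt_index 1 and cxt_offset 6, i.e. the 7th context on the 2nd ILT page.
 */
#if 0
static void
cid_to_ilt(int cid, int page_cids, int *index, int *offset)
{
    *index  = cid / page_cids;            /* which ILT page */
    *offset = cid - (*index * page_cids); /* slot within that page */
}
#endif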
11150 /* set flags that are common to both the Tx-only and the regular connections */
11151 static unsigned long
11152 bxe_get_common_flags(struct bxe_softc *sc,
11153 struct bxe_fastpath *fp,
11154 uint8_t zero_stats)
11156 unsigned long flags = 0;
11158 /* PF driver will always initialize the Queue to an ACTIVE state */
11159 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11162 * tx only connections collect statistics (on the same index as the
11163 * parent connection). The statistics are zeroed when the parent
11164 * connection is initialized.
11167 bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11169 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11173 * tx only connections can support tx-switching, though their
11174 * CoS-ness doesn't survive the loopback
11176 if (sc->flags & BXE_TX_SWITCHING) {
11177 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11180 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11185 static unsigned long
11186 bxe_get_q_flags(struct bxe_softc *sc,
11187 struct bxe_fastpath *fp,
11190 unsigned long flags = 0;
11192 if (IS_MF_SD(sc)) {
11193 bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11196 if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11197 bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11198 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11202 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11203 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11206 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11208 /* merge with common flags */
11209 return (flags | bxe_get_common_flags(sc, fp, TRUE));
11213 bxe_pf_q_prep_general(struct bxe_softc *sc,
11214 struct bxe_fastpath *fp,
11215 struct ecore_general_setup_params *gen_init,
11218 gen_init->stat_id = bxe_stats_id(fp);
11219 gen_init->spcl_id = fp->cl_id;
11220 gen_init->mtu = sc->mtu;
11221 gen_init->cos = cos;
11225 bxe_pf_rx_q_prep(struct bxe_softc *sc,
11226 struct bxe_fastpath *fp,
11227 struct rxq_pause_params *pause,
11228 struct ecore_rxq_setup_params *rxq_init)
11230 uint8_t max_sge = 0;
11231 uint16_t sge_sz = 0;
11232 uint16_t tpa_agg_size = 0;
11234 pause->sge_th_lo = SGE_TH_LO(sc);
11235 pause->sge_th_hi = SGE_TH_HI(sc);
11237 /* validate SGE ring has enough to cross high threshold */
11238 if (sc->dropless_fc &&
11239 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11240 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11241 BLOGW(sc, "sge ring threshold limit\n");
11244 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11245 tpa_agg_size = (2 * sc->mtu);
11246 if (tpa_agg_size < sc->max_aggregation_size) {
11247 tpa_agg_size = sc->max_aggregation_size;
11250 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11251 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11252 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11253 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11255 /* pause - not for e1 */
11256 if (!CHIP_IS_E1(sc)) {
11257 pause->bd_th_lo = BD_TH_LO(sc);
11258 pause->bd_th_hi = BD_TH_HI(sc);
11260 pause->rcq_th_lo = RCQ_TH_LO(sc);
11261 pause->rcq_th_hi = RCQ_TH_HI(sc);
11263 /* validate rings have enough entries to cross high thresholds */
11264 if (sc->dropless_fc &&
11265 pause->bd_th_hi + FW_PREFETCH_CNT >
11266 sc->rx_ring_size) {
11267 BLOGW(sc, "rx bd ring threshold limit\n");
11270 if (sc->dropless_fc &&
11271 pause->rcq_th_hi + FW_PREFETCH_CNT >
11272 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11273 BLOGW(sc, "rcq ring threshold limit\n");
11276 pause->pri_map = 1;
11280 rxq_init->dscr_map = fp->rx_dma.paddr;
11281 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11282 rxq_init->rcq_map = fp->rcq_dma.paddr;
11283 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11286 * This should be the maximum number of data bytes that may be
11287 * placed on the BD (not including padding).
11289 rxq_init->buf_sz = (fp->rx_buf_size -
11290 IP_HEADER_ALIGNMENT_PADDING);
11292 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11293 rxq_init->tpa_agg_sz = tpa_agg_size;
11294 rxq_init->sge_buf_sz = sge_sz;
11295 rxq_init->max_sges_pkt = max_sge;
11296 rxq_init->rss_engine_id = SC_FUNC(sc);
11297 rxq_init->mcast_engine_id = SC_FUNC(sc);
11300 * Maximum number of simultaneous TPA aggregations for this Queue.
11301 * For PF Clients it should be the maximum available number.
11302 * VF driver(s) may want to define it to a smaller value.
11304 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11306 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11307 rxq_init->fw_sb_id = fp->fw_sb_id;
11309 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11312 * configure silent vlan removal
11313 * if multi function mode is afex, then mask default vlan
11315 if (IS_MF_AFEX(sc)) {
11316 rxq_init->silent_removal_value =
11317 sc->devinfo.mf_info.afex_def_vlan_tag;
11318 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11323 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11324 struct bxe_fastpath *fp,
11325 struct ecore_txq_setup_params *txq_init,
11329 * XXX If multiple CoS is ever supported then each fastpath structure
11330 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11331 * fp->txdata[cos]->tx_dma.paddr;
11333 txq_init->dscr_map = fp->tx_dma.paddr;
11334 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11335 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11336 txq_init->fw_sb_id = fp->fw_sb_id;
11339 * set the TSS leading client id for TX classification to the
11340 * leading RSS client id
11342 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11346 * This function performs 2 steps in a queue state machine: RESET->INIT, then INIT->SETUP.
11351 bxe_setup_queue(struct bxe_softc *sc,
11352 struct bxe_fastpath *fp,
11355 struct ecore_queue_state_params q_params = { NULL };
11356 struct ecore_queue_setup_params *setup_params =
11357 &q_params.params.setup;
11360 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11362 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11364 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11366 /* we want to wait for completion in this context */
11367 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11369 /* prepare the INIT parameters */
11370 bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11372 /* Set the command */
11373 q_params.cmd = ECORE_Q_CMD_INIT;
11375 /* Change the state to INIT */
11376 rc = ecore_queue_state_change(sc, &q_params);
11378 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11382 BLOGD(sc, DBG_LOAD, "init complete\n");
11384 /* now move the Queue to the SETUP state */
11385 memset(setup_params, 0, sizeof(*setup_params));
11387 /* set Queue flags */
11388 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11390 /* set general SETUP parameters */
11391 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11392 FIRST_TX_COS_INDEX);
11394 bxe_pf_rx_q_prep(sc, fp,
11395 &setup_params->pause_params,
11396 &setup_params->rxq_params);
11398 bxe_pf_tx_q_prep(sc, fp,
11399 &setup_params->txq_params,
11400 FIRST_TX_COS_INDEX);
11402 /* Set the command */
11403 q_params.cmd = ECORE_Q_CMD_SETUP;
11405 /* change the state to SETUP */
11406 rc = ecore_queue_state_change(sc, &q_params);
11408 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11416 bxe_setup_leading(struct bxe_softc *sc)
11418 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11422 bxe_config_rss_pf(struct bxe_softc *sc,
11423 struct ecore_rss_config_obj *rss_obj,
11424 uint8_t config_hash)
11426 struct ecore_config_rss_params params = { NULL };
11430 * Although RSS is meaningless when there is a single HW queue, we
11431 * still need it enabled in order to have HW Rx hash generated.
11434 params.rss_obj = rss_obj;
11436 bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11438 bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11440 /* RSS configuration */
11441 bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11442 bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11443 bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11444 bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11445 if (rss_obj->udp_rss_v4) {
11446 bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11448 if (rss_obj->udp_rss_v6) {
11449 bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11453 params.rss_result_mask = MULTI_MASK;
11455 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11459 for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11460 params.rss_key[i] = arc4random();
11463 bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11466 return (ecore_config_rss(sc, &params));
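/*
 * The key loop above fills the RSS hash key one 32-bit word at a time
 * from arc4random(); assuming the conventional 40-byte Toeplitz key,
 * that is 10 random words per (re)configuration when config_hash is set.
 */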
11470 bxe_config_rss_eth(struct bxe_softc *sc,
11471 uint8_t config_hash)
11473 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11477 bxe_init_rss_pf(struct bxe_softc *sc)
11479 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11483 * Prepare the initial contents of the indirection table if RSS is enabled.
11486 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11487 sc->rss_conf_obj.ind_table[i] =
11488 (sc->fp->cl_id + (i % num_eth_queues));
11492 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11496 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11497 * per-port, so if explicit configuration is needed, do it only for a PMF.
11500 * For 57712 and newer it's a per-function configuration.
11502 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
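/*
 * Sketch (sizes hypothetical) of the round-robin indirection table
 * prepared above: with 4 ETH queues and a base cl_id of 16 the table
 * repeats 16,17,18,19,16,... so hash buckets spread evenly over queues.
 */
#if 0
static void
rss_ind_fill(uint8_t *tbl, int tbl_sz, uint8_t cl_id, int num_q)
{
    int i;

    for (i = 0; i < tbl_sz; i++)
        tbl[i] = cl_id + (i % num_q);
}
#endif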
11506 bxe_set_mac_one(struct bxe_softc *sc,
11508 struct ecore_vlan_mac_obj *obj,
11511 unsigned long *ramrod_flags)
11513 struct ecore_vlan_mac_ramrod_params ramrod_param;
11516 memset(&ramrod_param, 0, sizeof(ramrod_param));
11518 /* fill in general parameters */
11519 ramrod_param.vlan_mac_obj = obj;
11520 ramrod_param.ramrod_flags = *ramrod_flags;
11522 /* fill a user request section if needed */
11523 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11524 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11526 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11528 /* Set the command: ADD or DEL */
11529 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11530 ECORE_VLAN_MAC_DEL;
11533 rc = ecore_config_vlan_mac(sc, &ramrod_param);
11535 if (rc == ECORE_EXISTS) {
11536 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11537 /* do not treat adding same MAC as error */
11539 } else if (rc < 0) {
11540 BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11547 bxe_set_eth_mac(struct bxe_softc *sc,
11550 unsigned long ramrod_flags = 0;
11552 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11554 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11556 /* Eth MAC is set on RSS leading client (fp[0]) */
11557 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11558 &sc->sp_objs->mac_obj,
11559 set, ECORE_ETH_MAC, &ramrod_flags));
11563 bxe_get_cur_phy_idx(struct bxe_softc *sc)
11565 uint32_t sel_phy_idx = 0;
11567 if (sc->link_params.num_phys <= 1) {
11568 return (ELINK_INT_PHY);
11571 if (sc->link_vars.link_up) {
11572 sel_phy_idx = ELINK_EXT_PHY1;
11573 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11574 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11575 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11576 ELINK_SUPPORTED_FIBRE))
11577 sel_phy_idx = ELINK_EXT_PHY2;
11579 switch (elink_phy_selection(&sc->link_params)) {
11580 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11581 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11582 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11583 sel_phy_idx = ELINK_EXT_PHY1;
11585 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11586 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11587 sel_phy_idx = ELINK_EXT_PHY2;
11592 return (sel_phy_idx);
11596 bxe_get_link_cfg_idx(struct bxe_softc *sc)
11598 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11601 * The selected activated PHY is always after swapping (in case PHY
11602 * swapping is enabled). So when swapping is enabled, we need to reverse
11603 * the configuration
11606 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11607 if (sel_phy_idx == ELINK_EXT_PHY1)
11608 sel_phy_idx = ELINK_EXT_PHY2;
11609 else if (sel_phy_idx == ELINK_EXT_PHY2)
11610 sel_phy_idx = ELINK_EXT_PHY1;
11613 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11617 bxe_set_requested_fc(struct bxe_softc *sc)
11620 * Initialize the link parameters structure. It is recommended to
11621 * turn off RX flow control (FC) for jumbo frames for better
11622 * performance.
11624 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11625 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11627 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11632 bxe_calc_fc_adv(struct bxe_softc *sc)
11634 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11635 switch (sc->link_vars.ieee_fc &
11636 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11637 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
11639 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11643 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11644 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11648 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11649 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11655 bxe_get_mf_speed(struct bxe_softc *sc)
11657 uint16_t line_speed = sc->link_vars.line_speed;
11660 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11662 /* calculate the current MAX line speed limit for the MF devices */
11663 if (IS_MF_SI(sc)) {
11664 line_speed = (line_speed * maxCfg) / 100;
11665 } else { /* SD mode */
11666 uint16_t vn_max_rate = maxCfg * 100;
11668 if (vn_max_rate < line_speed) {
11669 line_speed = vn_max_rate;
11674 return (line_speed);
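/*
 * NOTE (worked example with assumed values): maxCfg is in units of
 * 100 Mbps. With a 10000 Mbps link and maxCfg = 25:
 *
 *     SI mode: line_speed = (10000 * 25) / 100 = 2500 Mbps
 *     SD mode: vn_max_rate = 25 * 100 = 2500 Mbps, which replaces the
 *              line speed only because it is below 10000 Mbps
 */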
11678 bxe_fill_report_data(struct bxe_softc *sc,
11679 struct bxe_link_report_data *data)
11681 uint16_t line_speed = bxe_get_mf_speed(sc);
11683 memset(data, 0, sizeof(*data));
11685 /* fill the report data with the effective line speed */
11686 data->line_speed = line_speed;
11689 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11690 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11694 if (sc->link_vars.duplex == DUPLEX_FULL) {
11695 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11698 /* Rx Flow Control is ON */
11699 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11700 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11703 /* Tx Flow Control is ON */
11704 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11705 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11709 /* report link status to OS, should be called under phy_lock */
11711 bxe_link_report_locked(struct bxe_softc *sc)
11713 struct bxe_link_report_data cur_data;
11715 /* reread mf_cfg */
11716 if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11717 bxe_read_mf_cfg(sc);
11720 /* Read the current link report info */
11721 bxe_fill_report_data(sc, &cur_data);
11723 /* Don't report link down or exactly the same link status twice */
11724 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11725 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11726 &sc->last_reported_link.link_report_flags) &&
11727 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11728 &cur_data.link_report_flags))) {
11734 /* report new link params and remember the state for the next time */
11735 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11737 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11738 &cur_data.link_report_flags)) {
11739 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11740 BLOGI(sc, "NIC Link is Down\n");
11742 const char *duplex;
11745 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11746 &cur_data.link_report_flags)) {
11753 * Handle the FC at the end so that only these flags would be
11754 * possibly set. This way we may easily check if there is no FC enabled.
11757 if (cur_data.link_report_flags) {
11758 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11759 &cur_data.link_report_flags) &&
11760 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11761 &cur_data.link_report_flags)) {
11762 flow = "ON - receive & transmit";
11763 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11764 &cur_data.link_report_flags) &&
11765 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11766 &cur_data.link_report_flags)) {
11767 flow = "ON - receive";
11768 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11769 &cur_data.link_report_flags) &&
11770 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11771 &cur_data.link_report_flags)) {
11772 flow = "ON - transmit";
11774 flow = "none"; /* possible? */
11780 if_link_state_change(sc->ifp, LINK_STATE_UP);
11781 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11782 cur_data.line_speed, duplex, flow);
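/*
 * Summary of the flow-control string selection above:
 *
 *     RX FC | TX FC | reported flow
 *     ------+-------+--------------------------
 *      on   |  on   | "ON - receive & transmit"
 *      on   |  off  | "ON - receive"
 *      off  |  on   | "ON - transmit"
 *      off  |  off  | "none"
 */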
11787 bxe_link_report(struct bxe_softc *sc)
11789 bxe_acquire_phy_lock(sc);
11790 bxe_link_report_locked(sc);
11791 bxe_release_phy_lock(sc);
11795 bxe_link_status_update(struct bxe_softc *sc)
11797 if (sc->state != BXE_STATE_OPEN) {
11801 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11802 elink_link_status_update(&sc->link_params, &sc->link_vars);
11804 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11805 ELINK_SUPPORTED_10baseT_Full |
11806 ELINK_SUPPORTED_100baseT_Half |
11807 ELINK_SUPPORTED_100baseT_Full |
11808 ELINK_SUPPORTED_1000baseT_Full |
11809 ELINK_SUPPORTED_2500baseX_Full |
11810 ELINK_SUPPORTED_10000baseT_Full |
11811 ELINK_SUPPORTED_TP |
11812 ELINK_SUPPORTED_FIBRE |
11813 ELINK_SUPPORTED_Autoneg |
11814 ELINK_SUPPORTED_Pause |
11815 ELINK_SUPPORTED_Asym_Pause);
11816 sc->port.advertising[0] = sc->port.supported[0];
11818 sc->link_params.sc = sc;
11819 sc->link_params.port = SC_PORT(sc);
11820 sc->link_params.req_duplex[0] = DUPLEX_FULL;
11821 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
11822 sc->link_params.req_line_speed[0] = SPEED_10000;
11823 sc->link_params.speed_cap_mask[0] = 0x7f0000;
11824 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
11826 if (CHIP_REV_IS_FPGA(sc)) {
11827 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
11828 sc->link_vars.line_speed = ELINK_SPEED_1000;
11829 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11830 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11832 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
11833 sc->link_vars.line_speed = ELINK_SPEED_10000;
11834 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11835 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11838 sc->link_vars.link_up = 1;
11840 sc->link_vars.duplex = DUPLEX_FULL;
11841 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11844 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11845 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11846 bxe_link_report(sc);
11851 if (sc->link_vars.link_up) {
11852 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11854 bxe_stats_handle(sc, STATS_EVENT_STOP);
11856 bxe_link_report(sc);
11858 bxe_link_report(sc);
11859 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11864 bxe_initial_phy_init(struct bxe_softc *sc,
11867 int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11868 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11869 struct elink_params *lp = &sc->link_params;
11871 bxe_set_requested_fc(sc);
11873 if (CHIP_REV_IS_SLOW(sc)) {
11874 uint32_t bond = CHIP_BOND_ID(sc);
11877 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11878 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11879 } else if (bond & 0x4) {
11880 if (CHIP_IS_E3(sc)) {
11881 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11883 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11885 } else if (bond & 0x8) {
11886 if (CHIP_IS_E3(sc)) {
11887 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11889 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11893 /* disable EMAC for E3 and above */
11895 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11898 sc->link_params.feature_config_flags |= feat;
11901 bxe_acquire_phy_lock(sc);
11903 if (load_mode == LOAD_DIAG) {
11904 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11905 /* Prefer doing PHY loopback at 10G speed, if possible */
11906 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11907 if (lp->speed_cap_mask[cfg_idx] &
11908 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11909 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11911 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11916 if (load_mode == LOAD_LOOPBACK_EXT) {
11917 lp->loopback_mode = ELINK_LOOPBACK_EXT;
11920 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11922 bxe_release_phy_lock(sc);
11924 bxe_calc_fc_adv(sc);
11926 if (sc->link_vars.link_up) {
11927 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11928 bxe_link_report(sc);
11931 if (!CHIP_REV_IS_SLOW(sc)) {
11932 bxe_periodic_start(sc);
11935 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11939 /* must be called under IF_ADDR_LOCK */
11942 bxe_set_mc_list(struct bxe_softc *sc)
11944 struct ecore_mcast_ramrod_params rparam = { NULL };
11948 struct ecore_mcast_list_elem *mc_mac, *mc_mac_start;
11949 unsigned char *mta;
11950 if_t ifp = sc->ifp;
11952 mc_count = if_multiaddr_count(ifp, -1); /* XXX they don't have a limit */
11956 mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
11957 mc_count, M_DEVBUF, M_NOWAIT);
11960 BLOGE(sc, "Failed to allocate temp mcast list\n");
11963 bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count));
11965 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO));
11966 mc_mac_start = mc_mac;
11969 free(mta, M_DEVBUF);
11970 BLOGE(sc, "Failed to allocate temp mcast list\n");
11973 bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
11975 /* mta and mcnt not expected to be different */
11976 if_multiaddr_array(ifp, mta, &mcnt, mc_count);
11979 rparam.mcast_obj = &sc->mcast_obj;
11980 ECORE_LIST_INIT(&rparam.mcast_list);
11982 for (i = 0; i < mcnt; i++) {
11984 mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN));
11985 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list);
11987 BLOGD(sc, DBG_LOAD,
11988 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
11989 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
11990 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
11994 rparam.mcast_list_len = mc_count;
11996 BXE_MCAST_LOCK(sc);
11998 /* first, clear all configured multicast MACs */
11999 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12001 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12002 BXE_MCAST_UNLOCK(sc);
12003 free(mc_mac_start, M_DEVBUF);
12004 free(mta, M_DEVBUF);
12008 /* Now add the new MACs */
12009 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12011 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12014 BXE_MCAST_UNLOCK(sc);
12016 free(mc_mac_start, M_DEVBUF);
12017 free(mta, M_DEVBUF);
12023 bxe_set_uc_list(struct bxe_softc *sc)
12025 if_t ifp = sc->ifp;
12026 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12027 struct ifaddr *ifa;
12028 unsigned long ramrod_flags = 0;
12031 #if __FreeBSD_version < 800000
12034 if_addr_rlock(ifp);
12037 /* first schedule a cleanup of the old configuration */
12038 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12040 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12041 #if __FreeBSD_version < 800000
12042 IF_ADDR_UNLOCK(ifp);
12044 if_addr_runlock(ifp);
12049 ifa = if_getifaddr(ifp); /* XXX Is this structure */
12051 if (ifa->ifa_addr->sa_family != AF_LINK) {
12052 ifa = TAILQ_NEXT(ifa, ifa_link);
12056 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12057 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12058 if (rc == -EEXIST) {
12059 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12060 /* do not treat adding same MAC as an error */
12062 } else if (rc < 0) {
12063 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12064 #if __FreeBSD_version < 800000
12065 IF_ADDR_UNLOCK(ifp);
12067 if_addr_runlock(ifp);
12072 ifa = TAILQ_NEXT(ifa, ifa_link);
12075 #if __FreeBSD_version < 800000
12076 IF_ADDR_UNLOCK(ifp);
12078 if_addr_runlock(ifp);
12081 /* Execute the pending commands */
12082 bit_set(&ramrod_flags, RAMROD_CONT);
12083 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12084 ECORE_UC_LIST_MAC, &ramrod_flags));
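/*
 * NOTE (minimal sketch of the two-phase pattern above): each
 * bxe_set_mac_one() call only schedules a DEL/ADD command on the
 * mac_obj; the final call with RAMROD_CONT set and a NULL mac drains
 * the pending command queue:
 *
 *     unsigned long flags = 0;
 *     bit_set(&flags, RAMROD_CONT);
 *     bxe_set_mac_one(sc, NULL, mac_obj, FALSE, ECORE_UC_LIST_MAC,
 *                     &flags);
 */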
12088 bxe_set_rx_mode(struct bxe_softc *sc)
12090 if_t ifp = sc->ifp;
12091 uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12093 if (sc->state != BXE_STATE_OPEN) {
12094 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12098 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12100 if (if_getflags(ifp) & IFF_PROMISC) {
12101 rx_mode = BXE_RX_MODE_PROMISC;
12102 } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12103 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12105 rx_mode = BXE_RX_MODE_ALLMULTI;
12108 /* some multicasts */
12109 if (bxe_set_mc_list(sc) < 0) {
12110 rx_mode = BXE_RX_MODE_ALLMULTI;
12112 if (bxe_set_uc_list(sc) < 0) {
12113 rx_mode = BXE_RX_MODE_PROMISC;
12118 sc->rx_mode = rx_mode;
12120 /* schedule the rx_mode command */
12121 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12122 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12123 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12128 bxe_set_storm_rx_mode(sc);
12133 /* update flags in shmem */
12135 bxe_update_drv_flags(struct bxe_softc *sc,
12139 uint32_t drv_flags;
12141 if (SHMEM2_HAS(sc, drv_flags)) {
12142 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12143 drv_flags = SHMEM2_RD(sc, drv_flags);
12146 SET_FLAGS(drv_flags, flags);
12148 RESET_FLAGS(drv_flags, flags);
12151 SHMEM2_WR(sc, drv_flags, drv_flags);
12152 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12154 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12158 /* periodic timer callout routine, only runs when the interface is up */
12161 bxe_periodic_callout_func(void *xsc)
12163 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12166 if (!BXE_CORE_TRYLOCK(sc)) {
12167 /* just bail and try again next time */
12169 if ((sc->state == BXE_STATE_OPEN) &&
12170 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12171 /* schedule the next periodic callout */
12172 callout_reset(&sc->periodic_callout, hz,
12173 bxe_periodic_callout_func, sc);
12179 if ((sc->state != BXE_STATE_OPEN) ||
12180 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12181 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12182 BXE_CORE_UNLOCK(sc);
12187 /* Check for TX timeouts on any fastpath. */
12188 FOR_EACH_QUEUE(sc, i) {
12189 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12190 /* Ruh-Roh, chip was reset! */
12195 if (!CHIP_REV_IS_SLOW(sc)) {
12197 * This barrier is needed to ensure the ordering between the writing
12198 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12199 * the reading here.
12202 if (sc->port.pmf) {
12203 bxe_acquire_phy_lock(sc);
12204 elink_period_func(&sc->link_params, &sc->link_vars);
12205 bxe_release_phy_lock(sc);
12209 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12210 int mb_idx = SC_FW_MB_IDX(sc);
12211 uint32_t drv_pulse;
12212 uint32_t mcp_pulse;
12214 ++sc->fw_drv_pulse_wr_seq;
12215 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12217 drv_pulse = sc->fw_drv_pulse_wr_seq;
12220 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12221 MCP_PULSE_SEQ_MASK);
12224 * The delta between driver pulse and mcp response should
12225 * be 1 (before mcp response) or 0 (after mcp response).
12227 if ((drv_pulse != mcp_pulse) &&
12228 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12229 /* someone lost a heartbeat... */
12230 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12231 drv_pulse, mcp_pulse);
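/*
 * NOTE (worked example, sequence width assumed): the pulse sequence
 * wraps modulo DRV_PULSE_SEQ_MASK, so with drv_pulse = 0x05:
 *
 *     mcp_pulse = 0x05  ->  in sync (MCP already responded)
 *     mcp_pulse = 0x04  ->  in sync (MCP one step behind)
 *     mcp_pulse = 0x02  ->  heartbeat lost, the error above is logged
 */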
12235 /* state is BXE_STATE_OPEN */
12236 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12238 BXE_CORE_UNLOCK(sc);
12240 if ((sc->state == BXE_STATE_OPEN) &&
12241 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12242 /* schedule the next periodic callout */
12243 callout_reset(&sc->periodic_callout, hz,
12244 bxe_periodic_callout_func, sc);
12249 bxe_periodic_start(struct bxe_softc *sc)
12251 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12252 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12256 bxe_periodic_stop(struct bxe_softc *sc)
12258 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12259 callout_drain(&sc->periodic_callout);
12262 /* start the controller */
12263 static __noinline int
12264 bxe_nic_load(struct bxe_softc *sc,
12271 BXE_CORE_LOCK_ASSERT(sc);
12273 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12275 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12278 /* must be called before memory allocation and HW init */
12279 bxe_ilt_set_info(sc);
12282 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12284 bxe_set_fp_rx_buf_size(sc);
12286 if (bxe_alloc_fp_buffers(sc) != 0) {
12287 BLOGE(sc, "Failed to allocate fastpath memory\n");
12288 sc->state = BXE_STATE_CLOSED;
12290 goto bxe_nic_load_error0;
12293 if (bxe_alloc_mem(sc) != 0) {
12294 sc->state = BXE_STATE_CLOSED;
12296 goto bxe_nic_load_error0;
12299 if (bxe_alloc_fw_stats_mem(sc) != 0) {
12300 sc->state = BXE_STATE_CLOSED;
12302 goto bxe_nic_load_error0;
12306 /* set pf load just before approaching the MCP */
12307 bxe_set_pf_load(sc);
12309 /* if MCP exists send load request and analyze response */
12310 if (!BXE_NOMCP(sc)) {
12311 /* attempt to load pf */
12312 if (bxe_nic_load_request(sc, &load_code) != 0) {
12313 sc->state = BXE_STATE_CLOSED;
12315 goto bxe_nic_load_error1;
12318 /* what did the MCP say? */
12319 if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12320 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12321 sc->state = BXE_STATE_CLOSED;
12323 goto bxe_nic_load_error2;
12326 BLOGI(sc, "Device has no MCP!\n");
12327 load_code = bxe_nic_load_no_mcp(sc);
12330 /* mark PMF if applicable */
12331 bxe_nic_load_pmf(sc, load_code);
12333 /* Init Function state controlling object */
12334 bxe_init_func_obj(sc);
12336 /* Initialize HW */
12337 if (bxe_init_hw(sc, load_code) != 0) {
12338 BLOGE(sc, "HW init failed\n");
12339 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12340 sc->state = BXE_STATE_CLOSED;
12342 goto bxe_nic_load_error2;
12346 /* set ALWAYS_ALIVE bit in shmem */
12347 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12349 sc->flags |= BXE_NO_PULSE;
12351 /* attach interrupts */
12352 if (bxe_interrupt_attach(sc) != 0) {
12353 sc->state = BXE_STATE_CLOSED;
12355 goto bxe_nic_load_error2;
12358 bxe_nic_init(sc, load_code);
12360 /* Init per-function objects */
12363 // XXX bxe_iov_nic_init(sc);
12365 /* set AFEX default VLAN tag to an invalid value */
12366 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12367 // XXX bxe_nic_load_afex_dcc(sc, load_code);
12369 sc->state = BXE_STATE_OPENING_WAITING_PORT;
12370 rc = bxe_func_start(sc);
12372 BLOGE(sc, "Function start failed! rc = %d\n", rc);
12373 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12374 sc->state = BXE_STATE_ERROR;
12375 goto bxe_nic_load_error3;
12378 /* send LOAD_DONE command to MCP */
12379 if (!BXE_NOMCP(sc)) {
12380 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12382 BLOGE(sc, "MCP response failure, aborting\n");
12383 sc->state = BXE_STATE_ERROR;
12385 goto bxe_nic_load_error3;
12389 rc = bxe_setup_leading(sc);
12391 BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12392 sc->state = BXE_STATE_ERROR;
12393 goto bxe_nic_load_error3;
12396 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12397 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12399 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12400 sc->state = BXE_STATE_ERROR;
12401 goto bxe_nic_load_error3;
12405 rc = bxe_init_rss_pf(sc);
12407 BLOGE(sc, "PF RSS init failed\n");
12408 sc->state = BXE_STATE_ERROR;
12409 goto bxe_nic_load_error3;
12414 /* now that the clients are configured we are ready to work */
12415 sc->state = BXE_STATE_OPEN;
12417 /* Configure a ucast MAC */
12419 rc = bxe_set_eth_mac(sc, TRUE);
12422 BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12423 sc->state = BXE_STATE_ERROR;
12424 goto bxe_nic_load_error3;
12427 if (sc->port.pmf) {
12428 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12430 sc->state = BXE_STATE_ERROR;
12431 goto bxe_nic_load_error3;
12435 sc->link_params.feature_config_flags &=
12436 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12438 /* start fast path */
12440 /* Initialize Rx filter */
12441 bxe_set_rx_mode(sc);
12444 switch (/* XXX load_mode */LOAD_OPEN) {
12450 case LOAD_LOOPBACK_EXT:
12451 sc->state = BXE_STATE_DIAG;
12458 if (sc->port.pmf) {
12459 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12461 bxe_link_status_update(sc);
12464 /* start the periodic timer callout */
12465 bxe_periodic_start(sc);
12467 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12468 /* mark driver is loaded in shmem2 */
12469 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12470 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12472 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12473 DRV_FLAGS_CAPABILITIES_LOADED_L2));
12476 /* wait for all pending SP commands to complete */
12477 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12478 BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12479 bxe_periodic_stop(sc);
12480 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12484 /* Tell the stack the driver is running! */
12485 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12487 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12491 bxe_nic_load_error3:
12494 bxe_int_disable_sync(sc, 1);
12496 /* clean out queued objects */
12497 bxe_squeeze_objects(sc);
12500 bxe_interrupt_detach(sc);
12502 bxe_nic_load_error2:
12504 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12505 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12506 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12511 bxe_nic_load_error1:
12513 /* clear pf_load status, as it was already set */
12515 bxe_clear_pf_load(sc);
12518 bxe_nic_load_error0:
12520 bxe_free_fw_stats_mem(sc);
12521 bxe_free_fp_buffers(sc);
12528 bxe_init_locked(struct bxe_softc *sc)
12530 int other_engine = SC_PATH(sc) ? 0 : 1;
12531 uint8_t other_load_status, load_status;
12532 uint8_t global = FALSE;
12535 BXE_CORE_LOCK_ASSERT(sc);
12537 /* check if the driver is already running */
12538 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12539 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12543 bxe_set_power_state(sc, PCI_PM_D0);
12546 * If parity occurred during the unload, then attentions and/or
12547 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
12548 * loaded on the current engine to complete the recovery. Parity recovery
12549 * is only relevant for the PF driver.
12552 other_load_status = bxe_get_load_status(sc, other_engine);
12553 load_status = bxe_get_load_status(sc, SC_PATH(sc));
12555 if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12556 bxe_chk_parity_attn(sc, &global, TRUE)) {
12559 * If there are attentions and they are in global blocks, set
12560 * the GLOBAL_RESET bit regardless of whether it will be this
12561 * function that will complete the recovery or not.
12564 bxe_set_reset_global(sc);
12568 * Only the first function on the current engine should try
12569 * to recover in open. In case of attentions in global blocks
12570 * only the first in the chip should try to recover.
12572 if ((!load_status && (!global || !other_load_status)) &&
12573 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12574 BLOGI(sc, "Recovered during init\n");
12578 /* recovery has failed... */
12579 bxe_set_power_state(sc, PCI_PM_D3hot);
12580 sc->recovery_state = BXE_RECOVERY_FAILED;
12582 BLOGE(sc, "Recovery flow hasn't properly "
12583 "completed yet, try again later. "
12584 "If you still see this message after a "
12585 "few retries then power cycle is required.\n");
12588 goto bxe_init_locked_done;
12593 sc->recovery_state = BXE_RECOVERY_DONE;
12595 rc = bxe_nic_load(sc, LOAD_OPEN);
12597 bxe_init_locked_done:
12600 /* Tell the stack the driver is NOT running! */
12601 BLOGE(sc, "Initialization failed, "
12602 "stack notified driver is NOT running!\n");
12603 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12610 bxe_stop_locked(struct bxe_softc *sc)
12612 BXE_CORE_LOCK_ASSERT(sc);
12613 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12617 * Handles controller initialization when called from an unlocked routine.
12618 * ifconfig calls this function.
12624 bxe_init(void *xsc)
12626 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12629 bxe_init_locked(sc);
12630 BXE_CORE_UNLOCK(sc);
12634 bxe_init_ifnet(struct bxe_softc *sc)
12639 /* ifconfig entrypoint for media type/status reporting */
12640 ifmedia_init(&sc->ifmedia, IFM_IMASK,
12641 bxe_ifmedia_update,
12642 bxe_ifmedia_status);
12644 /* set the default interface values */
12645 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12646 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12647 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12649 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12651 /* allocate the ifnet structure */
12652 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12653 BLOGE(sc, "Interface allocation failed!\n");
12657 if_setsoftc(ifp, sc);
12658 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12659 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
12660 if_setioctlfn(ifp, bxe_ioctl);
12661 if_setstartfn(ifp, bxe_tx_start);
12662 if_setgetcounterfn(ifp, bxe_get_counter);
12663 #if __FreeBSD_version >= 901504
12664 if_settransmitfn(ifp, bxe_tx_mq_start);
12665 if_setqflushfn(ifp, bxe_mq_flush);
12668 if_settimer(ifp, 0);
12670 if_setinitfn(ifp, bxe_init);
12671 if_setmtu(ifp, sc->mtu);
12672 if_sethwassist(ifp, (CSUM_IP |
12680 #if __FreeBSD_version < 700000
12682 IFCAP_VLAN_HWTAGGING |
12688 IFCAP_VLAN_HWTAGGING |
12690 IFCAP_VLAN_HWFILTER |
12691 IFCAP_VLAN_HWCSUM |
12699 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
12700 if_setcapenable(ifp, if_getcapabilities(ifp));
12701 if_setbaudrate(ifp, IF_Gbps(10));
12703 if_setsendqlen(ifp, sc->tx_ring_size);
12704 if_setsendqready(ifp);
12709 /* attach to the Ethernet interface list */
12710 ether_ifattach(ifp, sc->link_params.mac_addr);
12716 bxe_deallocate_bars(struct bxe_softc *sc)
12720 for (i = 0; i < MAX_BARS; i++) {
12721 if (sc->bar[i].resource != NULL) {
12722 bus_release_resource(sc->dev,
12725 sc->bar[i].resource);
12726 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12733 bxe_allocate_bars(struct bxe_softc *sc)
12738 memset(sc->bar, 0, sizeof(sc->bar));
12740 for (i = 0; i < MAX_BARS; i++) {
12742 /* memory resources reside at BARs 0, 2, 4 */
12743 /* Run `pciconf -lb` to see mappings */
12744 if ((i != 0) && (i != 2) && (i != 4)) {
12748 sc->bar[i].rid = PCIR_BAR(i);
12752 flags |= RF_SHAREABLE;
12755 if ((sc->bar[i].resource =
12756 bus_alloc_resource_any(sc->dev,
12763 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
12764 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12765 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12767 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n",
12769 (void *)rman_get_start(sc->bar[i].resource),
12770 (void *)rman_get_end(sc->bar[i].resource),
12771 rman_get_size(sc->bar[i].resource),
12772 (void *)sc->bar[i].kva);
12779 bxe_get_function_num(struct bxe_softc *sc)
12784 * Read the ME register to get the function number. The ME register
12785 * holds the relative-function number and absolute-function number. The
12786 * absolute-function number appears only in E2 and above. Before that
12787 * these bits always contained zero, therefore we cannot blindly use them.
12790 val = REG_RD(sc, BAR_ME_REGISTER);
12793 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12795 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12797 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12798 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12800 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12803 BLOGD(sc, DBG_LOAD,
12804 "Relative function %d, Absolute function %d, Path %d\n",
12805 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
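/*
 * NOTE (worked example): in 4-port mode pfunc_abs is
 * ((pfunc_rel << 1) | path_id), so relative function 2 on path 1 maps
 * to absolute function 5; in 2-port mode it is (pfunc_rel | path_id).
 */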
12809 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12811 uint32_t shmem2_size;
12813 uint32_t mf_cfg_offset_value;
12816 offset = (SHMEM_RD(sc, func_mb) +
12817 (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12820 if (sc->devinfo.shmem2_base != 0) {
12821 shmem2_size = SHMEM2_RD(sc, size);
12822 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12823 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12824 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12825 offset = mf_cfg_offset_value;
12834 bxe_pcie_capability_read(struct bxe_softc *sc,
12840 /* ensure PCIe capability is enabled */
12841 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12842 if (pcie_reg != 0) {
12843 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12844 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12848 BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12854 bxe_is_pcie_pending(struct bxe_softc *sc)
12856 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12857 PCIM_EXP_STA_TRANSACTION_PND);
12861 * Walk the PCI capabilities list for the device to find what features are
12862 * supported. These capabilities may be enabled/disabled by firmware so it's
12863 * best to walk the list rather than make assumptions.
12866 bxe_probe_pci_caps(struct bxe_softc *sc)
12868 uint16_t link_status;
12871 /* check if PCI Power Management is enabled */
12872 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12874 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12876 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12877 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12881 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12883 /* handle PCIe 2.0 workarounds for 57710 */
12884 if (CHIP_IS_E1(sc)) {
12885 /* workaround for 57710 errata E4_57710_27462 */
12886 sc->devinfo.pcie_link_speed =
12887 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12889 /* workaround for 57710 errata E4_57710_27488 */
12890 sc->devinfo.pcie_link_width =
12891 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12892 if (sc->devinfo.pcie_link_speed > 1) {
12893 sc->devinfo.pcie_link_width =
12894 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12897 sc->devinfo.pcie_link_speed =
12898 (link_status & PCIM_LINK_STA_SPEED);
12899 sc->devinfo.pcie_link_width =
12900 ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12903 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12904 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12906 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
12907 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
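/*
 * NOTE (worked example, standard PCIe layout assumed: speed in bits
 * 3:0, width in bits 9:4 of the link status register):
 *
 *     link_status = 0x0042
 *     speed = 0x0042 & PCIM_LINK_STA_SPEED        = 2  (5.0 GT/s)
 *     width = (0x0042 & PCIM_LINK_STA_WIDTH) >> 4 = 4  (x4 link)
 */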
12909 /* check if MSI capability is enabled */
12910 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
12912 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
12914 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
12915 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
12919 /* check if MSI-X capability is enabled */
12920 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
12922 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
12924 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
12925 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
12931 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
12933 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12936 /* get the outer vlan if we're in switch-dependent mode */
12938 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
12939 mf_info->ext_id = (uint16_t)val;
12941 mf_info->multi_vnics_mode = 1;
12943 if (!VALID_OVLAN(mf_info->ext_id)) {
12944 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
12948 /* get the capabilities */
12949 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12950 FUNC_MF_CFG_PROTOCOL_ISCSI) {
12951 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
12952 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12953 FUNC_MF_CFG_PROTOCOL_FCOE) {
12954 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
12956 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
12959 mf_info->vnics_per_port =
12960 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
12966 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
12968 uint32_t retval = 0;
12971 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
12973 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
12974 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
12975 retval |= MF_PROTO_SUPPORT_ETHERNET;
12977 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
12978 retval |= MF_PROTO_SUPPORT_ISCSI;
12980 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
12981 retval |= MF_PROTO_SUPPORT_FCOE;
12989 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
12991 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12995 * There is no outer vlan if we're in switch-independent mode.
12996 * If the mac is valid then assume multi-function.
12999 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13001 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13003 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13005 mf_info->vnics_per_port =
13006 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13012 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13014 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13015 uint32_t e1hov_tag;
13016 uint32_t func_config;
13017 uint32_t niv_config;
13019 mf_info->multi_vnics_mode = 1;
13021 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13022 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13023 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13026 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13027 FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13029 mf_info->default_vlan =
13030 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13031 FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13033 mf_info->niv_allowed_priorities =
13034 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13035 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13037 mf_info->niv_default_cos =
13038 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13039 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13041 mf_info->afex_vlan_mode =
13042 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13043 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13045 mf_info->niv_mba_enabled =
13046 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13047 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13049 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13051 mf_info->vnics_per_port =
13052 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
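/*
 * NOTE (illustrative example; mask/shift values assumed): the AFEX
 * fields above all use the same mask-and-shift extraction. If
 * FUNC_MF_CFG_AFEX_VLAN_MASK were 0x0fff0000 with a shift of 16, then
 * e1hov_tag = 0x00640000 would yield
 * default_vlan = (0x00640000 & 0x0fff0000) >> 16 = 0x64 (VLAN 100).
 */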
13058 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13060 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13067 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13069 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13070 mf_info->mf_config[SC_VN(sc)]);
13071 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13072 mf_info->multi_vnics_mode);
13073 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13074 mf_info->vnics_per_port);
13075 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13077 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13078 mf_info->min_bw[0], mf_info->min_bw[1],
13079 mf_info->min_bw[2], mf_info->min_bw[3]);
13080 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13081 mf_info->max_bw[0], mf_info->max_bw[1],
13082 mf_info->max_bw[2], mf_info->max_bw[3]);
13083 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13086 /* various MF mode sanity checks... */
13088 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13089 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13094 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13095 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13096 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13100 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13101 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13102 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13103 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13104 SC_VN(sc), OVLAN(sc));
13108 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13109 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13110 mf_info->multi_vnics_mode, OVLAN(sc));
13115 * Verify all functions are either in MF or SF mode. If MF, make
13116 * sure that all non-hidden functions have a valid ovlan. If SF,
13117 * make sure that all non-hidden functions have an invalid ovlan.
13119 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13120 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13121 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13122 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13123 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13124 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13125 BLOGE(sc, "mf_mode=SD function %d MF config "
13126 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13127 i, mf_info->multi_vnics_mode, ovlan1);
13132 /* Verify all funcs on the same port each have a different ovlan. */
13133 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13134 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13135 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13136 /* iterate from the next function on the port to the max func */
13137 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13138 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13139 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13140 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13141 VALID_OVLAN(ovlan1) &&
13142 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13143 VALID_OVLAN(ovlan2) &&
13144 (ovlan1 == ovlan2)) {
13145 BLOGE(sc, "mf_mode=SD functions %d and %d "
13146 "have the same ovlan (%d)\n",
13152 } /* MULTI_FUNCTION_SD */
13158 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13160 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13161 uint32_t val, mac_upper;
13164 /* initialize mf_info defaults */
13165 mf_info->vnics_per_port = 1;
13166 mf_info->multi_vnics_mode = FALSE;
13167 mf_info->path_has_ovlan = FALSE;
13168 mf_info->mf_mode = SINGLE_FUNCTION;
13170 if (!CHIP_IS_MF_CAP(sc)) {
13174 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13175 BLOGE(sc, "Invalid mf_cfg_base!\n");
13179 /* get the MF mode (switch dependent / independent / single-function) */
13181 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13183 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13185 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13187 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13189 /* check for legal upper mac bytes */
13190 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13191 mf_info->mf_mode = MULTI_FUNCTION_SI;
13193 BLOGE(sc, "Invalid config for Switch Independent mode\n");
13198 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13199 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13201 /* get outer vlan configuration */
13202 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13204 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13205 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13206 mf_info->mf_mode = MULTI_FUNCTION_SD;
13208 BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13213 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13215 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13218 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13221 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13222 * and the MAC address is valid.
13224 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13226 if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13227 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13228 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13230 BLOGE(sc, "Invalid config for AFEX mode\n");
13237 BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13238 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13243 /* set path mf_mode (which could be different from the function mf_mode) */
13244 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13245 mf_info->path_has_ovlan = TRUE;
13246 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13248 * Decide on path multi vnics mode. If we're not in MF mode and in
13249 * 4-port mode, it is enough to check vnic-0 of the other port on the same path
13252 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13253 uint8_t other_port = !(PORT_ID(sc) & 1);
13254 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13256 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13258 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13262 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13263 /* invalid MF config */
13264 if (SC_VN(sc) >= 1) {
13265 BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13272 /* get the MF configuration */
13273 mf_info->mf_config[SC_VN(sc)] =
13274 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13276 switch(mf_info->mf_mode)
13278 case MULTI_FUNCTION_SD:
13280 bxe_get_shmem_mf_cfg_info_sd(sc);
13283 case MULTI_FUNCTION_SI:
13285 bxe_get_shmem_mf_cfg_info_si(sc);
13288 case MULTI_FUNCTION_AFEX:
13290 bxe_get_shmem_mf_cfg_info_niv(sc);
13295 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13300 /* get the congestion management parameters */
13303 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13304 /* get min/max bw */
13305 val = MFCFG_RD(sc, func_mf_config[i].config);
13306 mf_info->min_bw[vnic] =
13307 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13308 mf_info->max_bw[vnic] =
13309 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13313 return (bxe_check_valid_mf_cfg(sc));
13317 bxe_get_shmem_info(struct bxe_softc *sc)
13320 uint32_t mac_hi, mac_lo, val;
13322 port = SC_PORT(sc);
13323 mac_hi = mac_lo = 0;
13325 sc->link_params.sc = sc;
13326 sc->link_params.port = port;
13328 /* get the hardware config info */
13329 sc->devinfo.hw_config =
13330 SHMEM_RD(sc, dev_info.shared_hw_config.config);
13331 sc->devinfo.hw_config2 =
13332 SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13334 sc->link_params.hw_led_mode =
13335 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13336 SHARED_HW_CFG_LED_MODE_SHIFT);
13338 /* get the port feature config */
13340 SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13342 /* get the link params */
13343 sc->link_params.speed_cap_mask[0] =
13344 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13345 sc->link_params.speed_cap_mask[1] =
13346 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13348 /* get the lane config */
13349 sc->link_params.lane_config =
13350 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13352 /* get the link config */
13353 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13354 sc->port.link_config[ELINK_INT_PHY] = val;
13355 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13356 sc->port.link_config[ELINK_EXT_PHY1] =
13357 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13359 /* get the override preemphasis flag and enable it or turn it off */
13360 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13361 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13362 sc->link_params.feature_config_flags |=
13363 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13365 sc->link_params.feature_config_flags &=
13366 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13369 /* get the initial value of the link params */
13370 sc->link_params.multi_phy_config =
13371 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13373 /* get external phy info */
13374 sc->port.ext_phy_config =
13375 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13377 /* get the multifunction configuration */
13378 bxe_get_mf_cfg_info(sc);
13380 /* get the mac address */
13382 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13383 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13385 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13386 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13389 if ((mac_lo == 0) && (mac_hi == 0)) {
13390 *sc->mac_addr_str = 0;
13391 BLOGE(sc, "No Ethernet address programmed!\n");
13393 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13394 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13395 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13396 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13397 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13398 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13399 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13400 "%02x:%02x:%02x:%02x:%02x:%02x",
13401 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13402 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13403 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13404 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
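/*
 * NOTE (worked example): mac_hi holds the upper 16 bits of the MAC
 * address and mac_lo the lower 32 bits, so mac_hi = 0x0050 and
 * mac_lo = 0x56a1b2c3 reassemble to 00:50:56:a1:b2:c3 via the byte
 * shifts above.
 */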
13411 bxe_get_tunable_params(struct bxe_softc *sc)
13413 /* sanity checks */
13415 if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13416 (bxe_interrupt_mode != INTR_MODE_MSI) &&
13417 (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13418 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13419 bxe_interrupt_mode = INTR_MODE_MSIX;
13422 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13423 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13424 bxe_queue_count = 0;
13427 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13428 if (bxe_max_rx_bufs == 0) {
13429 bxe_max_rx_bufs = RX_BD_USABLE;
13431 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13432 bxe_max_rx_bufs = 2048;
13436 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13437 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13438 bxe_hc_rx_ticks = 25;
13441 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13442 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13443 bxe_hc_tx_ticks = 50;
13446 if (bxe_max_aggregation_size == 0) {
13447 bxe_max_aggregation_size = TPA_AGG_SIZE;
13450 if (bxe_max_aggregation_size > 0xffff) {
13451 BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13452 bxe_max_aggregation_size);
13453 bxe_max_aggregation_size = TPA_AGG_SIZE;
13456 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13457 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13461 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13462 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13463 bxe_autogreeen = 0;
13466 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13467 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13471 /* pull in user settings */
13473 sc->interrupt_mode = bxe_interrupt_mode;
13474 sc->max_rx_bufs = bxe_max_rx_bufs;
13475 sc->hc_rx_ticks = bxe_hc_rx_ticks;
13476 sc->hc_tx_ticks = bxe_hc_tx_ticks;
13477 sc->max_aggregation_size = bxe_max_aggregation_size;
13478 sc->mrrs = bxe_mrrs;
13479 sc->autogreeen = bxe_autogreeen;
13480 sc->udp_rss = bxe_udp_rss;
13482 if (bxe_interrupt_mode == INTR_MODE_INTX) {
13483 sc->num_queues = 1;
13484 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13486 min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13488 if (sc->num_queues > mp_ncpus) {
13489 sc->num_queues = mp_ncpus;
13493 BLOGD(sc, DBG_LOAD,
13496 "interrupt_mode=%d "
13501 "max_aggregation_size=%d "
13506 sc->interrupt_mode,
13511 sc->max_aggregation_size,
13518 bxe_media_detect(struct bxe_softc *sc)
13521 uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13523 switch (sc->link_params.phy[phy_idx].media_type) {
13524 case ELINK_ETH_PHY_SFPP_10G_FIBER:
13525 case ELINK_ETH_PHY_XFP_FIBER:
13526 BLOGI(sc, "Found 10Gb Fiber media.\n");
13527 sc->media = IFM_10G_SR;
13528 port_type = PORT_FIBRE;
13530 case ELINK_ETH_PHY_SFP_1G_FIBER:
13531 BLOGI(sc, "Found 1Gb Fiber media.\n");
13532 sc->media = IFM_1000_SX;
13533 port_type = PORT_FIBRE;
13535 case ELINK_ETH_PHY_KR:
13536 case ELINK_ETH_PHY_CX4:
13537 BLOGI(sc, "Found 10GBase-CX4 media.\n");
13538 sc->media = IFM_10G_CX4;
13539 port_type = PORT_FIBRE;
13541 case ELINK_ETH_PHY_DA_TWINAX:
13542 BLOGI(sc, "Found 10Gb Twinax media.\n");
13543 sc->media = IFM_10G_TWINAX;
13544 port_type = PORT_DA;
13546 case ELINK_ETH_PHY_BASE_T:
13547 if (sc->link_params.speed_cap_mask[0] &
13548 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13549 BLOGI(sc, "Found 10GBase-T media.\n");
13550 sc->media = IFM_10G_T;
13551 port_type = PORT_TP;
13553 BLOGI(sc, "Found 1000Base-T media.\n");
13554 sc->media = IFM_1000_T;
13555 port_type = PORT_TP;
13558 case ELINK_ETH_PHY_NOT_PRESENT:
13559 BLOGI(sc, "Media not present.\n");
13561 port_type = PORT_OTHER;
13563 case ELINK_ETH_PHY_UNSPECIFIED:
13565 BLOGI(sc, "Unknown media!\n");
13567 port_type = PORT_OTHER;
13573 #define GET_FIELD(value, fname) \
13574 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13575 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13576 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
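/*
 * NOTE (macro expansion example): GET_FIELD(val, IGU_REG_MAPPING_MEMORY_FID)
 * expands to
 *     (((val) & (IGU_REG_MAPPING_MEMORY_FID_MASK)) >>
 *         (IGU_REG_MAPPING_MEMORY_FID_SHIFT))
 * so IGU_FID()/IGU_VEC() pull the function id and vector number out of
 * a single IGU CAM entry.
 */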
13579 bxe_get_igu_cam_info(struct bxe_softc *sc)
13581 int pfid = SC_FUNC(sc);
13584 uint8_t fid, igu_sb_cnt = 0;
13586 sc->igu_base_sb = 0xff;
13588 if (CHIP_INT_MODE_IS_BC(sc)) {
13589 int vn = SC_VN(sc);
13590 igu_sb_cnt = sc->igu_sb_cnt;
13591 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13593 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13594 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13598 /* IGU in normal mode - read CAM */
13599 for (igu_sb_id = 0;
13600 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13602 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13603 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13606 fid = IGU_FID(val);
13607 if ((fid & IGU_FID_ENCODE_IS_PF)) {
13608 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13611 if (IGU_VEC(val) == 0) {
13612 /* default status block */
13613 sc->igu_dsb_id = igu_sb_id;
13615 if (sc->igu_base_sb == 0xff) {
13616 sc->igu_base_sb = igu_sb_id;
13624 * Due to new PF resource allocation by MFW T7.4 and above, the number
13625 * of CAM entries may not be equal to the value advertised in PCI. The
13626 * driver should use the minimum of the two as the actual status block count.
13629 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13631 if (igu_sb_cnt == 0) {
13632 BLOGE(sc, "CAM configuration error\n");
13640 * Gather various information from the device config space, the device itself,
13641 * shmem, and the user input.
13644 bxe_get_device_info(struct bxe_softc *sc)
13649 /* Get the data for the device */
13650 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
13651 sc->devinfo.device_id = pci_get_device(sc->dev);
13652 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13653 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13655 /* get the chip revision (chip metal comes from pci config space) */
13656 sc->devinfo.chip_id =
13657 sc->link_params.chip_id =
13658 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
13659 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
13660 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
13661 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
13663 /* force 57811 according to MISC register */
13664 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13665 if (CHIP_IS_57810(sc)) {
13666 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13667 (sc->devinfo.chip_id & 0x0000ffff));
13668 } else if (CHIP_IS_57810_MF(sc)) {
13669 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13670 (sc->devinfo.chip_id & 0x0000ffff));
13672 sc->devinfo.chip_id |= 0x1;
13675 BLOGD(sc, DBG_LOAD,
13676 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13677 sc->devinfo.chip_id,
13678 ((sc->devinfo.chip_id >> 16) & 0xffff),
13679 ((sc->devinfo.chip_id >> 12) & 0xf),
13680 ((sc->devinfo.chip_id >> 4) & 0xff),
13681 ((sc->devinfo.chip_id >> 0) & 0xf));
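/*
 * NOTE (worked example with an assumed value): the chip_id assembled
 * above packs (num << 16 | rev << 12 | metal << 4 | bond), so
 * chip_id = 0x168e0001 decodes as num=0x168e, rev=0x0, metal=0x00,
 * bond=0x1, matching the debug print fields.
 */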
13683 val = (REG_RD(sc, 0x2874) & 0x55);
13684 if ((sc->devinfo.chip_id & 0x1) ||
13685 (CHIP_IS_E1(sc) && val) ||
13686 (CHIP_IS_E1H(sc) && (val == 0x55))) {
13687 sc->flags |= BXE_ONE_PORT_FLAG;
13688 BLOGD(sc, DBG_LOAD, "single port device\n");
13691 /* set the doorbell size */
13692 sc->doorbell_size = (1 << BXE_DB_SHIFT);
13694 /* determine whether the device is in 2 port or 4 port mode */
13695 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13696 if (CHIP_IS_E2E3(sc)) {
13698 * Read port4mode_en_ovwr[0]:
13699 * If 1, four port mode is in port4mode_en_ovwr[1].
13700 * If 0, four port mode is in port4mode_en[0].
13702 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13704 val = ((val >> 1) & 1);
13706 val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13709 sc->devinfo.chip_port_mode =
13710 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13712 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13715 /* get the function and path info for the device */
13716 bxe_get_function_num(sc);
13718 /* get the shared memory base address */
13719 sc->devinfo.shmem_base =
13720 sc->link_params.shmem_base =
13721 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13722 sc->devinfo.shmem2_base =
13723 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13724 MISC_REG_GENERIC_CR_0));
13726 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13727 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13729 if (!sc->devinfo.shmem_base) {
13730 /* this should ONLY prevent upcoming shmem reads */
13731 BLOGI(sc, "MCP not active\n");
13732 sc->flags |= BXE_NO_MCP_FLAG;
13736 /* make sure the shared memory contents are valid */
13737 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13738 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13739 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13740 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13743 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13745 /* get the bootcode version */
13746 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13747 snprintf(sc->devinfo.bc_ver_str,
13748 sizeof(sc->devinfo.bc_ver_str),
13750 ((sc->devinfo.bc_ver >> 24) & 0xff),
13751 ((sc->devinfo.bc_ver >> 16) & 0xff),
13752 ((sc->devinfo.bc_ver >> 8) & 0xff));
13753 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13755 /* get the bootcode shmem address */
13756 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13757 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
13759 /* clean indirect addresses as they're not used */
13760 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13762 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13763 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13764 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13765 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13766 if (CHIP_IS_E1x(sc)) {
13767 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13768 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13769 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13770 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13774 * Enable internal target-read (in case we are probed after PF
13775 * FLR). Must be done prior to any BAR read access. Only for 57712 and up.
13778 if (!CHIP_IS_E1x(sc)) {
13779 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13783 /* get the nvram size */
13784 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13785 sc->devinfo.flash_size =
13786 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13787 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
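/*
 * NOTE (worked example, assuming NVRAM_1MB_SIZE is 1 Mbit expressed in
 * bytes, i.e. 0x20000): a CFG4 size field of 3 gives a flash size of
 * 0x20000 << 3 = 0x100000 bytes (1 MB).
 */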
13789 /* get PCI capabilities */
13790 bxe_probe_pci_caps(sc);
13792 bxe_set_power_state(sc, PCI_PM_D0);
13794 /* get various configuration parameters from shmem */
13795 bxe_get_shmem_info(sc);
13797 if (sc->devinfo.pcie_msix_cap_reg != 0) {
13798 val = pci_read_config(sc->dev,
13799 (sc->devinfo.pcie_msix_cap_reg +
13802 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13804 sc->igu_sb_cnt = 1;
13807 sc->igu_base_addr = BAR_IGU_INTMEM;
13809 /* initialize IGU parameters */
13810 if (CHIP_IS_E1x(sc)) {
13811 sc->devinfo.int_block = INT_BLOCK_HC;
13812 sc->igu_dsb_id = DEF_SB_IGU_ID;
13813 sc->igu_base_sb = 0;
13815 sc->devinfo.int_block = INT_BLOCK_IGU;
13817 /* do not allow device reset during IGU info processing */
13818 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13820 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13822 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13825 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13827 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13828 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13829 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13831 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13836 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13837 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13838 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13843 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13844 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13845 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13847 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13850 rc = bxe_get_igu_cam_info(sc);
13852 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13860 * Get base FW non-default (fast path) status block ID. This value is
13861 * used to initialize the fw_sb_id saved on the fp/queue structure to
13862 * determine the id used by the FW.
13864 if (CHIP_IS_E1x(sc)) {
13865 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13868 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13869 * the same queue are indicated on the same IGU SB). So we prefer
13870 * FW and IGU SBs to be the same value.
13872 sc->base_fw_ndsb = sc->igu_base_sb;
13875 BLOGD(sc, DBG_LOAD,
13876 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13877 sc->igu_dsb_id, sc->igu_base_sb,
13878 sc->igu_sb_cnt, sc->base_fw_ndsb);
13880 elink_phy_probe(&sc->link_params);
13886 bxe_link_settings_supported(struct bxe_softc *sc,
13887 uint32_t switch_cfg)
13889 uint32_t cfg_size = 0;
13890 uint32_t idx;
13891 uint8_t port = SC_PORT(sc);
13893 /* aggregation of supported attributes of all external phys */
13894 sc->port.supported[0] = 0;
13895 sc->port.supported[1] = 0;
13897 switch (sc->link_params.num_phys) {
13899 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13903 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13907 if (sc->link_params.multi_phy_config &
13908 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13909 sc->port.supported[1] =
13910 sc->link_params.phy[ELINK_EXT_PHY1].supported;
13911 sc->port.supported[0] =
13912 sc->link_params.phy[ELINK_EXT_PHY2].supported;
13914 sc->port.supported[0] =
13915 sc->link_params.phy[ELINK_EXT_PHY1].supported;
13916 sc->port.supported[1] =
13917 sc->link_params.phy[ELINK_EXT_PHY2].supported;
13923 if (!(sc->port.supported[0] || sc->port.supported[1])) {
13924 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
13926 dev_info.port_hw_config[port].external_phy_config),
13928 dev_info.port_hw_config[port].external_phy_config2));
13932 if (CHIP_IS_E3(sc))
13933 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
13935 switch (switch_cfg) {
13936 case ELINK_SWITCH_CFG_1G:
13937 sc->port.phy_addr =
13938 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
13940 case ELINK_SWITCH_CFG_10G:
13941 sc->port.phy_addr =
13942 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
13945 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
13946 sc->port.link_config[0]);
13951 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
13953 /* mask what we support according to speed_cap_mask per configuration */
13954 for (idx = 0; idx < cfg_size; idx++) {
13955 if (!(sc->link_params.speed_cap_mask[idx] &
13956 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
13957 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
13960 if (!(sc->link_params.speed_cap_mask[idx] &
13961 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
13962 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
13965 if (!(sc->link_params.speed_cap_mask[idx] &
13966 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
13967 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
13970 if (!(sc->link_params.speed_cap_mask[idx] &
13971 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
13972 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
13975 if (!(sc->link_params.speed_cap_mask[idx] &
13976 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
13977 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
13980 if (!(sc->link_params.speed_cap_mask[idx] &
13981 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
13982 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
13985 if (!(sc->link_params.speed_cap_mask[idx] &
13986 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
13987 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
13990 if (!(sc->link_params.speed_cap_mask[idx] &
13991 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
13992 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
13996 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
13997 sc->port.supported[0], sc->port.supported[1]);
14001 bxe_link_settings_requested(struct bxe_softc *sc)
14003 uint32_t link_config;
14004 uint32_t idx;
14005 uint32_t cfg_size = 0;
14007 sc->port.advertising[0] = 0;
14008 sc->port.advertising[1] = 0;
14010 switch (sc->link_params.num_phys) {
14020 for (idx = 0; idx < cfg_size; idx++) {
14021 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14022 link_config = sc->port.link_config[idx];
14024 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14025 case PORT_FEATURE_LINK_SPEED_AUTO:
14026 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14027 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14028 sc->port.advertising[idx] |= sc->port.supported[idx];
14029 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14031 sc->port.advertising[idx] |=
14032 (ELINK_SUPPORTED_100baseT_Half |
14033 ELINK_SUPPORTED_100baseT_Full);
14035 /* force 10G, no AN */
14036 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14037 sc->port.advertising[idx] |=
14038 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14043 case PORT_FEATURE_LINK_SPEED_10M_FULL:
14044 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14045 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14046 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14049 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14050 "speed_cap_mask=0x%08x\n",
14051 link_config, sc->link_params.speed_cap_mask[idx]);
14056 case PORT_FEATURE_LINK_SPEED_10M_HALF:
14057 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14058 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14059 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14060 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14063 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14064 "speed_cap_mask=0x%08x\n",
14065 link_config, sc->link_params.speed_cap_mask[idx]);
14070 case PORT_FEATURE_LINK_SPEED_100M_FULL:
14071 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14072 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14073 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14076 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14077 "speed_cap_mask=0x%08x\n",
14078 link_config, sc->link_params.speed_cap_mask[idx]);
14083 case PORT_FEATURE_LINK_SPEED_100M_HALF:
14084 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14085 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14086 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14087 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14090 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14091 "speed_cap_mask=0x%08x\n",
14092 link_config, sc->link_params.speed_cap_mask[idx]);
14097 case PORT_FEATURE_LINK_SPEED_1G:
14098 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14099 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14100 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14103 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14104 "speed_cap_mask=0x%08x\n",
14105 link_config, sc->link_params.speed_cap_mask[idx]);
14110 case PORT_FEATURE_LINK_SPEED_2_5G:
14111 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14112 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14113 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14116 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14117 "speed_cap_mask=0x%08x\n",
14118 link_config, sc->link_params.speed_cap_mask[idx]);
14123 case PORT_FEATURE_LINK_SPEED_10G_CX4:
14124 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14125 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14126 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14129 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14130 "speed_cap_mask=0x%08x\n",
14131 link_config, sc->link_params.speed_cap_mask[idx]);
14136 case PORT_FEATURE_LINK_SPEED_20G:
14137 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14141 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14142 "speed_cap_mask=0x%08x\n",
14143 link_config, sc->link_params.speed_cap_mask[idx]);
14144 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14145 sc->port.advertising[idx] = sc->port.supported[idx];
14149 sc->link_params.req_flow_ctrl[idx] =
14150 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14152 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14153 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14154 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14156 bxe_set_requested_fc(sc);
14160 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14161 "req_flow_ctrl=0x%x advertising=0x%x\n",
14162 sc->link_params.req_line_speed[idx],
14163 sc->link_params.req_duplex[idx],
14164 sc->link_params.req_flow_ctrl[idx],
14165 sc->port.advertising[idx]);
14170 bxe_get_phy_info(struct bxe_softc *sc)
14172 uint8_t port = SC_PORT(sc);
14173 uint32_t config = sc->port.config;
14174 uint32_t eee_mode;
14176 /* shmem data already read in bxe_get_shmem_info() */
14178 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14179 "link_config0=0x%08x\n",
14180 sc->link_params.lane_config,
14181 sc->link_params.speed_cap_mask[0],
14182 sc->port.link_config[0]);
14184 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14185 bxe_link_settings_requested(sc);
14187 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14188 sc->link_params.feature_config_flags |=
14189 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14190 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14191 sc->link_params.feature_config_flags &=
14192 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14193 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14194 sc->link_params.feature_config_flags |=
14195 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14198 /* configure link feature according to nvram value */
14200 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14201 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14202 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14203 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14204 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14205 ELINK_EEE_MODE_ENABLE_LPI |
14206 ELINK_EEE_MODE_OUTPUT_TIME);
14208 sc->link_params.eee_mode = 0;
14211 /* get the media type */
14212 bxe_media_detect(sc);
14216 bxe_get_params(struct bxe_softc *sc)
14218 /* get user tunable params */
14219 bxe_get_tunable_params(sc);
14221 /* select the RX and TX ring sizes */
14222 sc->tx_ring_size = TX_BD_USABLE;
14223 sc->rx_ring_size = RX_BD_USABLE;
14225 /* XXX disable WoL */
14230 bxe_set_modes_bitmap(struct bxe_softc *sc)
14232 uint32_t flags = 0;
14234 if (CHIP_REV_IS_FPGA(sc)) {
14235 SET_FLAGS(flags, MODE_FPGA);
14236 } else if (CHIP_REV_IS_EMUL(sc)) {
14237 SET_FLAGS(flags, MODE_EMUL);
14239 SET_FLAGS(flags, MODE_ASIC);
14242 if (CHIP_IS_MODE_4_PORT(sc)) {
14243 SET_FLAGS(flags, MODE_PORT4);
14245 SET_FLAGS(flags, MODE_PORT2);
14248 if (CHIP_IS_E2(sc)) {
14249 SET_FLAGS(flags, MODE_E2);
14250 } else if (CHIP_IS_E3(sc)) {
14251 SET_FLAGS(flags, MODE_E3);
14252 if (CHIP_REV(sc) == CHIP_REV_Ax) {
14253 SET_FLAGS(flags, MODE_E3_A0);
14254 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14255 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14260 SET_FLAGS(flags, MODE_MF);
14261 switch (sc->devinfo.mf_info.mf_mode) {
14262 case MULTI_FUNCTION_SD:
14263 SET_FLAGS(flags, MODE_MF_SD);
14265 case MULTI_FUNCTION_SI:
14266 SET_FLAGS(flags, MODE_MF_SI);
14268 case MULTI_FUNCTION_AFEX:
14269 SET_FLAGS(flags, MODE_MF_AFEX);
14273 SET_FLAGS(flags, MODE_SF);
14276 #if defined(__LITTLE_ENDIAN)
14277 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14278 #else /* __BIG_ENDIAN */
14279 SET_FLAGS(flags, MODE_BIG_ENDIAN);
14282 INIT_MODE_FLAGS(sc) = flags;
14286 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14288 struct bxe_fastpath *fp;
14289 bus_addr_t busaddr;
14290 int max_agg_queues;
14291 int max_segments;
14292 bus_size_t max_size;
14293 bus_size_t max_seg_size;
14294 char buf[32];
14295 int rc;
14296 int i, j;
14298 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14300 /* allocate the parent bus DMA tag */
14301 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14303 0, /* boundary limit */
14304 BUS_SPACE_MAXADDR, /* restricted low */
14305 BUS_SPACE_MAXADDR, /* restricted hi */
14306 NULL, /* addr filter() */
14307 NULL, /* addr filter() arg */
14308 BUS_SPACE_MAXSIZE_32BIT, /* max map size */
14309 BUS_SPACE_UNRESTRICTED, /* num discontinuous */
14310 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
14313 NULL, /* lock() arg */
14314 &sc->parent_dma_tag); /* returned dma tag */
14316 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14320 /************************/
14321 /* DEFAULT STATUS BLOCK */
14322 /************************/
14324 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14325 &sc->def_sb_dma, "default status block") != 0) {
14327 bus_dma_tag_destroy(sc->parent_dma_tag);
14331 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14337 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14338 &sc->eq_dma, "event queue") != 0) {
14340 bxe_dma_free(sc, &sc->def_sb_dma);
14342 bus_dma_tag_destroy(sc->parent_dma_tag);
14346 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14352 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14353 &sc->sp_dma, "slow path") != 0) {
14355 bxe_dma_free(sc, &sc->eq_dma);
14357 bxe_dma_free(sc, &sc->def_sb_dma);
14359 bus_dma_tag_destroy(sc->parent_dma_tag);
14363 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14365 /*******************/
14366 /* SLOW PATH QUEUE */
14367 /*******************/
14369 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14370 &sc->spq_dma, "slow path queue") != 0) {
14372 bxe_dma_free(sc, &sc->sp_dma);
14374 bxe_dma_free(sc, &sc->eq_dma);
14376 bxe_dma_free(sc, &sc->def_sb_dma);
14378 bus_dma_tag_destroy(sc->parent_dma_tag);
14382 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14384 /***************************/
14385 /* FW DECOMPRESSION BUFFER */
14386 /***************************/
14388 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14389 "fw decompression buffer") != 0) {
14391 bxe_dma_free(sc, &sc->spq_dma);
14393 bxe_dma_free(sc, &sc->sp_dma);
14395 bxe_dma_free(sc, &sc->eq_dma);
14397 bxe_dma_free(sc, &sc->def_sb_dma);
14399 bus_dma_tag_destroy(sc->parent_dma_tag);
14403 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14405 if ((sc->gz_strm =
14406 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14408 bxe_dma_free(sc, &sc->gz_buf_dma);
14410 bxe_dma_free(sc, &sc->spq_dma);
14412 bxe_dma_free(sc, &sc->sp_dma);
14414 bxe_dma_free(sc, &sc->eq_dma);
14416 bxe_dma_free(sc, &sc->def_sb_dma);
14418 bus_dma_tag_destroy(sc->parent_dma_tag);
14426 /* allocate DMA memory for each fastpath structure */
14427 for (i = 0; i < sc->num_queues; i++) {
14432 /*******************/
14433 /* FP STATUS BLOCK */
14434 /*******************/
14436 snprintf(buf, sizeof(buf), "fp %d status block", i);
14437 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14438 &fp->sb_dma, buf) != 0) {
14439 /* XXX unwind and free previous fastpath allocations */
14440 BLOGE(sc, "Failed to alloc %s\n", buf);
14443 if (CHIP_IS_E2E3(sc)) {
14444 fp->status_block.e2_sb =
14445 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14447 fp->status_block.e1x_sb =
14448 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14452 /******************/
14453 /* FP TX BD CHAIN */
14454 /******************/
14456 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14457 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14458 &fp->tx_dma, buf) != 0) {
14459 /* XXX unwind and free previous fastpath allocations */
14460 BLOGE(sc, "Failed to alloc %s\n", buf);
14463 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14466 /* link together the tx bd chain pages */
14467 for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14468 /* index into the tx bd chain array to last entry per page */
14469 struct eth_tx_next_bd *tx_next_bd =
14470 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14471 /* point to the next page and wrap from last page */
14472 busaddr = (fp->tx_dma.paddr +
14473 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14474 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14475 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
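/*
 * Worked example (annotation, not in the original source): with, say,
 * TX_BD_NUM_PAGES == 4, iterations j = 1..4 patch the next-pointer BD at
 * the end of pages 0..3. The (j % TX_BD_NUM_PAGES) term maps j == 4 back
 * to page 0, so the last page points at the first and the BD chain is
 * circular in DMA space.
 */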
14478 /******************/
14479 /* FP RX BD CHAIN */
14480 /******************/
14482 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14483 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14484 &fp->rx_dma, buf) != 0) {
14485 /* XXX unwind and free previous fastpath allocations */
14486 BLOGE(sc, "Failed to alloc %s\n", buf);
14489 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14492 /* link together the rx bd chain pages */
14493 for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14494 /* index into the rx bd chain array to last entry per page */
14495 struct eth_rx_bd *rx_bd =
14496 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14497 /* point to the next page and wrap from last page */
14498 busaddr = (fp->rx_dma.paddr +
14499 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14500 rx_bd->addr_hi = htole32(U64_HI(busaddr));
14501 rx_bd->addr_lo = htole32(U64_LO(busaddr));
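/*
 * Annotation (assumption): the RX index uses "- 2" rather than "- 1"
 * because the last two descriptor slots of each RX BD page appear to be
 * reserved for the next-page pointer; the pointer lands in the first of
 * those slots and the same modulo arithmetic wraps the chain.
 */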
14504 /*******************/
14505 /* FP RX RCQ CHAIN */
14506 /*******************/
14508 snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14509 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14510 &fp->rcq_dma, buf) != 0) {
14511 /* XXX unwind and free previous fastpath allocations */
14512 BLOGE(sc, "Failed to alloc %s\n", buf);
14515 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14518 /* link together the rcq chain pages */
14519 for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14520 /* index into the rcq chain array to last entry per page */
14521 struct eth_rx_cqe_next_page *rx_cqe_next =
14522 (struct eth_rx_cqe_next_page *)
14523 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14524 /* point to the next page and wrap from last page */
14525 busaddr = (fp->rcq_dma.paddr +
14526 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14527 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14528 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14531 /*******************/
14532 /* FP RX SGE CHAIN */
14533 /*******************/
14535 snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14536 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14537 &fp->rx_sge_dma, buf) != 0) {
14538 /* XXX unwind and free previous fastpath allocations */
14539 BLOGE(sc, "Failed to alloc %s\n", buf);
14542 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14545 /* link together the sge chain pages */
14546 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14547 /* index into the rx sge chain array to last entry per page */
14548 struct eth_rx_sge *rx_sge =
14549 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14550 /* point to the next page and wrap from last page */
14551 busaddr = (fp->rx_sge_dma.paddr +
14552 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14553 rx_sge->addr_hi = htole32(U64_HI(busaddr));
14554 rx_sge->addr_lo = htole32(U64_LO(busaddr));
14557 /***********************/
14558 /* FP TX MBUF DMA MAPS */
14559 /***********************/
14561 /* set required sizes before mapping to conserve resources */
14562 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14563 max_size = BXE_TSO_MAX_SIZE;
14564 max_segments = BXE_TSO_MAX_SEGMENTS;
14565 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14567 max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
14568 max_segments = BXE_MAX_SEGMENTS;
14569 max_seg_size = MCLBYTES;
14572 /* create a dma tag for the tx mbufs */
14573 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14575 0, /* boundary limit */
14576 BUS_SPACE_MAXADDR, /* restricted low */
14577 BUS_SPACE_MAXADDR, /* restricted hi */
14578 NULL, /* addr filter() */
14579 NULL, /* addr filter() arg */
14580 max_size, /* max map size */
14581 max_segments, /* num discontinuous */
14582 max_seg_size, /* max seg size */
14585 NULL, /* lock() arg */
14586 &fp->tx_mbuf_tag); /* returned dma tag */
14588 /* XXX unwind and free previous fastpath allocations */
14589 BLOGE(sc, "Failed to create dma tag for "
14590 "'fp %d tx mbufs' (%d)\n", i, rc);
14594 /* create dma maps for each of the tx mbuf clusters */
14595 for (j = 0; j < TX_BD_TOTAL; j++) {
14596 if (bus_dmamap_create(fp->tx_mbuf_tag,
14598 &fp->tx_mbuf_chain[j].m_map)) {
14599 /* XXX unwind and free previous fastpath allocations */
14600 BLOGE(sc, "Failed to create dma map for "
14601 "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14606 /***********************/
14607 /* FP RX MBUF DMA MAPS */
14608 /***********************/
14610 /* create a dma tag for the rx mbufs */
14611 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14613 0, /* boundary limit */
14614 BUS_SPACE_MAXADDR, /* restricted low */
14615 BUS_SPACE_MAXADDR, /* restricted hi */
14616 NULL, /* addr filter() */
14617 NULL, /* addr filter() arg */
14618 MJUM9BYTES, /* max map size */
14619 1, /* num discontinuous */
14620 MJUM9BYTES, /* max seg size */
14623 NULL, /* lock() arg */
14624 &fp->rx_mbuf_tag); /* returned dma tag */
14626 /* XXX unwind and free previous fastpath allocations */
14627 BLOGE(sc, "Failed to create dma tag for "
14628 "'fp %d rx mbufs' (%d)\n", i, rc);
14632 /* create dma maps for each of the rx mbuf clusters */
14633 for (j = 0; j < RX_BD_TOTAL; j++) {
14634 if (bus_dmamap_create(fp->rx_mbuf_tag,
14636 &fp->rx_mbuf_chain[j].m_map)) {
14637 /* XXX unwind and free previous fastpath allocations */
14638 BLOGE(sc, "Failed to create dma map for "
14639 "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14644 /* create dma map for the spare rx mbuf cluster */
14645 if (bus_dmamap_create(fp->rx_mbuf_tag,
14647 &fp->rx_mbuf_spare_map)) {
14648 /* XXX unwind and free previous fastpath allocations */
14649 BLOGE(sc, "Failed to create dma map for "
14650 "'fp %d spare rx mbuf' (%d)\n", i, rc);
14654 /***************************/
14655 /* FP RX SGE MBUF DMA MAPS */
14656 /***************************/
14658 /* create a dma tag for the rx sge mbufs */
14659 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14661 0, /* boundary limit */
14662 BUS_SPACE_MAXADDR, /* restricted low */
14663 BUS_SPACE_MAXADDR, /* restricted hi */
14664 NULL, /* addr filter() */
14665 NULL, /* addr filter() arg */
14666 BCM_PAGE_SIZE, /* max map size */
14667 1, /* num discontinuous */
14668 BCM_PAGE_SIZE, /* max seg size */
14671 NULL, /* lock() arg */
14672 &fp->rx_sge_mbuf_tag); /* returned dma tag */
14674 /* XXX unwind and free previous fastpath allocations */
14675 BLOGE(sc, "Failed to create dma tag for "
14676 "'fp %d rx sge mbufs' (%d)\n", i, rc);
14680 /* create dma maps for the rx sge mbuf clusters */
14681 for (j = 0; j < RX_SGE_TOTAL; j++) {
14682 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14684 &fp->rx_sge_mbuf_chain[j].m_map)) {
14685 /* XXX unwind and free previous fastpath allocations */
14686 BLOGE(sc, "Failed to create dma map for "
14687 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14692 /* create dma map for the spare rx sge mbuf cluster */
14693 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14695 &fp->rx_sge_mbuf_spare_map)) {
14696 /* XXX unwind and free previous fastpath allocations */
14697 BLOGE(sc, "Failed to create dma map for "
14698 "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14702 /***************************/
14703 /* FP RX TPA MBUF DMA MAPS */
14704 /***************************/
14706 /* create dma maps for the rx tpa mbuf clusters */
14707 max_agg_queues = MAX_AGG_QS(sc);
14709 for (j = 0; j < max_agg_queues; j++) {
14710 if (bus_dmamap_create(fp->rx_mbuf_tag,
14712 &fp->rx_tpa_info[j].bd.m_map)) {
14713 /* XXX unwind and free previous fastpath allocations */
14714 BLOGE(sc, "Failed to create dma map for "
14715 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14720 /* create dma map for the spare rx tpa mbuf cluster */
14721 if (bus_dmamap_create(fp->rx_mbuf_tag,
14723 &fp->rx_tpa_info_mbuf_spare_map)) {
14724 /* XXX unwind and free previous fastpath allocations */
14725 BLOGE(sc, "Failed to create dma map for "
14726 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14730 bxe_init_sge_ring_bit_mask(fp);
14737 bxe_free_hsi_mem(struct bxe_softc *sc)
14739 struct bxe_fastpath *fp;
14740 int max_agg_queues;
14741 int i, j;
14743 if (sc->parent_dma_tag == NULL) {
14744 return; /* assume nothing was allocated */
14747 for (i = 0; i < sc->num_queues; i++) {
14750 /*******************/
14751 /* FP STATUS BLOCK */
14752 /*******************/
14754 bxe_dma_free(sc, &fp->sb_dma);
14755 memset(&fp->status_block, 0, sizeof(fp->status_block));
14757 /******************/
14758 /* FP TX BD CHAIN */
14759 /******************/
14761 bxe_dma_free(sc, &fp->tx_dma);
14762 fp->tx_chain = NULL;
14764 /******************/
14765 /* FP RX BD CHAIN */
14766 /******************/
14768 bxe_dma_free(sc, &fp->rx_dma);
14769 fp->rx_chain = NULL;
14771 /*******************/
14772 /* FP RX RCQ CHAIN */
14773 /*******************/
14775 bxe_dma_free(sc, &fp->rcq_dma);
14776 fp->rcq_chain = NULL;
14778 /*******************/
14779 /* FP RX SGE CHAIN */
14780 /*******************/
14782 bxe_dma_free(sc, &fp->rx_sge_dma);
14783 fp->rx_sge_chain = NULL;
14785 /***********************/
14786 /* FP TX MBUF DMA MAPS */
14787 /***********************/
14789 if (fp->tx_mbuf_tag != NULL) {
14790 for (j = 0; j < TX_BD_TOTAL; j++) {
14791 if (fp->tx_mbuf_chain[j].m_map != NULL) {
14792 bus_dmamap_unload(fp->tx_mbuf_tag,
14793 fp->tx_mbuf_chain[j].m_map);
14794 bus_dmamap_destroy(fp->tx_mbuf_tag,
14795 fp->tx_mbuf_chain[j].m_map);
14799 bus_dma_tag_destroy(fp->tx_mbuf_tag);
14800 fp->tx_mbuf_tag = NULL;
14803 /***********************/
14804 /* FP RX MBUF DMA MAPS */
14805 /***********************/
14807 if (fp->rx_mbuf_tag != NULL) {
14808 for (j = 0; j < RX_BD_TOTAL; j++) {
14809 if (fp->rx_mbuf_chain[j].m_map != NULL) {
14810 bus_dmamap_unload(fp->rx_mbuf_tag,
14811 fp->rx_mbuf_chain[j].m_map);
14812 bus_dmamap_destroy(fp->rx_mbuf_tag,
14813 fp->rx_mbuf_chain[j].m_map);
14817 if (fp->rx_mbuf_spare_map != NULL) {
14818 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14819 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14822 /***************************/
14823 /* FP RX TPA MBUF DMA MAPS */
14824 /***************************/
14826 max_agg_queues = MAX_AGG_QS(sc);
14828 for (j = 0; j < max_agg_queues; j++) {
14829 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14830 bus_dmamap_unload(fp->rx_mbuf_tag,
14831 fp->rx_tpa_info[j].bd.m_map);
14832 bus_dmamap_destroy(fp->rx_mbuf_tag,
14833 fp->rx_tpa_info[j].bd.m_map);
14837 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14838 bus_dmamap_unload(fp->rx_mbuf_tag,
14839 fp->rx_tpa_info_mbuf_spare_map);
14840 bus_dmamap_destroy(fp->rx_mbuf_tag,
14841 fp->rx_tpa_info_mbuf_spare_map);
14844 bus_dma_tag_destroy(fp->rx_mbuf_tag);
14845 fp->rx_mbuf_tag = NULL;
14848 /***************************/
14849 /* FP RX SGE MBUF DMA MAPS */
14850 /***************************/
14852 if (fp->rx_sge_mbuf_tag != NULL) {
14853 for (j = 0; j < RX_SGE_TOTAL; j++) {
14854 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14855 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14856 fp->rx_sge_mbuf_chain[j].m_map);
14857 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14858 fp->rx_sge_mbuf_chain[j].m_map);
14862 if (fp->rx_sge_mbuf_spare_map != NULL) {
14863 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14864 fp->rx_sge_mbuf_spare_map);
14865 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14866 fp->rx_sge_mbuf_spare_map);
14869 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14870 fp->rx_sge_mbuf_tag = NULL;
14874 /***************************/
14875 /* FW DECOMPRESSION BUFFER */
14876 /***************************/
14878 bxe_dma_free(sc, &sc->gz_buf_dma);
14880 free(sc->gz_strm, M_DEVBUF);
14881 sc->gz_strm = NULL;
14883 /*******************/
14884 /* SLOW PATH QUEUE */
14885 /*******************/
14887 bxe_dma_free(sc, &sc->spq_dma);
14894 bxe_dma_free(sc, &sc->sp_dma);
14901 bxe_dma_free(sc, &sc->eq_dma);
14904 /************************/
14905 /* DEFAULT STATUS BLOCK */
14906 /************************/
14908 bxe_dma_free(sc, &sc->def_sb_dma);
14911 bus_dma_tag_destroy(sc->parent_dma_tag);
14912 sc->parent_dma_tag = NULL;
14916 * A previous driver DMAE transaction may have occurred when the pre-boot
14917 * stage ended and boot began. This would invalidate the addresses of the
14918 * transaction, resulting in the was-error bit being set in the PCI block
14919 * and causing all hw-to-host PCIe transactions to time out. If this
14920 * happened we want to clear the interrupt which detected this from the
14921 * pglueb and the was-done bit.
14922 */
14923 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
14927 if (!CHIP_IS_E1x(sc)) {
14928 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
14929 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
14930 BLOGD(sc, DBG_LOAD,
14931 "Clearing 'was-error' bit that was set in pglueb");
14932 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
14938 bxe_prev_mcp_done(struct bxe_softc *sc)
14940 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
14941 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
14943 BLOGE(sc, "MCP response failure, aborting\n");
14950 static struct bxe_prev_list_node *
14951 bxe_prev_path_get_entry(struct bxe_softc *sc)
14953 struct bxe_prev_list_node *tmp;
14955 LIST_FOREACH(tmp, &bxe_prev_list, node) {
14956 if ((sc->pcie_bus == tmp->bus) &&
14957 (sc->pcie_device == tmp->slot) &&
14958 (SC_PATH(sc) == tmp->path)) {
14967 bxe_prev_is_path_marked(struct bxe_softc *sc)
14969 struct bxe_prev_list_node *tmp;
14972 mtx_lock(&bxe_prev_mtx);
14974 tmp = bxe_prev_path_get_entry(sc);
14977 BLOGD(sc, DBG_LOAD,
14978 "Path %d/%d/%d was marked by AER\n",
14979 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
14982 BLOGD(sc, DBG_LOAD,
14983 "Path %d/%d/%d was already cleaned from previous drivers\n",
14984 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
14988 mtx_unlock(&bxe_prev_mtx);
14994 bxe_prev_mark_path(struct bxe_softc *sc,
14995 uint8_t after_undi)
14997 struct bxe_prev_list_node *tmp;
14999 mtx_lock(&bxe_prev_mtx);
15001 /* Check whether the entry for this path already exists */
15002 tmp = bxe_prev_path_get_entry(sc);
15005 BLOGD(sc, DBG_LOAD,
15006 "Re-marking AER in path %d/%d/%d\n",
15007 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15009 BLOGD(sc, DBG_LOAD,
15010 "Removing AER indication from path %d/%d/%d\n",
15011 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15015 mtx_unlock(&bxe_prev_mtx);
15019 mtx_unlock(&bxe_prev_mtx);
15021 /* Create an entry for this path and add it */
15022 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15023 (M_NOWAIT | M_ZERO));
15025 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15029 tmp->bus = sc->pcie_bus;
15030 tmp->slot = sc->pcie_device;
15031 tmp->path = SC_PATH(sc);
15033 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
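/* annotation: 'undi' is kept as a per-port bitmask so a later load on
 * this path can tell that a pre-boot UNDI driver was active on the port */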
15035 mtx_lock(&bxe_prev_mtx);
15037 BLOGD(sc, DBG_LOAD,
15038 "Marked path %d/%d/%d - finished previous unload\n",
15039 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15040 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15042 mtx_unlock(&bxe_prev_mtx);
15048 bxe_do_flr(struct bxe_softc *sc)
15050 int i;
15052 /* only E2 and onwards support FLR */
15053 if (CHIP_IS_E1x(sc)) {
15054 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15058 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15059 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15060 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15061 sc->devinfo.bc_ver);
15065 /* Wait for Transaction Pending bit clean */
15066 for (i = 0; i < 4; i++) {
15067 if (i) {
15068 DELAY(((1 << (i - 1)) * 100) * 1000);
15069 }
15071 if (!bxe_is_pcie_pending(sc)) {
15076 BLOGE(sc, "PCIE transaction is not cleared, "
15077 "proceeding with reset anyway\n");
15081 BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15082 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15087 struct bxe_mac_vals {
15088 uint32_t xmac_addr;
15089 uint32_t xmac_val;
15090 uint32_t emac_addr;
15091 uint32_t emac_val;
15092 uint32_t umac_addr;
15093 uint32_t umac_val;
15094 uint32_t bmac_addr;
15095 uint32_t bmac_val[2];
15096 };
15099 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15100 struct bxe_mac_vals *vals)
15102 uint32_t val, base_addr, offset, mask, reset_reg;
15103 uint8_t mac_stopped = FALSE;
15104 uint8_t port = SC_PORT(sc);
15105 uint32_t wb_data[2];
15107 /* reset addresses as they also mark which values were changed */
15108 vals->bmac_addr = 0;
15109 vals->umac_addr = 0;
15110 vals->xmac_addr = 0;
15111 vals->emac_addr = 0;
15113 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15115 if (!CHIP_IS_E3(sc)) {
15116 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15117 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15118 if ((mask & reset_reg) && val) {
15119 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15120 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15121 : NIG_REG_INGRESS_BMAC0_MEM;
15122 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15123 : BIGMAC_REGISTER_BMAC_CONTROL;
15126 * use rd/wr since we cannot use dmae. This is safe
15127 * since MCP won't access the bus due to the request
15128 * to unload, and no function on the path can be
15129 * loaded at this time.
15131 wb_data[0] = REG_RD(sc, base_addr + offset);
15132 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15133 vals->bmac_addr = base_addr + offset;
15134 vals->bmac_val[0] = wb_data[0];
15135 vals->bmac_val[1] = wb_data[1];
15136 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15137 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15138 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15141 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15142 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15143 vals->emac_val = REG_RD(sc, vals->emac_addr);
15144 REG_WR(sc, vals->emac_addr, 0);
15145 mac_stopped = TRUE;
15147 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15148 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15149 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15150 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15151 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15152 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15153 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15154 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15155 REG_WR(sc, vals->xmac_addr, 0);
15156 mac_stopped = TRUE;
15159 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15160 if (mask & reset_reg) {
15161 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15162 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15163 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15164 vals->umac_val = REG_RD(sc, vals->umac_addr);
15165 REG_WR(sc, vals->umac_addr, 0);
15166 mac_stopped = TRUE;
15175 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15176 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
15177 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
15178 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
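/*
 * Worked example (annotation, not in the original source): the UNDI
 * producer register packs the BD producer in the upper 16 bits and the
 * RCQ producer in the lower 16. A raw value of 0x00050003 decodes to
 * bd == 5, rcq == 3; incrementing both by 1 repacks as
 * BXE_PREV_UNDI_PROD(4, 6) == 0x00060004.
 */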
15181 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15182 uint8_t port,
15183 uint8_t inc)
15185 uint16_t rcq, bd;
15186 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15188 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15189 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15191 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15192 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15194 BLOGD(sc, DBG_LOAD,
15195 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15200 bxe_prev_unload_common(struct bxe_softc *sc)
15202 uint32_t reset_reg, tmp_reg = 0, rc;
15203 uint8_t prev_undi = FALSE;
15204 struct bxe_mac_vals mac_vals;
15205 uint32_t timer_count = 1000;
15206 uint32_t prev_brb;
15209 * It is possible a previous function received the 'common' answer but
15210 * has not loaded yet, creating a scenario where multiple functions
15211 * receive 'common' on the same path.
15213 BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15215 memset(&mac_vals, 0, sizeof(mac_vals));
15217 if (bxe_prev_is_path_marked(sc)) {
15218 return (bxe_prev_mcp_done(sc));
15221 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15223 /* Reset should be performed after BRB is emptied */
15224 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15225 /* Close the MAC Rx to prevent BRB from filling up */
15226 bxe_prev_unload_close_mac(sc, &mac_vals);
15228 /* close LLH filters towards the BRB */
15229 elink_set_rx_filter(&sc->link_params, 0);
15232 * Check if the UNDI driver was previously loaded.
15233 * UNDI driver initializes CID offset for normal bell to 0x7
15235 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15236 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15237 if (tmp_reg == 0x7) {
15238 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15240 /* clear the UNDI indication */
15241 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15242 /* clear possible idle check errors */
15243 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15247 /* wait until BRB is empty */
15248 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15249 while (timer_count) {
15250 prev_brb = tmp_reg;
15252 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15257 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15259 /* reset timer as long as BRB actually gets emptied */
15260 if (prev_brb > tmp_reg) {
15261 timer_count = 1000;
15266 /* If UNDI resides in memory, manually increment it */
15268 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15274 if (!timer_count) {
15275 BLOGE(sc, "Failed to empty BRB\n");
15279 /* No packets are in the pipeline, path is ready for reset */
15280 bxe_reset_common(sc);
15282 if (mac_vals.xmac_addr) {
15283 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15285 if (mac_vals.umac_addr) {
15286 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15288 if (mac_vals.emac_addr) {
15289 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15291 if (mac_vals.bmac_addr) {
15292 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15293 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15296 rc = bxe_prev_mark_path(sc, prev_undi);
15298 bxe_prev_mcp_done(sc);
15302 return (bxe_prev_mcp_done(sc));
15306 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15310 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15312 /* Test if previous unload process was already finished for this path */
15313 if (bxe_prev_is_path_marked(sc)) {
15314 return (bxe_prev_mcp_done(sc));
15317 BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15320 * If function has FLR capabilities, and existing FW version matches
15321 * the one required, then FLR will be sufficient to clean any residue
15322 * left by previous driver
15324 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15326 /* fw version is good */
15327 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15328 rc = bxe_do_flr(sc);
15332 /* FLR was performed */
15333 BLOGD(sc, DBG_LOAD, "FLR successful\n");
15337 BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15339 /* Close the MCP request, return failure*/
15340 rc = bxe_prev_mcp_done(sc);
15342 rc = BXE_PREV_WAIT_NEEDED;
15349 bxe_prev_unload(struct bxe_softc *sc)
15351 int time_counter = 10;
15352 uint32_t fw, hw_lock_reg, hw_lock_val;
15353 int rc = 0;
15356 * Clear HW from errors which may have resulted from an interrupted
15357 * DMAE transaction.
15359 bxe_prev_interrupted_dmae(sc);
15361 /* Release previously held locks */
15362 hw_lock_reg =
15363 (SC_FUNC(sc) <= 5) ?
15364 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15365 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15367 hw_lock_val = (REG_RD(sc, hw_lock_reg));
15369 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15370 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15371 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15372 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15374 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15375 REG_WR(sc, hw_lock_reg, 0xffffffff);
15377 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15380 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15381 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15382 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15386 /* Lock MCP using an unload request */
15387 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15389 BLOGE(sc, "MCP response failure, aborting\n");
15394 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15395 rc = bxe_prev_unload_common(sc);
15399 /* a non-common reply from the MCP might require looping */
15400 rc = bxe_prev_unload_uncommon(sc);
15401 if (rc != BXE_PREV_WAIT_NEEDED) {
15406 } while (--time_counter);
15408 if (!time_counter || rc) {
15409 BLOGE(sc, "Failed to unload previous driver!"
15410 " time_counter %d rc %d\n", time_counter, rc);
15418 bxe_dcbx_set_state(struct bxe_softc *sc,
15419 uint8_t dcb_on,
15420 uint32_t dcbx_enabled)
15422 if (!CHIP_IS_E1x(sc)) {
15423 sc->dcb_state = dcb_on;
15424 sc->dcbx_enabled = dcbx_enabled;
15426 sc->dcb_state = FALSE;
15427 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15429 BLOGD(sc, DBG_LOAD,
15430 "DCB state [%s:%s]\n",
15431 dcb_on ? "ON" : "OFF",
15432 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15433 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15434 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15435 "on-chip with negotiation" : "invalid");
15438 /* must be called after sriov-enable */
15440 bxe_set_qm_cid_count(struct bxe_softc *sc)
15442 int cid_count = BXE_L2_MAX_CID(sc);
15444 if (IS_SRIOV(sc)) {
15445 cid_count += BXE_VF_CIDS;
15448 if (CNIC_SUPPORT(sc)) {
15449 cid_count += CNIC_CID_MAX;
15452 return (roundup(cid_count, QM_CID_ROUND));
15456 bxe_init_multi_cos(struct bxe_softc *sc)
15458 int pri, cos;
15460 uint32_t pri_map = 0; /* XXX change to user config */
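/*
 * Annotation (not in the original source): pri_map encodes one 4-bit CoS
 * value per 802.1p priority, priority 0 in the lowest nibble. E.g.
 * pri_map == 0x00fa1100 maps priorities 2 and 3 to CoS 1, priority 4 to
 * CoS 10 and priority 5 to CoS 15; any nibble >= sc->max_cos is reset to
 * 0 with a warning by the loop below.
 */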
15462 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15463 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15464 if (cos < sc->max_cos) {
15465 sc->prio_to_cos[pri] = cos;
15467 BLOGW(sc, "Invalid COS %d for priority %d "
15468 "(max COS is %d), setting to 0\n",
15469 cos, pri, (sc->max_cos - 1));
15470 sc->prio_to_cos[pri] = 0;
15476 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15478 struct bxe_softc *sc;
15479 int error, result;
15480 uint32_t temp;
15482 error = sysctl_handle_int(oidp, &result, 0, req);
15484 if (error || !req->newptr) {
15490 sc = (struct bxe_softc *)arg1;
15492 BLOGI(sc, "... dumping driver state ...\n");
15493 temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15494 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15501 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15503 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15504 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15505 uint32_t *offset;
15506 uint64_t value = 0;
15507 int index = (int)arg2;
15509 if (index >= BXE_NUM_ETH_STATS) {
15510 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15514 offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15516 switch (bxe_eth_stats_arr[index].size) {
15518 value = (uint64_t)*offset;
15521 value = HILO_U64(*offset, *(offset + 1));
15524 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15525 index, bxe_eth_stats_arr[index].size);
15529 return (sysctl_handle_64(oidp, &value, 0, req));
15533 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15535 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15536 uint32_t *eth_stats;
15537 uint32_t *offset;
15538 uint64_t value = 0;
15539 uint32_t q_stat = (uint32_t)arg2;
15540 uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15541 uint32_t index = (q_stat & 0xffff);
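/* annotation: arg2 packs the fastpath index in the upper 16 bits and the
 * stat array index in the lower 16; see the matching encode in
 * bxe_add_sysctls() where q_stat = ((i << 16) | j) */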
15543 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15545 if (index >= BXE_NUM_ETH_Q_STATS) {
15546 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15550 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15552 switch (bxe_eth_q_stats_arr[index].size) {
15554 value = (uint64_t)*offset;
15557 value = HILO_U64(*offset, *(offset + 1));
15560 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15561 index, bxe_eth_q_stats_arr[index].size);
15565 return (sysctl_handle_64(oidp, &value, 0, req));
15569 bxe_add_sysctls(struct bxe_softc *sc)
15571 struct sysctl_ctx_list *ctx;
15572 struct sysctl_oid_list *children;
15573 struct sysctl_oid *queue_top, *queue;
15574 struct sysctl_oid_list *queue_top_children, *queue_children;
15575 char queue_num_buf[32];
15576 uint32_t q_stat;
15577 int i, j;
15579 ctx = device_get_sysctl_ctx(sc->dev);
15580 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15582 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15583 CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15584 "version");
15586 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15587 BCM_5710_FW_MAJOR_VERSION,
15588 BCM_5710_FW_MINOR_VERSION,
15589 BCM_5710_FW_REVISION_VERSION,
15590 BCM_5710_FW_ENGINEERING_VERSION);
15592 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15593 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
15594 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
15595 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
15596 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15598 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15599 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15600 "multifunction vnics per port");
15602 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15603 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15604 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15605 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15607 sc->devinfo.pcie_link_width);
15609 sc->debug = bxe_debug;
15611 #if __FreeBSD_version >= 900000
15612 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15613 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15614 "bootcode version");
15615 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15616 CTLFLAG_RD, sc->fw_ver_str, 0,
15617 "firmware version");
15618 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15619 CTLFLAG_RD, sc->mf_mode_str, 0,
15620 "multifunction mode");
15621 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15622 CTLFLAG_RD, sc->mac_addr_str, 0,
15623 "mac address");
15624 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15625 CTLFLAG_RD, sc->pci_link_str, 0,
15626 "pci link status");
15627 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15628 CTLFLAG_RW, &sc->debug,
15629 "debug logging mode");
15631 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15632 CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15633 "bootcode version");
15634 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15635 CTLFLAG_RD, &sc->fw_ver_str, 0,
15636 "firmware version");
15637 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15638 CTLFLAG_RD, &sc->mf_mode_str, 0,
15639 "multifunction mode");
15640 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15641 CTLFLAG_RD, &sc->mac_addr_str, 0,
15642 "mac address");
15643 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15644 CTLFLAG_RD, &sc->pci_link_str, 0,
15645 "pci link status");
15646 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15647 CTLFLAG_RW, &sc->debug, 0,
15648 "debug logging mode");
15649 #endif /* #if __FreeBSD_version >= 900000 */
15651 sc->trigger_grcdump = 0;
15652 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15653 CTLFLAG_RW, &sc->trigger_grcdump, 0,
15654 "trigger grcdump should be invoked"
15655 " before collecting grcdump");
15657 sc->grcdump_started = 0;
15658 sc->grcdump_done = 0;
15659 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15660 CTLFLAG_RD, &sc->grcdump_done, 0,
15661 "set by driver when grcdump is done");
15663 sc->rx_budget = bxe_rx_budget;
15664 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15665 CTLFLAG_RW, &sc->rx_budget, 0,
15666 "rx processing budget");
15668 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15669 CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15670 bxe_sysctl_state, "IU", "dump driver state");
15672 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15673 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15674 bxe_eth_stats_arr[i].string,
15675 CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15676 bxe_sysctl_eth_stat, "LU",
15677 bxe_eth_stats_arr[i].string);
15680 /* add a new parent node for all queues "dev.bxe.#.queue" */
15681 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15682 CTLFLAG_RD, NULL, "queue");
15683 queue_top_children = SYSCTL_CHILDREN(queue_top);
15685 for (i = 0; i < sc->num_queues; i++) {
15686 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15687 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15688 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15689 queue_num_buf, CTLFLAG_RD, NULL,
15691 queue_children = SYSCTL_CHILDREN(queue);
15693 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15694 q_stat = ((i << 16) | j);
15695 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15696 bxe_eth_q_stats_arr[j].string,
15697 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15698 bxe_sysctl_eth_q_stat, "LU",
15699 bxe_eth_q_stats_arr[j].string);
15705 bxe_alloc_buf_rings(struct bxe_softc *sc)
15707 #if __FreeBSD_version >= 901504
15710 struct bxe_fastpath *fp;
15712 for (i = 0; i < sc->num_queues; i++) {
15716 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15717 M_NOWAIT, &fp->tx_mtx);
15718 if (fp->tx_br == NULL)
15726 bxe_free_buf_rings(struct bxe_softc *sc)
15728 #if __FreeBSD_version >= 901504
15731 struct bxe_fastpath *fp;
15733 for (i = 0; i < sc->num_queues; i++) {
15738 buf_ring_free(fp->tx_br, M_DEVBUF);
15747 bxe_init_fp_mutexs(struct bxe_softc *sc)
15750 struct bxe_fastpath *fp;
15752 for (i = 0; i < sc->num_queues; i++) {
15756 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15757 "bxe%d_fp%d_tx_lock", sc->unit, i);
15758 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15760 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15761 "bxe%d_fp%d_rx_lock", sc->unit, i);
15762 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15767 bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15770 struct bxe_fastpath *fp;
15772 for (i = 0; i < sc->num_queues; i++) {
15776 if (mtx_initialized(&fp->tx_mtx)) {
15777 mtx_destroy(&fp->tx_mtx);
15780 if (mtx_initialized(&fp->rx_mtx)) {
15781 mtx_destroy(&fp->rx_mtx);
15788 * Device attach function.
15790 * Allocates device resources, performs secondary chip identification, and
15791 * initializes driver instance variables. This function is called from driver
15792 * load after a successful probe.
15795 * 0 = Success, >0 = Failure
15798 bxe_attach(device_t dev)
15800 struct bxe_softc *sc;
15802 sc = device_get_softc(dev);
15804 BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15806 sc->state = BXE_STATE_CLOSED;
15809 sc->unit = device_get_unit(dev);
15811 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15813 sc->pcie_bus = pci_get_bus(dev);
15814 sc->pcie_device = pci_get_slot(dev);
15815 sc->pcie_func = pci_get_function(dev);
15817 /* enable bus master capability */
15818 pci_enable_busmaster(dev);
15821 if (bxe_allocate_bars(sc) != 0) {
15825 /* initialize the mutexes */
15826 bxe_init_mutexes(sc);
15828 /* prepare the periodic callout */
15829 callout_init(&sc->periodic_callout, 0);
15831 /* prepare the chip taskqueue */
15832 sc->chip_tq_flags = CHIP_TQ_NONE;
15833 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15834 "bxe%d_chip_tq", sc->unit);
15835 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15836 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15837 taskqueue_thread_enqueue,
15839 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15840 "%s", sc->chip_tq_name);
15842 /* get device info and set params */
15843 if (bxe_get_device_info(sc) != 0) {
15844 BLOGE(sc, "getting device info\n");
15845 bxe_deallocate_bars(sc);
15846 pci_disable_busmaster(dev);
15850 /* get final misc params */
15851 bxe_get_params(sc);
15853 /* set the default MTU (changed via ifconfig) */
15854 sc->mtu = ETHERMTU;
15856 bxe_set_modes_bitmap(sc);
15859 * If in AFEX mode and the function is configured for FCoE
15860 * then bail... no L2 allowed.
15863 /* get phy settings from shmem and 'and' against admin settings */
15864 bxe_get_phy_info(sc);
15866 /* initialize the FreeBSD ifnet interface */
15867 if (bxe_init_ifnet(sc) != 0) {
15868 bxe_release_mutexes(sc);
15869 bxe_deallocate_bars(sc);
15870 pci_disable_busmaster(dev);
15874 if (bxe_add_cdev(sc) != 0) {
15875 if (sc->ifp != NULL) {
15876 ether_ifdetach(sc->ifp);
15878 ifmedia_removeall(&sc->ifmedia);
15879 bxe_release_mutexes(sc);
15880 bxe_deallocate_bars(sc);
15881 pci_disable_busmaster(dev);
15885 /* allocate device interrupts */
15886 if (bxe_interrupt_alloc(sc) != 0) {
15888 if (sc->ifp != NULL) {
15889 ether_ifdetach(sc->ifp);
15891 ifmedia_removeall(&sc->ifmedia);
15892 bxe_release_mutexes(sc);
15893 bxe_deallocate_bars(sc);
15894 pci_disable_busmaster(dev);
15898 bxe_init_fp_mutexs(sc);
15900 if (bxe_alloc_buf_rings(sc) != 0) {
15901 bxe_free_buf_rings(sc);
15902 bxe_interrupt_free(sc);
15904 if (sc->ifp != NULL) {
15905 ether_ifdetach(sc->ifp);
15907 ifmedia_removeall(&sc->ifmedia);
15908 bxe_release_mutexes(sc);
15909 bxe_deallocate_bars(sc);
15910 pci_disable_busmaster(dev);
15915 if (bxe_alloc_ilt_mem(sc) != 0) {
15916 bxe_free_buf_rings(sc);
15917 bxe_interrupt_free(sc);
15919 if (sc->ifp != NULL) {
15920 ether_ifdetach(sc->ifp);
15922 ifmedia_removeall(&sc->ifmedia);
15923 bxe_release_mutexes(sc);
15924 bxe_deallocate_bars(sc);
15925 pci_disable_busmaster(dev);
15929 /* allocate the host hardware/software hsi structures */
15930 if (bxe_alloc_hsi_mem(sc) != 0) {
15931 bxe_free_ilt_mem(sc);
15932 bxe_free_buf_rings(sc);
15933 bxe_interrupt_free(sc);
15935 if (sc->ifp != NULL) {
15936 ether_ifdetach(sc->ifp);
15938 ifmedia_removeall(&sc->ifmedia);
15939 bxe_release_mutexes(sc);
15940 bxe_deallocate_bars(sc);
15941 pci_disable_busmaster(dev);
15945 /* need to reset chip if UNDI was active */
15946 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
15948 sc->fw_seq =
15949 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
15950 DRV_MSG_SEQ_NUMBER_MASK);
15951 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
15952 bxe_prev_unload(sc);
15957 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15959 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
15960 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
15961 SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
15962 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
15963 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
15964 bxe_dcbx_init_params(sc);
15966 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15970 /* calculate qm_cid_count */
15971 sc->qm_cid_count = bxe_set_qm_cid_count(sc);
15972 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
15975 bxe_init_multi_cos(sc);
15977 bxe_add_sysctls(sc);
15983 * Device detach function.
15985 * Stops the controller, resets the controller, and releases resources.
15988 * 0 = Success, >0 = Failure
15991 bxe_detach(device_t dev)
15993 struct bxe_softc *sc;
15994 if_t ifp;
15996 sc = device_get_softc(dev);
15998 BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16000 ifp = sc->ifp;
16001 if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16002 BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16008 /* stop the periodic callout */
16009 bxe_periodic_stop(sc);
16011 /* stop the chip taskqueue */
16012 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16014 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16015 taskqueue_free(sc->chip_tq);
16016 sc->chip_tq = NULL;
16019 /* stop and reset the controller if it was open */
16020 if (sc->state != BXE_STATE_CLOSED) {
16022 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16023 sc->state = BXE_STATE_DISABLED;
16024 BXE_CORE_UNLOCK(sc);
16027 /* release the network interface */
16029 ether_ifdetach(ifp);
16031 ifmedia_removeall(&sc->ifmedia);
16033 /* XXX do the following based on driver state... */
16035 /* free the host hardware/software hsi structures */
16036 bxe_free_hsi_mem(sc);
16039 bxe_free_ilt_mem(sc);
16041 bxe_free_buf_rings(sc);
16043 /* release the interrupts */
16044 bxe_interrupt_free(sc);
16046 /* Release the mutexes*/
16047 bxe_destroy_fp_mutexs(sc);
16048 bxe_release_mutexes(sc);
16051 /* Release the PCIe BAR mapped memory */
16052 bxe_deallocate_bars(sc);
16054 /* Release the FreeBSD interface. */
16055 if (sc->ifp != NULL) {
16056 if_free(sc->ifp);
16057 }
16059 pci_disable_busmaster(dev);
16065 * Device shutdown function.
16067 * Stops and resets the controller.
16073 bxe_shutdown(device_t dev)
16075 struct bxe_softc *sc;
16077 sc = device_get_softc(dev);
16079 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16081 /* stop the periodic callout */
16082 bxe_periodic_stop(sc);
16085 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16086 BXE_CORE_UNLOCK(sc);
16092 bxe_igu_ack_sb(struct bxe_softc *sc,
16093 uint8_t igu_sb_id,
16094 uint8_t segment,
16095 uint16_t index,
16096 uint8_t op,
16097 uint8_t update)
16099 uint32_t igu_addr = sc->igu_base_addr;
16100 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
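/* annotation: each IGU interrupt-ack command slot is 8 bytes wide, so the
 * slot for igu_sb_id sits at IGU_CMD_INT_ACK_BASE + (igu_sb_id * 8)
 * within the IGU BAR window */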
16101 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16105 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16106 uint8_t func,
16107 uint8_t idu_sb_id,
16108 uint8_t is_pf)
16110 uint32_t data, ctl, cnt = 100;
16111 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16112 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16113 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16114 uint32_t sb_bit = 1 << (idu_sb_id%32);
16115 uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16116 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
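    /*
     * Rough sketch of the encoding above (added commentary, not in the
     * original): func_encode carries the function id with the is-PF flag
     * OR'ed in at IGU_FID_ENCODE_IS_PF_SHIFT, and addr_encode selects the
     * producer-update command slot for this status block. For example, a PF
     * cleaning up idu_sb_id 10 targets IGU_CMD_E2_PROD_UPD_BASE + 10, and
     * the completion is polled as bit (10 % 32) of the dword at
     * IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (10 / 32) * 4.
     */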
    /* Not supported in BC mode */
    if (CHIP_INT_MODE_IS_BC(sc)) {
    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
            IGU_REGULAR_CLEANUP_SET |
            IGU_REGULAR_BCLEANUP);
    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
          data, igu_addr_data);
    REG_WR(sc, igu_addr_data, data);
    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
          ctl, igu_addr_ctl);
    REG_WR(sc, igu_addr_ctl, ctl);
    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    /* wait for clean up to finish */
    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
        BLOGD(sc, DBG_LOAD,
              "Unable to finish IGU cleanup: "
              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
bxe_igu_clear_sb(struct bxe_softc *sc,
    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
/*******************/
/* ECORE CALLBACKS */
/*******************/
bxe_reset_common(struct bxe_softc *sc)
    uint32_t val = 0x1400;
    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
    if (CHIP_IS_E3(sc)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
bxe_common_init_phy(struct bxe_softc *sc)
    uint32_t shmem_base[2];
    uint32_t shmem2_base[2];
    /* Avoid common init in case MFW supports LFA */
    if (SHMEM2_RD(sc, size) >
        (uint32_t)offsetof(struct shmem2_region,
                           lfa_host_addr[SC_PORT(sc)])) {
    shmem_base[0] = sc->devinfo.shmem_base;
    shmem2_base[0] = sc->devinfo.shmem2_base;
    if (!CHIP_IS_E1x(sc)) {
        shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
    bxe_acquire_phy_lock(sc);
    elink_common_init_phy(sc, shmem_base, shmem2_base,
                          sc->devinfo.chip_id, 0);
    bxe_release_phy_lock(sc);
bxe_pf_disable(struct bxe_softc *sc)
    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
    val &= ~IGU_PF_CONF_FUNC_EN;
    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
bxe_init_pxp(struct bxe_softc *sc)
    int r_order, w_order;
    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
    if (sc->mrrs == -1) {
        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
        r_order = sc->mrrs;
    ecore_init_pxp_arb(sc, r_order, w_order);
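/*
 * Added commentary: the PXP2 "pretend" registers below let one PCI function
 * issue GRC accesses as if it were another function. The per-function
 * register address is derived from the F0 base plus a fixed stride, e.g.
 * absolute function 3 uses PXP2_REG_PGL_PRETEND_FUNC_F0 + 3 * stride
 * (this assumes the per-function pretend registers are evenly spaced, which
 * is exactly what the F1 - F0 stride computation relies on).
 */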
bxe_get_pretend_reg(struct bxe_softc *sc)
    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
    return (base + (SC_ABS_FUNC(sc)) * stride);
 * Called only on E1H or E2.
 * When pretending to be a PF, the pretend value is the function number 0..7.
 * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
bxe_pretend_func(struct bxe_softc *sc,
                 uint16_t pretend_func_val)
    uint32_t pretend_reg;
    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
    /* get my own pretend register */
    pretend_reg = bxe_get_pretend_reg(sc);
    REG_WR(sc, pretend_reg, pretend_func_val);
    REG_RD(sc, pretend_reg);
bxe_iov_init_dmae(struct bxe_softc *sc)
bxe_iov_init_dq(struct bxe_softc *sc)
/* send a NIG loopback debug packet */
bxe_lb_pckt(struct bxe_softc *sc)
    uint32_t wb_write[3];
    /* Ethernet source and destination addresses */
    wb_write[0] = 0x55555555;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x20; /* SOP */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
    /* NON-IP protocol */
    wb_write[0] = 0x09000000;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
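    /*
     * Added note: the two 3-dword writes above appear to emit one minimal
     * debug frame through the NIG loopback path: the first write carries
     * placeholder MAC-address bytes with the SOP control word (0x20), the
     * second a non-IP ethertype with the EOP control word (0x10). The
     * control-word semantics here are inferred from the inline SOP/EOP
     * comments, not independently verified against the hardware spec.
     */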
 * Some of the internal memories are not directly readable from the driver.
 * To test them we send debug packets.
bxe_int_mem_test(struct bxe_softc *sc)
    if (CHIP_REV_IS_FPGA(sc)) {
    } else if (CHIP_REV_IS_EMUL(sc)) {
    /* disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
    /* write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
    /* send Ethernet packet */
    /* TODO: do we need to reset the NIG statistics first? */
    /* Wait until NIG register shows 1 packet of size 0x10 */
    count = 1000 * factor;
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
    /* wait until PRS register shows 1 packet */
    count = (1000 * factor);
        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    /* Reset and init BRB, PRS */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    /* Disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
    /* Write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
    /* send 10 Ethernet packets */
    for (i = 0; i < 10; i++) {
    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
    count = (1000 * factor);
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
    /* Wait until PRS register shows 2 packets */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    /* Write 1 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
    /* Wait until PRS register shows 3 packets */
    DELAY(10000 * factor);
    /* Wait until NIG register shows 1 packet of size 0x10 */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    /* clear NIG EOP FIFO */
    for (i = 0; i < 11; i++) {
        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
    /* Reset and init BRB, PRS, NIG */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    if (!CNIC_SUPPORT(sc)) {
        REG_WR(sc, PRS_REG_NIC_MODE, 1);
    /* Enable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
bxe_setup_fan_failure_detection(struct bxe_softc *sc)
    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
           SHARED_HW_CFG_FAN_FAILURE_MASK);
    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
     * The fan failure mechanism is usually related to the PHY type since
     * the power consumption of the board is affected by the PHY. Currently,
     * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
        for (port = PORT_0; port < PORT_MAX; port++) {
            is_required |= elink_fan_failure_det_req(sc,
                                                     sc->devinfo.shmem_base,
                                                     sc->devinfo.shmem2_base,
    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
    if (is_required == 0) {
    /* Fan failure is indicated by SPIO 5 */
    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
    /* set to active low mode */
    val = REG_RD(sc, MISC_REG_SPIO_INT);
    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
    REG_WR(sc, MISC_REG_SPIO_INT, val);
    /* enable interrupt to signal the IGU */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    val |= MISC_SPIO_SPIO5;
    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
bxe_enable_blocks_attention(struct bxe_softc *sc)
    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
     * mask read length error interrupts in brb for parser
     * (parsing unit and 'checksum and crc' unit);
     * these errors are legal (the PU reads a fixed length and CAC can cause
     * a read length error on truncated packets)
    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
    /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
    /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
    /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
    /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
    /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
    /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
    if (!CHIP_IS_E1x(sc)) {
        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
    /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
    if (!CHIP_IS_E1x(sc)) {
        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
    /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
 * bxe_init_hw_common - initialize the HW at the COMMON phase.
 * @sc: driver handle
bxe_init_hw_common(struct bxe_softc *sc)
    uint8_t abs_func_id;
    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
     * take the RESET lock to protect the undi_unload flow from accessing
     * registers while we are resetting the chip
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
    bxe_reset_common(sc);
    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
    if (CHIP_IS_E3(sc)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
    BLOGD(sc, DBG_LOAD, "after misc block init\n");
    if (!CHIP_IS_E1x(sc)) {
         * In 4-port or 2-port mode we need to turn off master-enable for
         * everyone; after that we turn it back on for ourselves. So we
         * disregard multi-function and always disable all functions on the
         * given path: this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
        for (abs_func_id = SC_PATH(sc);
             abs_func_id < (E2_FUNC_MAX * 2);
             abs_func_id += 2) {
            if (abs_func_id == SC_ABS_FUNC(sc)) {
                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
            bxe_pretend_func(sc, abs_func_id);
            /* clear pf enable */
            bxe_pf_disable(sc);
            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
    BLOGD(sc, DBG_LOAD, "after pf disable\n");
    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
    if (CHIP_IS_E1(sc)) {
         * enable HW interrupt from PXP on USDM overflow
         * bit 16 on INT_MASK_0
        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
#ifdef __BIG_ENDIAN
    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
    /* make sure this value is 0 */
    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
    ecore_ilt_init_page_size(sc, INITOP_SET);
    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
    /* let the HW do its magic... */
    /* finish PXP init */
    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
    BLOGD(sc, DBG_LOAD, "after pxp init\n");
     * Timer bug workaround for E2 only. We need to set the entire ILT to have
     * entries with value "0" and the valid bit on. This needs to be done by
     * the first PF that is loaded in a path (i.e. common phase)
    if (!CHIP_IS_E1x(sc)) {
         * In E2 there is a bug in the timers block that can cause function 6 / 7
         * (i.e. vnic3) to start even if it is marked as "scan-off".
         * This occurs when a different function (func2,3) is being marked
         * as "scan-off". Real-life scenario for example: if a driver is being
         * load-unloaded while func6,7 are down. This will cause the timer to access
         * the ilt, translate to a logical address and send a request to read/write.
         * Since the ilt for the function that is down is not valid, this will cause
         * a translation error which is unrecoverable.
         * The workaround is intended to make sure that when this happens nothing
         * fatal will occur. The workaround:
         * 1. First PF driver which loads on a path will:
         *    a. After taking the chip out of reset, by using pretend,
         *       it will write "0" to the following registers of
         *       REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
         *       REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0);
         *       REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0);
         *       And for itself it will write '1' to
         *       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
         *       dmae-operations (writing to pram for example.)
         *       note: can be done for only function 6,7 but cleaner this
         *    b. Write zero+valid to the entire ILT.
         *    c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
         *       VNIC3 (of that port). The range allocated will be the
         *       entire ILT. This is needed to prevent ILT range error.
         * 2. Any PF driver load flow:
         *    a. ILT update with the physical addresses of the allocated
         *    b. Wait 20msec. - note that this timeout is needed to make
         *       sure there are no requests in one of the PXP internal
         *       queues with "old" ILT addresses.
         *    c. PF enable in the PGLC.
         *    d. Clear the was_error of the PF in the PGLC. (could have
         *       occurred while driver was down)
         *    e. PF enable in the CFC (WEAK + STRONG)
         *    f. Timers scan enable
         * 3. PF driver unload flow:
         *    a. Clear the Timers scan_en.
         *    b. Polling for scan_on=0 for that PF.
         *    c. Clear the PF enable bit in the PXP.
         *    d. Clear the PF enable in the CFC (WEAK + STRONG)
         *    e. Write zero+valid to all ILT entries (The valid bit must
         *    f. If this is VNIC 3 of a port then also init
         *       first_timers_ilt_entry to zero and last_timers_ilt_entry
         *       to the last entry in the ILT.
         * Currently the PF error in the PGLC is non-recoverable.
         * In the future there will be a recovery routine for this error.
         * Currently attention is masked.
         * Having an MCP lock on the load/unload process does not guarantee that
         * there is no Timer disable during Func6/7 enable. This is because the
         * Timers scan is currently being cleared by the MCP on FLR.
         * Step 2.d can be done only for PF6/7 and the driver can also check if
         * there is error before clearing it. But the flow above is simpler and
         * All ILT entries are written by zero+valid and not just PF6/7
         * ILT entries since in the future the ILT entries allocation for
         * PF-s might be dynamic.
        struct ilt_client_info ilt_cli;
        struct ecore_ilt ilt;
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        memset(&ilt, 0, sizeof(struct ecore_ilt));
        /* initialize dummy TM client */
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;
         * Step 1: set zeroes to all ilt page entries with valid bit on
         * Step 2: set the timers first/last ilt entry to point
         * to the entire range to prevent ILT range error for 3rd/4th
         * vnic (this code assumes existence of the vnic)
         * both steps performed by call to ecore_ilt_client_init_op()
         * with dummy TM client
         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
         * and its sibling registers are split
        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
    REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
    REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
    REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
    if (!CHIP_IS_E1x(sc)) {
        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
        /* let the HW do its magic... */
            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
        } while (factor-- && (val != 1));
            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
    bxe_iov_init_dmae(sc);
    /* clean the DMAE memory */
    sc->dmae_ready = 1;
    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
    /* QM queues pointers table */
    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
    /* soft reset pulse */
    REG_WR(sc, QM_REG_SOFT_RESET, 1);
    REG_WR(sc, QM_REG_SOFT_RESET, 0);
    if (CNIC_SUPPORT(sc))
        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
    if (!CHIP_REV_IS_SLOW(sc)) {
        /* enable hw interrupt from doorbell Q */
        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
             * configure that AFEX and VLAN headers must be
             * received in AFEX mode
            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
             * Bit-map indicating which L2 hdrs may appear
             * after the basic Ethernet header
            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
    if (!CHIP_IS_E1x(sc)) {
        /* reset VFC memories */
        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);
        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);
    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
    /* sync semi rtc */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
    if (!CHIP_IS_E1x(sc)) {
        if (IS_MF_AFEX(sc)) {
             * configure that AFEX and VLAN headers must be
             * sent in AFEX mode
            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
    REG_WR(sc, SRC_REG_SOFT_RST, 1);
    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
    if (CNIC_SUPPORT(sc)) {
        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
    REG_WR(sc, SRC_REG_SOFT_RST, 0);
    if (sizeof(union cdu_context) != 1024) {
        /* we currently assume that a context is 1024 bytes */
        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
              (long)sizeof(union cdu_context));
    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
    val = (4 << 24) + (0 << 12) + 1024;
    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
    /* enable context validation interrupt from CFC */
    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
    /* set the thresholds to prevent CFC/CDU race */
    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2814, 0xffffffff);
    REG_WR(sc, 0x3820, 0xffffffff);
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
    if (!CHIP_IS_E1(sc)) {
        /* in E3 this is done in the per-port section */
        if (!CHIP_IS_E3(sc))
            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
    if (CHIP_IS_E1H(sc)) {
        /* not applicable for E2 (and above ...) */
        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
    if (CHIP_REV_IS_SLOW(sc)) {
    /* finish CFC init */
    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
    REG_WR(sc, CFC_REG_DEBUG0, 0);
    if (CHIP_IS_E1(sc)) {
        /* read NIG statistics to see if this is our first up since powerup */
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        /* do internal memory self test */
        if ((val == 0) && bxe_int_mem_test(sc)) {
            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
    bxe_setup_fan_failure_detection(sc);
    /* clear PXP2 attentions */
    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
    bxe_enable_blocks_attention(sc);
    if (!CHIP_REV_IS_SLOW(sc)) {
        ecore_enable_blocks_parity(sc);
    if (!BXE_NOMCP(sc)) {
        if (CHIP_IS_E1x(sc)) {
            bxe_common_init_phy(sc);
 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 * @sc: driver handle
bxe_init_hw_common_chip(struct bxe_softc *sc)
    int rc = bxe_init_hw_common(sc);
        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
    /* In E2 2-PORT mode, the same ext phy is used for the two paths */
    if (!BXE_NOMCP(sc)) {
        bxe_common_init_phy(sc);
bxe_init_hw_port(struct bxe_softc *sc)
    int port = SC_PORT(sc);
    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
    uint32_t low, high;
    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);
     * Timers bug workaround: the common phase disables the pf_master bit in
     * pglue, so we need to enable it here before any dmae accesses are
     * attempted. Therefore we manually added the enable-master to the
     * port phase (it also happens in the function phase)
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
    ecore_init_block(sc, BLOCK_QM, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);
    /* QM cid (connection) count */
    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_TM, init_phase);
        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
    ecore_init_block(sc, BLOCK_DORQ, init_phase);
    ecore_init_block(sc, BLOCK_BRB1, init_phase);
    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
        low = (BXE_ONE_PORT(sc) ? 160 : 246);
    } else if (sc->mtu > 4096) {
        if (BXE_ONE_PORT(sc)) {
            /* (24*1024 + val*4)/256 */
            low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
        low = (BXE_ONE_PORT(sc) ? 80 : 160);
    high = (low + 56); /* 14*1024/256 */
    REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
    REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
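    /*
     * Worked example (added commentary, assuming 'val' holds the MTU in
     * bytes): for a 9000-byte MTU, low = 96 + 9000/64 + 1 = 237 BRB blocks
     * and high = 237 + 56 = 293; i.e. the pause thresholds scale with MTU
     * in 256-byte block units per the (24*1024 + val*4)/256 formula above.
     */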
    if (CHIP_IS_MODE_4_PORT(sc)) {
        REG_WR(sc, SC_PORT(sc) ?
               BRB1_REG_MAC_GUARANTIED_1 :
               BRB1_REG_MAC_GUARANTIED_0, 40);
    ecore_init_block(sc, BLOCK_PRS, init_phase);
    if (CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /* configure headers for AFEX mode */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
            /* Ovlan headers exist only in multi-function +
             * switch-dependent mode; in switch-independent mode
             * there are no ovlan headers
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);
    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);
    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);
    ecore_init_block(sc, BLOCK_PBF, init_phase);
    if (CHIP_IS_E1x(sc)) {
        /* configure PBF to work without PAUSE for mtu 9000 */
        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
        /* update threshold */
        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
        /* probe changes */
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_SRC, init_phase);
    ecore_init_block(sc, BLOCK_CDU, init_phase);
    ecore_init_block(sc, BLOCK_CFC, init_phase);
    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    ecore_init_block(sc, BLOCK_HC, init_phase);
    ecore_init_block(sc, BLOCK_IGU, init_phase);
    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
    /* init aeu_mask_attn_func_0/1:
     * - SF mode: bits 3-7 are masked, only bits 0-2 are in use
     * - MF mode: bit 3 is masked, bits 0-2 are in use as in SF;
     *   bits 4-7 are used for "per vn group attention" */
    val = IS_MF(sc) ? 0xF7 : 0x7;
    /* Enable DCBX attention for all but E1 */
    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
    ecore_init_block(sc, BLOCK_NIG, init_phase);
    if (!CHIP_IS_E1x(sc)) {
        /* Bit-map indicating which L2 hdrs may appear after the
         * basic Ethernet header
        if (IS_MF_AFEX(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC,
                   IS_MF_SD(sc) ? 7 : 6);
        if (CHIP_IS_E3(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_LLH1_MF_MODE :
                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
    if (!CHIP_IS_E3(sc)) {
        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
    if (!CHIP_IS_E1(sc)) {
        /* 0x2 disable mf_ov, 0x1 enable */
        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
               (IS_MF_SD(sc) ? 0x1 : 0x2));
        if (!CHIP_IS_E1x(sc)) {
            switch (sc->devinfo.mf_info.mf_mode) {
            case MULTI_FUNCTION_SD:
            case MULTI_FUNCTION_SI:
            case MULTI_FUNCTION_AFEX:
            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
                        NIG_REG_LLH0_CLS_TYPE), val);
        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
    /* If SPIO5 is set to generate interrupts, enable it for this port */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    if (val & MISC_SPIO_SPIO5) {
        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
        val = REG_RD(sc, reg_addr);
        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_addr, val);
bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
                       uint32_t poll_count)
    uint32_t cur_cnt = poll_count;
    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
        DELAY(FLR_WAIT_INTERVAL);
bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
        BLOGE(sc, "%s usage count=%d\n", msg, val);
/* Common routines with VF FLR cleanup */
bxe_flr_clnup_poll_count(struct bxe_softc *sc)
    /* adjust polling timeout */
    if (CHIP_REV_IS_EMUL(sc)) {
        return (FLR_POLL_CNT * 2000);
    if (CHIP_REV_IS_FPGA(sc)) {
        return (FLR_POLL_CNT * 120);
    return (FLR_POLL_CNT);
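/*
 * Added note: with the default settings this polls in FLR_WAIT_INTERVAL
 * microsecond steps for roughly FLR_WAIT_USEC in total; the multipliers
 * above simply stretch that budget on emulation (x2000) and FPGA (x120)
 * platforms where the hardware runs far slower than real silicon.
 */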
bxe_poll_hw_usage_counters(struct bxe_softc *sc,
    /* wait for the CFC PF usage-counter to reach zero (includes all the VFs) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
                                      "CFC PF usage counter timed out",
    /* Wait for the DQ PF usage-counter to reach zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      DORQ_REG_PF_USAGE_CNT,
                                      "DQ PF usage counter timed out",
    /* Wait for the QM PF usage-counter to reach zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
                                      "QM PF usage counter timed out",
    /* Wait for the Timer PF usage-counters to reach zero (until DQ cleanup) */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
                                      "Timers VNIC usage counter timed out",
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
                                      "Timers NUM_SCANS usage counter timed out",
    /* Wait for the DMAE PF usage counter to reach zero */
    if (bxe_flr_clnup_poll_hw_counter(sc,
                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
                                      "DMAE command register timed out",
#define OP_GEN_PARAM(param) \
    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
#define OP_GEN_TYPE(type) \
    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
#define OP_GEN_AGG_VECT(index) \
    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
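/*
 * Added commentary: these macros pack the SDM "operation generator" command
 * word field by field (each value is shifted into position and masked).
 * bxe_send_final_clnup() below is the canonical user: it ORs together the
 * completion parameter, completion type, and aggregated vector index (plus
 * the index-valid bit) before writing the result to XSDM_REG_OPERATION_GEN.
 */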
bxe_send_final_clnup(struct bxe_softc *sc,
                     uint8_t clnup_func,
    uint32_t op_gen_command = 0;
    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
    if (REG_RD(sc, comp_addr)) {
        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
        BLOGE(sc, "FW final cleanup did not succeed\n");
        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
              (REG_RD(sc, comp_addr)));
        bxe_panic(sc, ("FLR cleanup failed\n"));
    /* Zero the completion for the next FLR */
    REG_WR(sc, comp_addr, 0);
bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
                       struct pbf_pN_buf_regs *regs,
                       uint32_t poll_count)
    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
    uint32_t cur_cnt = poll_count;
    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
    crd = crd_start = REG_RD(sc, regs->crd);
    init_crd = REG_RD(sc, regs->init_crd);
    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
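    /*
     * Added note on the loop condition below: credits freed so far are
     * tracked as the unsigned difference (crd_freed - crd_freed_start),
     * computed via signed casts so that a wrapping 32-bit hardware counter
     * still yields the correct delta.
     */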
    while ((crd != init_crd) &&
           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
            (init_crd - crd_start))) {
        DELAY(FLR_WAIT_INTERVAL);
        crd = REG_RD(sc, regs->crd);
        crd_freed = REG_RD(sc, regs->crd_freed);
        BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
        BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
        BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
                       struct pbf_pN_cmd_regs *regs,
                       uint32_t poll_count)
    uint32_t occup, to_free, freed, freed_start;
    uint32_t cur_cnt = poll_count;
    occup = to_free = REG_RD(sc, regs->lines_occup);
    freed = freed_start = REG_RD(sc, regs->lines_freed);
    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
        DELAY(FLR_WAIT_INTERVAL);
        occup = REG_RD(sc, regs->lines_occup);
        freed = REG_RD(sc, regs->lines_freed);
        BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
        BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
        BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
    struct pbf_pN_cmd_regs cmd_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q0 :
            PBF_REG_P0_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
            PBF_REG_P0_TQ_LINES_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q1 :
            PBF_REG_P1_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
            PBF_REG_P1_TQ_LINES_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_LB_Q :
            PBF_REG_P4_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
            PBF_REG_P4_TQ_LINES_FREED_CNT}
    struct pbf_pN_buf_regs buf_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q0 :
            PBF_REG_P0_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q0 :
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q1 :
            PBF_REG_P1_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q1 :
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_LB_Q :
            PBF_REG_P4_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_LB_Q :
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
    /* Verify the command queues are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
    /* Verify the transmission buffers are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
bxe_hw_enable_status(struct bxe_softc *sc)
    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
    val = REG_RD(sc, PBF_REG_DISABLE_PF);
    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
bxe_pf_flr_clnup(struct bxe_softc *sc)
    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
    /* Re-enable PF target read access */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
    /* Poll HW usage counters */
    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
    /* Zero the igu 'trailing edge' and 'leading edge' */
    /* Send the FW cleanup command */
    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
    /* Verify TX hw is flushed */
    bxe_tx_hw_flushed(sc, poll_cnt);
    /* Wait 100ms (not adjusted according to platform) */
    /* Verify no pending pci transactions */
    if (bxe_is_pcie_pending(sc)) {
        BLOGE(sc, "PCIE Transactions still pending\n");
    bxe_hw_enable_status(sc);
     * Master enable - Due to WB DMAE writes performed before this
     * register is re-initialized as part of the regular function init
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
bxe_init_hw_func(struct bxe_softc *sc)
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int init_phase = PHASE_PF0 + func;
    struct ecore_ilt *ilt = sc->ilt;
    uint16_t cdu_ilt_start;
    uint32_t addr, val;
    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
    int i, main_mem_width, rc;
    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
    if (!CHIP_IS_E1x(sc)) {
        rc = bxe_pf_flr_clnup(sc);
            BLOGE(sc, "FLR cleanup failed!\n");
            // XXX bxe_fw_dump(sc);
            // XXX bxe_idle_chk(sc);
    /* set MSI reconfigure capability */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
        val = REG_RD(sc, addr);
        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
        REG_WR(sc, addr, val);
    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);
    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
    for (i = 0; i < L2_ILT_LINES(sc); i++) {
        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
        ilt->lines[cdu_ilt_start + i].page_mapping =
            sc->context[i].vcxt_dma.paddr;
        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
    ecore_ilt_init_op(sc, INITOP_SET);
    REG_WR(sc, PRS_REG_NIC_MODE, 1);
    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
    if (!CHIP_IS_E1x(sc)) {
        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
        /* Turn on a single ISR mode in IGU if driver is going to use
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
         * Timers workaround bug: function init part.
         * Need to wait 20msec after initializing ILT,
         * needed to make sure there are no requests in
         * one of the PXP internal queues with "old" ILT addresses
         * Master enable - Due to WB DMAE writes performed before this
         * register is re-initialized as part of the regular function
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
        /* Enable the function in IGU */
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
    sc->dmae_ready = 1;
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_NIG, init_phase);
    ecore_init_block(sc, BLOCK_SRC, init_phase);
    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);
    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, QM_REG_PF_EN, 1);
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
    ecore_init_block(sc, BLOCK_QM, init_phase);
    ecore_init_block(sc, BLOCK_TM, init_phase);
    ecore_init_block(sc, BLOCK_DORQ, init_phase);
    bxe_iov_init_dq(sc);
    ecore_init_block(sc, BLOCK_BRB1, init_phase);
    ecore_init_block(sc, BLOCK_PRS, init_phase);
    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);
    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);
    ecore_init_block(sc, BLOCK_PBF, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
    ecore_init_block(sc, BLOCK_CDU, init_phase);
    ecore_init_block(sc, BLOCK_CFC, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
    /* HC init per function */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        if (CHIP_IS_E1H(sc)) {
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        ecore_init_block(sc, BLOCK_HC, init_phase);
        int num_segs, sb_idx, prod_offset;
        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
        ecore_init_block(sc, BLOCK_IGU, init_phase);
        if (!CHIP_IS_E1x(sc)) {
             * E2 mode: addresses 0-135 match to the mapping memory;
             * 136 - PF0 default prod; 137 - PF1 default prod;
             * 138 - PF2 default prod; 139 - PF3 default prod;
             * 140 - PF0 attn prod;    141 - PF1 attn prod;
             * 142 - PF2 attn prod;    143 - PF3 attn prod;
             * 144-147 reserved.
             * E1.5 mode - in backward compatible mode:
             * for non default SB, each even line in the memory
             * holds the U producer and each odd line holds
             * the C producer. The first 128 producers are for
             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSB for each PF.
             * Each PF has five segments (the order inside each
             * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
             * 144-147 attn prods;
            /* non-default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
                prod_offset = (sc->igu_base_sb + sb_idx) *
                for (i = 0; i < num_segs; i++) {
                    addr = IGU_REG_PROD_CONS_MEMORY +
                           (prod_offset + i) * 4;
                    REG_WR(sc, addr, 0);
                /* send consumer update with value 0 */
                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
            /* default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
            if (CHIP_IS_MODE_4_PORT(sc))
                dsb_idx = SC_FUNC(sc);
                dsb_idx = SC_VN(sc);
            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
                           IGU_BC_BASE_DSB_PROD + dsb_idx :
                           IGU_NORM_BASE_DSB_PROD + dsb_idx);
             * igu prods come in chunks of E1HVN_MAX (4),
             * regardless of the current chip mode
            for (i = 0; i < (num_segs * E1HVN_MAX);
                addr = IGU_REG_PROD_CONS_MEMORY +
                       (prod_offset + i)*4;
                REG_WR(sc, addr, 0);
            /* send consumer update with 0 */
            if (CHIP_INT_MODE_IS_BC(sc)) {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           CSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           XSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           TSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
            /* !!! these should become driver const once
               rf-tool supports split-68 const */
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2114, 0xffffffff);
    REG_WR(sc, 0x2120, 0xffffffff);
    if (CHIP_IS_E1x(sc)) {
        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /* dwords */
        main_mem_base = HC_REG_MAIN_MEMORY +
                        SC_PORT(sc) * (main_mem_size * 4);
        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
        main_mem_width = 8;
        val = REG_RD(sc, main_mem_prty_clr);
            BLOGD(sc, DBG_LOAD,
                  "Parity errors in HC block during function init (0x%x)!\n",
        /* Clear "false" parity errors in MSI-X table */
        for (i = main_mem_base;
             i < main_mem_base + main_mem_size * 4;
             i += main_mem_width) {
            bxe_read_dmae(sc, i, main_mem_width / 4);
            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
                           i, main_mem_width / 4);
        /* Clear HC parity attention */
        REG_RD(sc, main_mem_prty_clr);
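        /*
         * Added note: the loop above scrubs the HC main memory by DMAE-reading
         * each row and writing the same data straight back; the write-back
         * regenerates the row's parity, and the final read of the
         * parity-status-clear register drops any attention latched during
         * the scrub.
         */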
    /* Enable STORMs SP logging */
    REG_WR8(sc, BAR_USTRORM_INTMEM +
            USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_TSTRORM_INTMEM +
            TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_XSTRORM_INTMEM +
            XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    elink_phy_probe(&sc->link_params);
bxe_link_reset(struct bxe_softc *sc)
    if (!BXE_NOMCP(sc)) {
        bxe_acquire_phy_lock(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        bxe_release_phy_lock(sc);
    if (!CHIP_REV_IS_SLOW(sc)) {
        BLOGW(sc, "Bootcode is missing - cannot reset link\n");
bxe_reset_port(struct bxe_softc *sc)
    int port = SC_PORT(sc);
    /* reset physical Link */
    bxe_link_reset(sc);
    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
    /* Do not rcv packets to BRB */
    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct rcv packets that are not for MCP to the BRB */
    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
    /* Configure AEU */
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
    /* Check for BRB port occupancy */
    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
        BLOGD(sc, DBG_LOAD,
              "BRB1 is not empty, %d blocks are occupied\n", val);
    /* TODO: Close Doorbell port? */
bxe_ilt_wr(struct bxe_softc *sc,
    uint32_t wb_write[2];
    if (CHIP_IS_E1(sc)) {
        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
    wb_write[0] = ONCHIP_ADDR1(addr);
    wb_write[1] = ONCHIP_ADDR2(addr);
    REG_WR_DMAE(sc, reg, wb_write, 2);
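    /*
     * Added note: an ILT entry is a 64-bit on-chip address table record, so
     * it is written as two dwords through the wide-bus DMAE helper;
     * ONCHIP_ADDR1/ONCHIP_ADDR2 split the bus address into the two halves
     * expected by the PXP2 on-chip address table.
     */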
bxe_clear_func_ilt(struct bxe_softc *sc,
    uint32_t i, base = FUNC_ILT_BASE(func);
    for (i = base; i < base + ILT_PER_FUNC; i++) {
        bxe_ilt_wr(sc, i, 0);
bxe_reset_func(struct bxe_softc *sc)
    struct bxe_fastpath *fp;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    /* Disable the function in the FW */
    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
    FOR_EACH_ETH_QUEUE(sc, i) {
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
    /* Configure IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
    if (CNIC_LOADED(sc)) {
        /* Disable Timer scan */
        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
         * Wait for at least 10ms and up to 2 seconds for the timers
        for (i = 0; i < 200; i++) {
            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
    bxe_clear_func_ilt(sc, func);
     * Timers workaround bug for E2: if this is vnic-3,
     * we need to set the entire ilt range for the timers.
    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
        struct ilt_client_info ilt_cli;
        /* use dummy TM client */
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;
        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
    /* this assumes that reset_port() was called before reset_func() */
    if (!CHIP_IS_E1x(sc)) {
        bxe_pf_disable(sc);
    sc->dmae_ready = 0;
18138 bxe_gunzip_init(struct bxe_softc *sc)
18144 bxe_gunzip_end(struct bxe_softc *sc)
18150 bxe_init_firmware(struct bxe_softc *sc)
18152 if (CHIP_IS_E1(sc)) {
18153 ecore_init_e1_firmware(sc);
18154 sc->iro_array = e1_iro_arr;
18155 } else if (CHIP_IS_E1H(sc)) {
18156 ecore_init_e1h_firmware(sc);
18157 sc->iro_array = e1h_iro_arr;
18158 } else if (!CHIP_IS_E1x(sc)) {
18159 ecore_init_e2_firmware(sc);
18160 sc->iro_array = e2_iro_arr;
18162 BLOGE(sc, "Unsupported chip revision\n");
18170 bxe_release_firmware(struct bxe_softc *sc)
18177 ecore_gunzip(struct bxe_softc *sc,
18178 const uint8_t *zbuf,
18181 /* XXX : Implement... */
18182 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
static void
ecore_reg_wr_ind(struct bxe_softc *sc,
                 uint32_t         addr,
                 uint32_t         val)
{
    bxe_reg_wr_ind(sc, addr, val);
}
static void
ecore_write_dmae_phys_len(struct bxe_softc *sc,
                          bus_addr_t       phys_addr,
                          uint32_t         addr,
                          uint32_t         len)
{
    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
}
void
ecore_storm_memset_struct(struct bxe_softc *sc,
                          uint32_t         addr,
                          size_t           size,
                          uint32_t         *data)
{
    uint8_t i;

    for (i = 0; i < size/4; i++) {
        REG_WR(sc, addr + (i * 4), data[i]);
    }
}
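#if 0
/*
 * Illustrative sketch only (not compiled): how a caller would push a small
 * firmware structure into storm internal memory with the helper above.
 * "struct example_fw_blob" and "some_intmem_offset" are hypothetical names
 * used purely for illustration; real callers pass firmware-defined offsets.
 */
static void
example_storm_write(struct bxe_softc *sc)
{
    struct example_fw_blob {
        uint32_t field_a;
        uint32_t field_b;
    } blob = { 0x1, 0x2 };
    uint32_t some_intmem_offset = 0; /* hypothetical INTMEM offset */

    /* the helper writes size/4 dwords, one 32-bit register write each */
    ecore_storm_memset_struct(sc, BAR_XSTRORM_INTMEM + some_intmem_offset,
                              sizeof(blob), (uint32_t *)&blob);
}
#endif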
 * character device - ioctl interface definitions
 */

#include "bxe_dump.h"
#include "bxe_ioctl.h"
#include <sys/conf.h>

static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
                      struct thread *td);

static struct cdevsw bxe_cdevsw = {
    .d_version = D_VERSION,
    .d_ioctl = bxe_eioctl,
    .d_name = "bxecnic",
};
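#if 0
/*
 * Illustrative userland sketch only (not compiled): querying driver info
 * through the per-interface ioctl node serviced by bxe_eioctl() below.
 * The "/dev/bxe0" path assumes the node is named after the interface (see
 * bxe_add_cdev()); the BXE_DRV_INFO command name is assumed to come from
 * bxe_ioctl.h, while the drv_name/drv_version fields match the handler.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "bxe_ioctl.h"

int
main(void)
{
    bxe_drvinfo_t info;
    int fd = open("/dev/bxe0", O_RDWR);

    if (fd < 0)
        return (1);
    if (ioctl(fd, BXE_DRV_INFO, &info) == 0)
        printf("driver %s version %s\n", info.drv_name, info.drv_version);
    close(fd);
    return (0);
}
#endif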
#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))

#define DUMP_ALL_PRESETS        0x1FFF
#define DUMP_MAX_PRESETS        13
#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)

#define IS_REG_IN_PRESET(presets, idx)  \
    ((presets & (1 << (idx-1))) == (1 << (idx-1)))
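/*
 * Worked example: preset indices are 1-based, so IS_REG_IN_PRESET(presets, 3)
 * tests bit 2 (1 << 2 == 0x4). DUMP_ALL_PRESETS == 0x1FFF sets the low 13
 * bits, i.e. all DUMP_MAX_PRESETS (13) presets at once.
 */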
static int
bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
{
    if (CHIP_IS_E1(sc))
        return dump_num_registers[0][preset-1];
    else if (CHIP_IS_E1H(sc))
        return dump_num_registers[1][preset-1];
    else if (CHIP_IS_E2(sc))
        return dump_num_registers[2][preset-1];
    else if (CHIP_IS_E3A0(sc))
        return dump_num_registers[3][preset-1];
    else if (CHIP_IS_E3B0(sc))
        return dump_num_registers[4][preset-1];
    else
        return 0;
}
static int
bxe_get_total_regs_len32(struct bxe_softc *sc)
{
    uint32_t preset_idx;
    int regdump_len32 = 0;

    /* Calculate the total preset regs length */
    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
    }

    return regdump_len32;
}
static const uint32_t *
__bxe_get_page_addr_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_vals_e2;
    else if (CHIP_IS_E3(sc))
        return page_vals_e3;
    else
        return NULL;
}

static uint32_t
__bxe_get_page_reg_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_MODE_VALUES_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_MODE_VALUES_E3;
    else
        return 0;
}

static const uint32_t *
__bxe_get_page_write_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_write_regs_e2;
    else if (CHIP_IS_E3(sc))
        return page_write_regs_e3;
    else
        return NULL;
}

static uint32_t
__bxe_get_page_write_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_WRITE_REGS_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_WRITE_REGS_E3;
    else
        return 0;
}

static const struct reg_addr *
__bxe_get_page_read_ar(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return page_read_regs_e2;
    else if (CHIP_IS_E3(sc))
        return page_read_regs_e3;
    else
        return NULL;
}

static uint32_t
__bxe_get_page_read_num(struct bxe_softc *sc)
{
    if (CHIP_IS_E2(sc))
        return PAGE_READ_REGS_E2;
    else if (CHIP_IS_E3(sc))
        return PAGE_READ_REGS_E3;
    else
        return 0;
}
static bool
bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
{
    if (CHIP_IS_E1(sc))
        return IS_E1_REG(reg_info->chips);
    else if (CHIP_IS_E1H(sc))
        return IS_E1H_REG(reg_info->chips);
    else if (CHIP_IS_E2(sc))
        return IS_E2_REG(reg_info->chips);
    else if (CHIP_IS_E3A0(sc))
        return IS_E3A0_REG(reg_info->chips);
    else if (CHIP_IS_E3B0(sc))
        return IS_E3B0_REG(reg_info->chips);
    else
        return 0;
}

static bool
bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
{
    if (CHIP_IS_E1(sc))
        return IS_E1_REG(wreg_info->chips);
    else if (CHIP_IS_E1H(sc))
        return IS_E1H_REG(wreg_info->chips);
    else if (CHIP_IS_E2(sc))
        return IS_E2_REG(wreg_info->chips);
    else if (CHIP_IS_E3A0(sc))
        return IS_E3A0_REG(wreg_info->chips);
    else if (CHIP_IS_E3B0(sc))
        return IS_E3B0_REG(wreg_info->chips);
    else
        return 0;
}
/**
 * bxe_read_pages_regs - read "paged" registers
 *
 * @sc      device handle
 * @p       output buffer
 * @preset  the preset value
 *
 * Reads "paged" memories: memories that may only be read by first writing to a
 * specific address ("write address") and then reading from a specific address
 * ("read address"). There may be more than one write address per "page" and
 * more than one read address per write address.
 */
static void
bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
    uint32_t i, j, k, n;

    /* addresses of the paged registers */
    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
    /* number of paged registers */
    int num_pages = __bxe_get_page_reg_num(sc);
    /* write addresses */
    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
    /* number of write addresses */
    int write_num = __bxe_get_page_write_num(sc);
    /* read addresses info */
    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
    /* number of read addresses */
    int read_num = __bxe_get_page_read_num(sc);
    uint32_t addr, size;

    for (i = 0; i < num_pages; i++) {
        for (j = 0; j < write_num; j++) {
            REG_WR(sc, write_addr[j], page_addr[i]);

            for (k = 0; k < read_num; k++) {
                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
                    size = read_addr[k].size;
                    for (n = 0; n < size; n++) {
                        addr = read_addr[k].addr + n*4;
                        *p++ = REG_RD(sc, addr);
                    }
                }
            }
        }
    }
}
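/*
 * Concrete (hypothetical) walk-through of the loops above: with page values
 * {0, 1}, a single write address W, and a single read address R of size 2,
 * the generated access sequence is
 *
 *     REG_WR(W, 0);  REG_RD(R);  REG_RD(R+4);
 *     REG_WR(W, 1);  REG_RD(R);  REG_RD(R+4);
 *
 * i.e. each page value is latched at W before the window at R is read out.
 */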
static int
bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
{
    uint32_t i, j, addr;
    const struct wreg_addr *wreg_addr_p = NULL;

    if (CHIP_IS_E1(sc))
        wreg_addr_p = &wreg_addr_e1;
    else if (CHIP_IS_E1H(sc))
        wreg_addr_p = &wreg_addr_e1h;
    else if (CHIP_IS_E2(sc))
        wreg_addr_p = &wreg_addr_e2;
    else if (CHIP_IS_E3A0(sc))
        wreg_addr_p = &wreg_addr_e3;
    else if (CHIP_IS_E3B0(sc))
        wreg_addr_p = &wreg_addr_e3b0;
    else
        return (-1);

    /* Read the idle_chk registers */
    for (i = 0; i < IDLE_REGS_COUNT; i++) {
        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
            for (j = 0; j < idle_reg_addrs[i].size; j++)
                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
        }
    }

    /* Read the regular registers */
    for (i = 0; i < REGS_COUNT; i++) {
        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
            for (j = 0; j < reg_addrs[i].size; j++)
                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
        }
    }

    /* Read the CAM registers */
    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
        for (i = 0; i < wreg_addr_p->size; i++) {
            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);

            /* In case of wreg_addr register, read additional
               registers from read_regs array
             */
            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
                addr = *(wreg_addr_p->read_regs);
                *p++ = REG_RD(sc, addr + j*4);
            }
        }
    }

    /* Paged registers are supported in E2 & E3 only */
    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
        /* Read "paged" registers */
        bxe_read_pages_regs(sc, p, preset);
    }

    return 0;
}
static int
bxe_grc_dump(struct bxe_softc *sc)
{
    int rval = 0;
    uint32_t preset_idx;
    uint8_t *buf;
    uint32_t i, size;
    uint32_t reg_val, reg_addr, cmd_offset;
    uint32_t context_size, allocated;
    struct dump_header *d_hdr;
    struct ecore_ilt *ilt = SC_ILT(sc);
    struct bxe_fastpath *fp;
    struct ilt_client_info *ilt_cli;
    int grc_dump_size;

    if (sc->grcdump_done || sc->grcdump_started)
        return (rval);

    sc->grcdump_started = 1;
    BLOGI(sc, "Started collecting grcdump\n");

    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                    sizeof(struct dump_header);

    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);

    if (sc->grc_dump == NULL) {
        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
        return (ENOMEM);
    }
    /* Disable parity attentions as long as following dump may
     * cause false alarms by reading never written registers. We
     * will re-enable parity attentions right after the dump.
     */

    /* Disable parity on path 0 */
    bxe_pretend_func(sc, 0);
    ecore_disable_blocks_parity(sc);

    /* Disable parity on path 1 */
    bxe_pretend_func(sc, 1);
    ecore_disable_blocks_parity(sc);

    /* Return to current function */
    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
    buf = sc->grc_dump;
    d_hdr = sc->grc_dump;

    d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
    d_hdr->version = BNX2X_DUMP_VERSION;
    d_hdr->preset = DUMP_ALL_PRESETS;

    if (CHIP_IS_E1(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E1;
    } else if (CHIP_IS_E1H(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
    } else if (CHIP_IS_E2(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    } else if (CHIP_IS_E3A0(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    } else if (CHIP_IS_E3B0(sc)) {
        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
                                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
    }

    buf += sizeof(struct dump_header);
    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {

        /* Skip presets with IOR */
        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
            (preset_idx == 11))
            continue;

        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);

        if (rval)
            break;

        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof(uint32_t));

        buf += size;
    }

    bxe_pretend_func(sc, 0);
    ecore_clear_blocks_parity(sc);
    ecore_enable_blocks_parity(sc);

    bxe_pretend_func(sc, 1);
    ecore_clear_blocks_parity(sc);
    ecore_enable_blocks_parity(sc);

    /* Return to current function */
    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
    /* Log the addresses/sizes of the driver's DMA allocations */
    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
    for (i = 0, allocated = 0; allocated < context_size; i++) {
        BLOGI(sc, "cdu_context i %d paddr %#jx vaddr %p size 0x%zx\n", i,
              (uintmax_t)sc->context[i].vcxt_dma.paddr,
              sc->context[i].vcxt_dma.vaddr,
              sc->context[i].size);
        allocated += sc->context[i].size;
    }

    BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
          (uintmax_t)sc->fw_stats_req_mapping,
          (uintmax_t)sc->fw_stats_data_mapping,
          sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
    BLOGI(sc, "def_status_block paddr %#jx vaddr %p size 0x%zx\n",
          (uintmax_t)sc->def_sb_dma.paddr, sc->def_sb,
          sizeof(struct host_sp_status_block));
    BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
          (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
    BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
          (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
          sizeof(struct bxe_slowpath));
    BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
          (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
    BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
          (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
          FW_BUF_SIZE);

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
              (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
              sizeof(union bxe_host_hc_status_block));
        BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
              (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
              (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
        BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
              (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
              (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
        BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
              (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
              (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
        BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
              (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
              (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
    }

    ilt_cli = &ilt->clients[1];
    for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
        BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
              (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
              ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr,
              BCM_PAGE_SIZE);
    }

    cmd_offset = DMAE_REG_CMD_MEM;
    for (i = 0; i < 224; i++) {
        reg_addr = (cmd_offset + (i * 4));
        reg_val = REG_RD(sc, reg_addr);
        BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n", i,
              reg_addr, reg_val);
    }

    BLOGI(sc, "Collection of grcdump done\n");
    sc->grcdump_done = 1;

    return (rval);
}
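#if 0
/*
 * Illustrative userland sketch only (not compiled): the two-step grcdump
 * retrieval protocol implemented by bxe_eioctl() below. First query the
 * required buffer size, then pass a buffer of at least that size. The
 * "/dev/bxe0" path is an assumption (the node is named after the
 * interface), and the dump is only collected once sc->trigger_grcdump has
 * been set by the driver.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include "bxe_ioctl.h"

static int
fetch_grcdump(const char *devpath)
{
    bxe_grcdump_t dump = { 0 };
    int fd = open(devpath, O_RDWR);

    if (fd < 0)
        return (-1);

    /* step 1: ask the driver how large the dump will be */
    if (ioctl(fd, BXE_GRC_DUMP_SIZE, &dump) != 0)
        goto fail;

    /* step 2: provide a buffer of that size and collect the dump */
    dump.grcdump = malloc(dump.grcdump_size);
    if ((dump.grcdump == NULL) ||
        (ioctl(fd, BXE_GRC_DUMP, &dump) != 0))
        goto fail;

    /* dump.grcdump now holds a struct dump_header plus the preset data */
    free(dump.grcdump);
    close(fd);
    return (0);
fail:
    free(dump.grcdump);
    close(fd);
    return (-1);
}
#endif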
static int
bxe_add_cdev(struct bxe_softc *sc)
{
    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);

    if (sc->eeprom == NULL) {
        BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
        return (-1);
    }

    sc->ioctl_dev = make_dev(&bxe_cdevsw,
                             if_getdunit(sc->ifp),
                             UID_ROOT,
                             GID_WHEEL,
                             0600,
                             "%s",
                             if_name(sc->ifp));

    if (sc->ioctl_dev == NULL) {
        free(sc->eeprom, M_DEVBUF);
        sc->eeprom = NULL;
        return (-1);
    }

    sc->ioctl_dev->si_drv1 = sc;

    return (0);
}
static void
bxe_del_cdev(struct bxe_softc *sc)
{
    if (sc->ioctl_dev != NULL)
        destroy_dev(sc->ioctl_dev);

    if (sc->eeprom != NULL) {
        free(sc->eeprom, M_DEVBUF);
        sc->eeprom = NULL;
    }

    sc->ioctl_dev = NULL;

    return;
}
static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
{
    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
        return (false);

    return (true);
}
static int
bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
{
    int rval;

    if (!bxe_is_nvram_accessible(sc)) {
        BLOGW(sc, "Cannot access eeprom when interface is down\n");
        return (-EAGAIN);
    }

    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);

    return (rval);
}
static int
bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
{
    int rval;

    if (!bxe_is_nvram_accessible(sc)) {
        BLOGW(sc, "Cannot access eeprom when interface is down\n");
        return (-EAGAIN);
    }

    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);

    return (rval);
}
static int
bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
{
    int rval = 0;

    switch (eeprom->eeprom_cmd) {

    case BXE_EEPROM_CMD_SET_EEPROM:

        rval = copyin(eeprom->eeprom_data, sc->eeprom,
                      eeprom->eeprom_data_len);

        if (rval)
            break;

        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
                             eeprom->eeprom_data_len);
        break;

    case BXE_EEPROM_CMD_GET_EEPROM:

        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
                             eeprom->eeprom_data_len);

        if (rval)
            break;

        rval = copyout(sc->eeprom, eeprom->eeprom_data,
                       eeprom->eeprom_data_len);
        break;

    default:
        rval = EINVAL;
        break;
    }

    if (rval) {
        BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
    }

    return (rval);
}
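#if 0
/*
 * Illustrative userland sketch only (not compiled): reading NVRAM bytes
 * through the eeprom ioctl serviced by bxe_eeprom_rd_wr() above. The
 * "/dev/bxe0" path and the 128-byte read are arbitrary example values, and
 * the BXE_EEPROM command name is assumed to come from bxe_ioctl.h; the
 * request fields match the handler above.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include "bxe_ioctl.h"

static int
read_eeprom_bytes(const char *devpath)
{
    bxe_eeprom_t req = { 0 };
    uint8_t buf[128];
    int fd = open(devpath, O_RDWR);

    if (fd < 0)
        return (-1);

    req.eeprom_cmd = BXE_EEPROM_CMD_GET_EEPROM;
    req.eeprom_offset = 0;           /* start of the flash */
    req.eeprom_data = buf;           /* driver copies the data out here */
    req.eeprom_data_len = sizeof(buf);

    if (ioctl(fd, BXE_EEPROM, &req) != 0) {
        close(fd);
        return (-1);
    }

    close(fd);
    return (0);
}
#endif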
static int
bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
{
    uint32_t ext_phy_config;
    int port = SC_PORT(sc);
    int cfg_idx = bxe_get_link_cfg_idx(sc);

    dev_p->supported = sc->port.supported[cfg_idx] |
                       (sc->port.supported[cfg_idx ^ 1] &
                        (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
    dev_p->advertising = sc->port.advertising[cfg_idx];

    if (sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
        ELINK_ETH_PHY_SFP_1G_FIBER) {
        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
    }

    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
        !(sc->flags & BXE_MF_FUNC_DIS)) {
        dev_p->duplex = sc->link_vars.duplex;

        if (IS_MF(sc) && !BXE_NOMCP(sc))
            dev_p->speed = bxe_get_mf_speed(sc);
        else
            dev_p->speed = sc->link_vars.line_speed;
    } else {
        dev_p->duplex = DUPLEX_UNKNOWN;
        dev_p->speed = SPEED_UNKNOWN;
    }

    dev_p->port = bxe_media_detect(sc);

    ext_phy_config = SHMEM_RD(sc,
                         dev_info.port_hw_config[port].external_phy_config);

    if ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
        dev_p->phy_address = sc->port.phy_addr;
    else if (((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
             ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
    else
        dev_p->phy_address = 0;

    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
        dev_p->autoneg = AUTONEG_ENABLE;
    else
        dev_p->autoneg = AUTONEG_DISABLE;

    return 0;
}
static int
bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
           struct thread *td)
{
    struct bxe_softc *sc;
    int rval = 0;
    bxe_grcdump_t *dump = NULL;
    int grc_dump_size;
    bxe_drvinfo_t *drv_infop = NULL;
    bxe_dev_setting_t *dev_p;
    bxe_dev_setting_t dev_set;
    bxe_get_regs_t *reg_p;
    bxe_reg_rdw_t *reg_rdw_p;
    bxe_pcicfg_rdw_t *cfg_rdw_p;
    bxe_perm_mac_addr_t *mac_addr_p;

    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
        return ENXIO;

    dump = (bxe_grcdump_t *)data;

    switch (cmd) {
    case BXE_GRC_DUMP_SIZE:
        dump->pci_func = sc->pcie_func;
        dump->grcdump_size =
            (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
            sizeof(struct dump_header);
        break;

    case BXE_GRC_DUMP:

        grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
                        sizeof(struct dump_header);
        if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
            (dump->grcdump_size < grc_dump_size)) {
            rval = EINVAL;
            break;
        }

        if ((sc->trigger_grcdump) && (!sc->grcdump_done) &&
            (!sc->grcdump_started)) {
            rval = bxe_grc_dump(sc);
        }

        if ((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
            (sc->grc_dump != NULL)) {
            dump->grcdump_dwords = grc_dump_size >> 2;
            rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
            free(sc->grc_dump, M_DEVBUF);
            sc->grc_dump = NULL;
            sc->grcdump_started = 0;
            sc->grcdump_done = 0;
        }

        break;
    case BXE_DRV_INFO:
        drv_infop = (bxe_drvinfo_t *)data;
        snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
        snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
                 BXE_DRIVER_VERSION);
        snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
                 sc->devinfo.bc_ver_str);
        snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
                 "%s", sc->fw_ver_str);
        drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
        drv_infop->reg_dump_len =
            (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
            sizeof(struct dump_header);
        snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
                 sc->pcie_bus, sc->pcie_device, sc->pcie_func);
        break;
    case BXE_DEV_SETTING:
        dev_p = (bxe_dev_setting_t *)data;
        bxe_get_settings(sc, &dev_set);
        dev_p->supported = dev_set.supported;
        dev_p->advertising = dev_set.advertising;
        dev_p->speed = dev_set.speed;
        dev_p->duplex = dev_set.duplex;
        dev_p->port = dev_set.port;
        dev_p->phy_address = dev_set.phy_address;
        dev_p->autoneg = dev_set.autoneg;
        break;
    case BXE_GET_REGS:

        reg_p = (bxe_get_regs_t *)data;
        grc_dump_size = reg_p->reg_buf_len;

        if ((!sc->grcdump_done) && (!sc->grcdump_started)) {
            bxe_grc_dump(sc);
        }

        if ((sc->grcdump_done) && (sc->grcdump_started) &&
            (sc->grc_dump != NULL)) {
            rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
            free(sc->grc_dump, M_DEVBUF);
            sc->grc_dump = NULL;
            sc->grcdump_started = 0;
            sc->grcdump_done = 0;
        }

        break;
    case BXE_RDW_REG:
        reg_rdw_p = (bxe_reg_rdw_t *)data;
        if ((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
            (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
            reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);

        if ((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
            (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
            REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);

        break;
    case BXE_RDW_PCICFG:
        cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
        if (cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
            cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
                                                 cfg_rdw_p->cfg_width);
        } else if (cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
            pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
                             cfg_rdw_p->cfg_width);
        } else {
            BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
        }

        break;
    case BXE_MAC_ADDR:
        mac_addr_p = (bxe_perm_mac_addr_t *)data;
        snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
                 sc->mac_addr_str);
        break;

    case BXE_EEPROM:
        rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);