2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
29 * File : ecore_dbg_fw_funcs.c
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
37 #include "ecore_mcp.h"
38 #include "spad_layout.h"
41 #include "ecore_hsi_common.h"
42 #include "ecore_hsi_debug_tools.h"
43 #include "mcp_public.h"
45 #ifndef USE_DBG_BIN_FILE
46 #include "ecore_dbg_values.h"
48 #include "ecore_dbg_fw_funcs.h"
50 /* Memory groups enum */
64 MEM_GROUP_CONN_CFC_MEM,
65 MEM_GROUP_TASK_CFC_MEM,
83 /* Memory groups names */
84 static const char* s_mem_group_names[] = {
115 /* Idle check conditions */
117 #ifndef __PREVENT_COND_ARR__
119 static u32 cond5(const u32 *r, const u32 *imm) {
120 return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
123 static u32 cond7(const u32 *r, const u32 *imm) {
124 return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
127 static u32 cond14(const u32 *r, const u32 *imm) {
128 return ((r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]));
131 static u32 cond6(const u32 *r, const u32 *imm) {
132 return ((r[0] & imm[0]) != imm[1]);
135 static u32 cond9(const u32 *r, const u32 *imm) {
136 return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
139 static u32 cond10(const u32 *r, const u32 *imm) {
140 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
143 static u32 cond4(const u32 *r, const u32 *imm) {
144 return ((r[0] & ~imm[0]) != imm[1]);
147 static u32 cond0(const u32 *r, const u32 *imm) {
148 return ((r[0] & ~r[1]) != imm[0]);
151 static u32 cond1(const u32 *r, const u32 *imm) {
152 return (r[0] != imm[0]);
155 static u32 cond11(const u32 *r, const u32 *imm) {
156 return (r[0] != r[1] && r[2] == imm[0]);
159 static u32 cond12(const u32 *r, const u32 *imm) {
160 return (r[0] != r[1] && r[2] > imm[0]);
163 static u32 cond3(const u32 *r, const u32 OSAL_UNUSED *imm) {
164 return (r[0] != r[1]);
167 static u32 cond13(const u32 *r, const u32 *imm) {
168 return (r[0] & imm[0]);
171 static u32 cond8(const u32 *r, const u32 *imm) {
172 return (r[0] < (r[1] - imm[0]));
175 static u32 cond2(const u32 *r, const u32 *imm) {
176 return (r[0] > imm[0]);
179 /* Array of Idle Check conditions */
180 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
198 #endif /* __PREVENT_COND_ARR__ */
201 /******************************* Data Types **********************************/
206 PLATFORM_EMUL_REDUCED,
211 struct chip_platform_defs {
217 /* Chip constant definitions */
220 struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
223 /* Platform constant definitions */
224 struct platform_defs {
229 /* Storm constant definitions.
230 * Addresses are in bytes, sizes are in quad-regs.
234 enum block_id block_id;
235 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
237 u32 sem_fast_mem_addr;
238 u32 sem_frame_mode_addr;
239 u32 sem_slow_enable_addr;
240 u32 sem_slow_mode_addr;
241 u32 sem_slow_mode1_conf_addr;
242 u32 sem_sync_dbg_empty_addr;
243 u32 sem_slow_dbg_empty_addr;
245 u32 cm_conn_ag_ctx_lid_size;
246 u32 cm_conn_ag_ctx_rd_addr;
247 u32 cm_conn_st_ctx_lid_size;
248 u32 cm_conn_st_ctx_rd_addr;
249 u32 cm_task_ag_ctx_lid_size;
250 u32 cm_task_ag_ctx_rd_addr;
251 u32 cm_task_st_ctx_lid_size;
252 u32 cm_task_st_ctx_rd_addr;
255 /* Block constant definitions */
258 bool exists[MAX_CHIP_IDS];
259 bool associated_to_storm;
261 /* Valid only if associated_to_storm is true */
263 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
267 u32 dbg_force_valid_addr;
268 u32 dbg_force_frame_addr;
271 /* If true, block is taken out of reset before dump */
273 enum dbg_reset_regs reset_reg;
275 /* Bit offset in reset register */
279 /* Reset register definitions */
280 struct reset_reg_defs {
282 bool exists[MAX_CHIP_IDS];
283 u32 unreset_val[MAX_CHIP_IDS];
286 /* Debug Bus Constraint operation constant definitions */
287 struct dbg_bus_constraint_op_defs {
292 /* Storm Mode definitions */
293 struct storm_mode_defs {
299 struct grc_param_defs {
300 u32 default_val[MAX_CHIP_IDS];
304 u32 exclude_all_preset_val;
305 u32 crash_preset_val;
308 /* address is in 128b units. Width is in bits. */
309 struct rss_mem_defs {
310 const char *mem_name;
311 const char *type_name;
314 u32 num_entries[MAX_CHIP_IDS];
317 struct vfc_ram_defs {
318 const char *mem_name;
319 const char *type_name;
324 struct big_ram_defs {
325 const char *instance_name;
326 enum mem_groups mem_group_id;
327 enum mem_groups ram_mem_group_id;
328 enum dbg_grc_params grc_param;
331 u32 num_of_blocks[MAX_CHIP_IDS];
335 const char *phy_name;
337 /* PHY base GRC address */
340 /* Relative address of indirect TBUS address register (bits 0..7) */
341 u32 tbus_addr_lo_addr;
343 /* Relative address of indirect TBUS address register (bits 8..10) */
344 u32 tbus_addr_hi_addr;
346 /* Relative address of indirect TBUS data register (bits 0..7) */
347 u32 tbus_data_lo_addr;
349 /* Relative address of indirect TBUS data register (bits 8..11) */
350 u32 tbus_data_hi_addr;
353 /******************************** Constants **********************************/
355 #define MAX_LCIDS 320
356 #define MAX_LTIDS 320
358 #define NUM_IOR_SETS 2
359 #define IORS_PER_SET 176
360 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
362 #define BYTES_IN_DWORD sizeof(u32)
365 #define SHR(val, val_width, amount) (((val) | ((val) << (val_width))) >> (amount)) & ((1 << (val_width)) - 1)
367 /* In the macros below, size and offset are specified in bits */
368 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
369 #define FIELD_BIT_OFFSET(type, field) type##_##field##_##OFFSET
370 #define FIELD_BIT_SIZE(type, field) type##_##field##_##SIZE
371 #define FIELD_DWORD_OFFSET(type, field) (int)(FIELD_BIT_OFFSET(type, field) / 32)
372 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
373 #define FIELD_BIT_MASK(type, field) (((1 << FIELD_BIT_SIZE(type, field)) - 1) << FIELD_DWORD_SHIFT(type, field))
375 #define SET_VAR_FIELD(var, type, field, val) var[FIELD_DWORD_OFFSET(type, field)] &= (~FIELD_BIT_MASK(type, field)); var[FIELD_DWORD_OFFSET(type, field)] |= (val) << FIELD_DWORD_SHIFT(type, field)
377 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) ecore_wr(dev, ptt, addr, (arr)[i])
379 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) (arr)[i] = ecore_rd(dev, ptt, addr)
381 #define CHECK_ARR_SIZE(arr, size) OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))
383 #ifndef DWORDS_TO_BYTES
384 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
386 #ifndef BYTES_TO_DWORDS
387 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
390 /* extra lines include a signature line + optional latency events line */
391 #ifndef NUM_DBG_LINES
392 #define NUM_EXTRA_DBG_LINES(block_desc) (1 + (block_desc->has_latency_events ? 1 : 0))
393 #define NUM_DBG_LINES(block_desc) (block_desc->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
396 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
397 #define RAM_LINES_TO_BYTES(lines) DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
399 #define REG_DUMP_LEN_SHIFT 24
400 #define MEM_DUMP_ENTRY_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
402 #define IDLE_CHK_RULE_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
404 #define IDLE_CHK_RESULT_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
406 #define IDLE_CHK_RESULT_REG_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
408 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
410 /* The sizes and offsets below are specified in bits */
411 #define VFC_CAM_CMD_STRUCT_SIZE 64
412 #define VFC_CAM_CMD_ROW_OFFSET 48
413 #define VFC_CAM_CMD_ROW_SIZE 9
414 #define VFC_CAM_ADDR_STRUCT_SIZE 16
415 #define VFC_CAM_ADDR_OP_OFFSET 0
416 #define VFC_CAM_ADDR_OP_SIZE 4
417 #define VFC_CAM_RESP_STRUCT_SIZE 256
418 #define VFC_RAM_ADDR_STRUCT_SIZE 16
419 #define VFC_RAM_ADDR_OP_OFFSET 0
420 #define VFC_RAM_ADDR_OP_SIZE 2
421 #define VFC_RAM_ADDR_ROW_OFFSET 2
422 #define VFC_RAM_ADDR_ROW_SIZE 10
423 #define VFC_RAM_RESP_STRUCT_SIZE 256
425 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
426 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
427 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
428 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
429 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
430 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
432 #define NUM_VFC_RAM_TYPES 4
434 #define VFC_CAM_NUM_ROWS 512
436 #define VFC_OPCODE_CAM_RD 14
437 #define VFC_OPCODE_RAM_RD 0
439 #define NUM_RSS_MEM_TYPES 5
441 #define NUM_BIG_RAM_TYPES 3
442 #define BIG_RAM_BLOCK_SIZE_BYTES 128
443 #define BIG_RAM_BLOCK_SIZE_DWORDS BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
445 #define NUM_PHY_TBUS_ADDRESSES 2048
446 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
448 #define SEM_FAST_MODE6_SRC_ENABLE 0x10
449 #define SEM_FAST_MODE6_SRC_DISABLE 0x3f
451 #define SEM_SLOW_MODE1_DATA_ENABLE 0x1
453 #define VALUES_PER_CYCLE 4
454 #define MAX_CYCLE_VALUES_MASK ((1 << VALUES_PER_CYCLE) - 1)
456 #define MAX_DWORDS_PER_CYCLE 8
460 #define NUM_CALENDAR_SLOTS 16
462 #define MAX_TRIGGER_STATES 3
463 #define TRIGGER_SETS_PER_STATE 2
464 #define MAX_CONSTRAINTS 4
466 #define SEM_FILTER_CID_EN_MASK 0x008
467 #define SEM_FILTER_EID_MASK_EN_MASK 0x010
468 #define SEM_FILTER_EID_RANGE_EN_MASK 0x110
470 #define CHUNK_SIZE_IN_DWORDS 64
471 #define CHUNK_SIZE_IN_BYTES DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)
473 #define INT_BUF_NUM_OF_LINES 192
474 #define INT_BUF_LINE_SIZE_IN_DWORDS 16
475 #define INT_BUF_SIZE_IN_DWORDS (INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
476 #define INT_BUF_SIZE_IN_CHUNKS (INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)
478 #define PCI_BUF_LINE_SIZE_IN_DWORDS 8
479 #define PCI_BUF_LINE_SIZE_IN_BYTES DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)
481 #define TARGET_EN_MASK_PCI 0x3
482 #define TARGET_EN_MASK_NIG 0x4
484 #define PCI_REQ_CREDIT 1
485 #define PCI_PHYS_ADDR_TYPE 0
487 #define OPAQUE_FID(pci_func) ((pci_func << 4) | 0xff00)
489 #define RESET_REG_UNRESET_OFFSET 4
491 #define PCI_PKT_SIZE_IN_CHUNKS 1
492 #define PCI_PKT_SIZE_IN_BYTES (PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)
494 #define NIG_PKT_SIZE_IN_CHUNKS 4
496 #define FLUSH_DELAY_MS 500
497 #define STALL_DELAY_MS 500
499 #define SRC_MAC_ADDR_LO16 0x0a0b
500 #define SRC_MAC_ADDR_HI32 0x0c0d0e0f
501 #define ETH_TYPE 0x1000
503 #define STATIC_DEBUG_LINE_DWORDS 9
505 #define NUM_COMMON_GLOBAL_PARAMS 8
507 #define FW_IMG_KUKU 0
508 #define FW_IMG_MAIN 1
511 #ifndef REG_FIFO_ELEMENT_DWORDS
512 #define REG_FIFO_ELEMENT_DWORDS 2
514 #define REG_FIFO_DEPTH_ELEMENTS 32
515 #define REG_FIFO_DEPTH_DWORDS (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
517 #ifndef IGU_FIFO_ELEMENT_DWORDS
518 #define IGU_FIFO_ELEMENT_DWORDS 4
520 #define IGU_FIFO_DEPTH_ELEMENTS 64
521 #define IGU_FIFO_DEPTH_DWORDS (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
523 #define SEMI_SYNC_FIFO_POLLING_DELAY_MS 5
524 #define SEMI_SYNC_FIFO_POLLING_COUNT 20
526 #ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
527 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
529 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
530 #define PROTECTION_OVERRIDE_DEPTH_DWORDS (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * PROTECTION_OVERRIDE_ELEMENT_DWORDS)
532 #define MCP_SPAD_TRACE_OFFSIZE_ADDR (MCP_REG_SCRATCH + OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))
534 #define EMPTY_FW_VERSION_STR "???_???_???_???"
535 #define EMPTY_FW_IMAGE_STR "???????????????"
538 /***************************** Constant Arrays *******************************/
546 #ifdef USE_DBG_BIN_FILE
547 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
549 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {
551 /* BIN_BUF_DBG_MODE_TREE */
552 { (const u32*)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},
554 /* BIN_BUF_DBG_DUMP_REG */
555 { dump_reg, OSAL_ARRAY_SIZE(dump_reg) },
557 /* BIN_BUF_DBG_DUMP_MEM */
558 { dump_mem, OSAL_ARRAY_SIZE(dump_mem) },
560 /* BIN_BUF_DBG_IDLE_CHK_REGS */
561 { idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },
563 /* BIN_BUF_DBG_IDLE_CHK_IMMS */
564 { idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },
566 /* BIN_BUF_DBG_IDLE_CHK_RULES */
567 { idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },
569 /* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
572 /* BIN_BUF_DBG_ATTN_BLOCKS */
573 { attn_block, OSAL_ARRAY_SIZE(attn_block) },
575 /* BIN_BUF_DBG_ATTN_REGSS */
576 { attn_reg, OSAL_ARRAY_SIZE(attn_reg) },
578 /* BIN_BUF_DBG_ATTN_INDEXES */
581 /* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
584 /* BIN_BUF_DBG_BUS_BLOCKS */
585 { dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },
587 /* BIN_BUF_DBG_BUS_LINES */
588 { dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },
590 /* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
593 /* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
596 /* BIN_BUF_DBG_PARSING_STRINGS */
601 /* Chip constant definitions array */
602 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
606 { { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
609 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
612 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
615 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },
620 { { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
623 { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
626 { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
629 { MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } },
634 { { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
637 { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
640 { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
643 { MAX_NUM_PORTS_E5, 8, MAX_NUM_VFS_E5 } } }
646 /* Storm constant definitions array */
647 static struct storm_defs s_storm_defs[] = {
651 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
652 TSEM_REG_FAST_MEMORY,
653 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
654 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
655 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
656 TCM_REG_CTX_RBC_ACCS,
657 4, TCM_REG_AGG_CON_CTX,
658 16, TCM_REG_SM_CON_CTX,
659 2, TCM_REG_AGG_TASK_CTX,
660 4, TCM_REG_SM_TASK_CTX },
664 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM }, false,
665 MSEM_REG_FAST_MEMORY,
666 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
667 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
668 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
669 MCM_REG_CTX_RBC_ACCS,
670 1, MCM_REG_AGG_CON_CTX,
671 10, MCM_REG_SM_CON_CTX,
672 2, MCM_REG_AGG_TASK_CTX,
673 7, MCM_REG_SM_TASK_CTX },
677 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
678 USEM_REG_FAST_MEMORY,
679 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
680 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
681 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
682 UCM_REG_CTX_RBC_ACCS,
683 2, UCM_REG_AGG_CON_CTX,
684 13, UCM_REG_SM_CON_CTX,
685 3, UCM_REG_AGG_TASK_CTX,
686 3, UCM_REG_SM_TASK_CTX },
690 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
691 XSEM_REG_FAST_MEMORY,
692 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
693 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
694 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
695 XCM_REG_CTX_RBC_ACCS,
696 9, XCM_REG_AGG_CON_CTX,
697 15, XCM_REG_SM_CON_CTX,
703 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY }, false,
704 YSEM_REG_FAST_MEMORY,
705 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
706 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
707 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
708 YCM_REG_CTX_RBC_ACCS,
709 2, YCM_REG_AGG_CON_CTX,
710 3, YCM_REG_SM_CON_CTX,
711 2, YCM_REG_AGG_TASK_CTX,
712 12, YCM_REG_SM_TASK_CTX },
716 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
717 PSEM_REG_FAST_MEMORY,
718 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
719 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
720 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
721 PCM_REG_CTX_RBC_ACCS,
723 10, PCM_REG_SM_CON_CTX,
728 /* Block definitions array */
730 static struct block_defs block_grc_defs = {
731 "grc", { true, true, true }, false, 0,
732 { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
733 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
734 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
735 GRC_REG_DBG_FORCE_FRAME,
736 true, false, DBG_RESET_REG_MISC_PL_UA, 1 };
738 static struct block_defs block_miscs_defs = {
739 "miscs", { true, true, true }, false, 0,
740 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
742 false, false, MAX_DBG_RESET_REGS, 0 };
744 static struct block_defs block_misc_defs = {
745 "misc", { true, true, true }, false, 0,
746 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
748 false, false, MAX_DBG_RESET_REGS, 0 };
750 static struct block_defs block_dbu_defs = {
751 "dbu", { true, true, true }, false, 0,
752 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
754 false, false, MAX_DBG_RESET_REGS, 0 };
756 static struct block_defs block_pglue_b_defs = {
757 "pglue_b", { true, true, true }, false, 0,
758 { DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
759 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
760 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
761 PGLUE_B_REG_DBG_FORCE_FRAME,
762 true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };
764 static struct block_defs block_cnig_defs = {
765 "cnig", { true, true, true }, false, 0,
766 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
767 CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
768 CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
769 CNIG_REG_DBG_FORCE_FRAME_K2_E5,
770 true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };
772 static struct block_defs block_cpmu_defs = {
773 "cpmu", { true, true, true }, false, 0,
774 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
776 true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };
778 static struct block_defs block_ncsi_defs = {
779 "ncsi", { true, true, true }, false, 0,
780 { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
781 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
782 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
783 NCSI_REG_DBG_FORCE_FRAME,
784 true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };
786 static struct block_defs block_opte_defs = {
787 "opte", { true, true, false }, false, 0,
788 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
790 true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };
792 static struct block_defs block_bmb_defs = {
793 "bmb", { true, true, true }, false, 0,
794 { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
795 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
796 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
797 BMB_REG_DBG_FORCE_FRAME,
798 true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };
800 static struct block_defs block_pcie_defs = {
801 "pcie", { true, true, true }, false, 0,
802 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
803 PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
804 PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
805 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
806 false, false, MAX_DBG_RESET_REGS, 0 };
808 static struct block_defs block_mcp_defs = {
809 "mcp", { true, true, true }, false, 0,
810 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
812 false, false, MAX_DBG_RESET_REGS, 0 };
814 static struct block_defs block_mcp2_defs = {
815 "mcp2", { true, true, true }, false, 0,
816 { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
817 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
818 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
819 MCP2_REG_DBG_FORCE_FRAME,
820 false, false, MAX_DBG_RESET_REGS, 0 };
822 static struct block_defs block_pswhst_defs = {
823 "pswhst", { true, true, true }, false, 0,
824 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
825 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
826 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
827 PSWHST_REG_DBG_FORCE_FRAME,
828 true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
830 static struct block_defs block_pswhst2_defs = {
831 "pswhst2", { true, true, true }, false, 0,
832 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
833 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
834 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
835 PSWHST2_REG_DBG_FORCE_FRAME,
836 true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
838 static struct block_defs block_pswrd_defs = {
839 "pswrd", { true, true, true }, false, 0,
840 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
841 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
842 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
843 PSWRD_REG_DBG_FORCE_FRAME,
844 true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
846 static struct block_defs block_pswrd2_defs = {
847 "pswrd2", { true, true, true }, false, 0,
848 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
849 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
850 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
851 PSWRD2_REG_DBG_FORCE_FRAME,
852 true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
854 static struct block_defs block_pswwr_defs = {
855 "pswwr", { true, true, true }, false, 0,
856 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
857 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
858 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
859 PSWWR_REG_DBG_FORCE_FRAME,
860 true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
862 static struct block_defs block_pswwr2_defs = {
863 "pswwr2", { true, true, true }, false, 0,
864 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
866 true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
868 static struct block_defs block_pswrq_defs = {
869 "pswrq", { true, true, true }, false, 0,
870 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
871 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
872 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
873 PSWRQ_REG_DBG_FORCE_FRAME,
874 true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
876 static struct block_defs block_pswrq2_defs = {
877 "pswrq2", { true, true, true }, false, 0,
878 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
879 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
880 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
881 PSWRQ2_REG_DBG_FORCE_FRAME,
882 true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
884 static struct block_defs block_pglcs_defs = {
885 "pglcs", { true, true, true }, false, 0,
886 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
887 PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
888 PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
889 PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
890 true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };
892 static struct block_defs block_ptu_defs ={
893 "ptu", { true, true, true }, false, 0,
894 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
895 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
896 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
897 PTU_REG_DBG_FORCE_FRAME,
898 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };
900 static struct block_defs block_dmae_defs = {
901 "dmae", { true, true, true }, false, 0,
902 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
903 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
904 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
905 DMAE_REG_DBG_FORCE_FRAME,
906 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };
908 static struct block_defs block_tcm_defs = {
909 "tcm", { true, true, true }, true, DBG_TSTORM_ID,
910 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
911 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
912 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
913 TCM_REG_DBG_FORCE_FRAME,
914 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };
916 static struct block_defs block_mcm_defs = {
917 "mcm", { true, true, true }, true, DBG_MSTORM_ID,
918 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
919 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
920 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
921 MCM_REG_DBG_FORCE_FRAME,
922 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };
924 static struct block_defs block_ucm_defs = {
925 "ucm", { true, true, true }, true, DBG_USTORM_ID,
926 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
927 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
928 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
929 UCM_REG_DBG_FORCE_FRAME,
930 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };
932 static struct block_defs block_xcm_defs = {
933 "xcm", { true, true, true }, true, DBG_XSTORM_ID,
934 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
935 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
936 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
937 XCM_REG_DBG_FORCE_FRAME,
938 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };
940 static struct block_defs block_ycm_defs = {
941 "ycm", { true, true, true }, true, DBG_YSTORM_ID,
942 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
943 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
944 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
945 YCM_REG_DBG_FORCE_FRAME,
946 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };
948 static struct block_defs block_pcm_defs = {
949 "pcm", { true, true, true }, true, DBG_PSTORM_ID,
950 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
951 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
952 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
953 PCM_REG_DBG_FORCE_FRAME,
954 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };
956 static struct block_defs block_qm_defs = {
957 "qm", { true, true, true }, false, 0,
958 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ },
959 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
960 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
961 QM_REG_DBG_FORCE_FRAME,
962 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };
964 static struct block_defs block_tm_defs = {
965 "tm", { true, true, true }, false, 0,
966 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
967 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
968 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
969 TM_REG_DBG_FORCE_FRAME,
970 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };
972 static struct block_defs block_dorq_defs = {
973 "dorq", { true, true, true }, false, 0,
974 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
975 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
976 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
977 DORQ_REG_DBG_FORCE_FRAME,
978 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };
980 static struct block_defs block_brb_defs = {
981 "brb", { true, true, true }, false, 0,
982 { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
983 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
984 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
985 BRB_REG_DBG_FORCE_FRAME,
986 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };
988 static struct block_defs block_src_defs = {
989 "src", { true, true, true }, false, 0,
990 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
991 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
992 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
993 SRC_REG_DBG_FORCE_FRAME,
994 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };
996 static struct block_defs block_prs_defs = {
997 "prs", { true, true, true }, false, 0,
998 { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
999 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
1000 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
1001 PRS_REG_DBG_FORCE_FRAME,
1002 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };
1004 static struct block_defs block_tsdm_defs = {
1005 "tsdm", { true, true, true }, true, DBG_TSTORM_ID,
1006 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1007 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
1008 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
1009 TSDM_REG_DBG_FORCE_FRAME,
1010 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };
1012 static struct block_defs block_msdm_defs = {
1013 "msdm", { true, true, true }, true, DBG_MSTORM_ID,
1014 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1015 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
1016 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
1017 MSDM_REG_DBG_FORCE_FRAME,
1018 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };
1020 static struct block_defs block_usdm_defs = {
1021 "usdm", { true, true, true }, true, DBG_USTORM_ID,
1022 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1023 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
1024 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
1025 USDM_REG_DBG_FORCE_FRAME,
1026 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
1028 static struct block_defs block_xsdm_defs = {
1029 "xsdm", { true, true, true }, true, DBG_XSTORM_ID,
1030 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1031 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
1032 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
1033 XSDM_REG_DBG_FORCE_FRAME,
1034 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };
1036 static struct block_defs block_ysdm_defs = {
1037 "ysdm", { true, true, true }, true, DBG_YSTORM_ID,
1038 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
1039 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
1040 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
1041 YSDM_REG_DBG_FORCE_FRAME,
1042 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };
1044 static struct block_defs block_psdm_defs = {
1045 "psdm", { true, true, true }, true, DBG_PSTORM_ID,
1046 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1047 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
1048 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
1049 PSDM_REG_DBG_FORCE_FRAME,
1050 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };
1052 static struct block_defs block_tsem_defs = {
1053 "tsem", { true, true, true }, true, DBG_TSTORM_ID,
1054 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1055 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
1056 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
1057 TSEM_REG_DBG_FORCE_FRAME,
1058 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };
1060 static struct block_defs block_msem_defs = {
1061 "msem", { true, true, true }, true, DBG_MSTORM_ID,
1062 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1063 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
1064 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
1065 MSEM_REG_DBG_FORCE_FRAME,
1066 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };
1068 static struct block_defs block_usem_defs = {
1069 "usem", { true, true, true }, true, DBG_USTORM_ID,
1070 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1071 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1072 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1073 USEM_REG_DBG_FORCE_FRAME,
1074 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };
1076 static struct block_defs block_xsem_defs = {
1077 "xsem", { true, true, true }, true, DBG_XSTORM_ID,
1078 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1079 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1080 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1081 XSEM_REG_DBG_FORCE_FRAME,
1082 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };
1084 static struct block_defs block_ysem_defs = {
1085 "ysem", { true, true, true }, true, DBG_YSTORM_ID,
1086 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
1087 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1088 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1089 YSEM_REG_DBG_FORCE_FRAME,
1090 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };
1092 static struct block_defs block_psem_defs = {
1093 "psem", { true, true, true }, true, DBG_PSTORM_ID,
1094 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1095 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1096 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1097 PSEM_REG_DBG_FORCE_FRAME,
1098 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };
1100 static struct block_defs block_rss_defs = {
1101 "rss", { true, true, true }, false, 0,
1102 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1103 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1104 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1105 RSS_REG_DBG_FORCE_FRAME,
1106 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };
1108 static struct block_defs block_tmld_defs = {
1109 "tmld", { true, true, true }, false, 0,
1110 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1111 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1112 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1113 TMLD_REG_DBG_FORCE_FRAME,
1114 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };
1116 static struct block_defs block_muld_defs = {
1117 "muld", { true, true, true }, false, 0,
1118 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1119 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1120 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1121 MULD_REG_DBG_FORCE_FRAME,
1122 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };
1124 static struct block_defs block_yuld_defs = {
1125 "yuld", { true, true, false }, false, 0,
1126 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, MAX_DBG_BUS_CLIENTS },
1127 YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1128 YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1129 YULD_REG_DBG_FORCE_FRAME_BB_K2,
1130 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };
1132 static struct block_defs block_xyld_defs = {
1133 "xyld", { true, true, true }, false, 0,
1134 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1135 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1136 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1137 XYLD_REG_DBG_FORCE_FRAME,
1138 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };
1140 static struct block_defs block_prm_defs = {
1141 "prm", { true, true, true }, false, 0,
1142 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1143 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1144 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1145 PRM_REG_DBG_FORCE_FRAME,
1146 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };
1148 static struct block_defs block_pbf_pb1_defs = {
1149 "pbf_pb1", { true, true, true }, false, 0,
1150 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1151 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1152 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1153 PBF_PB1_REG_DBG_FORCE_FRAME,
1154 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };
1156 static struct block_defs block_pbf_pb2_defs = {
1157 "pbf_pb2", { true, true, true }, false, 0,
1158 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1159 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1160 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1161 PBF_PB2_REG_DBG_FORCE_FRAME,
1162 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };
1164 static struct block_defs block_rpb_defs = {
1165 "rpb", { true, true, true }, false, 0,
1166 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1167 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1168 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1169 RPB_REG_DBG_FORCE_FRAME,
1170 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };
1172 static struct block_defs block_btb_defs = {
1173 "btb", { true, true, true }, false, 0,
1174 { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1175 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1176 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1177 BTB_REG_DBG_FORCE_FRAME,
1178 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };
1180 static struct block_defs block_pbf_defs = {
1181 "pbf", { true, true, true }, false, 0,
1182 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1183 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1184 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1185 PBF_REG_DBG_FORCE_FRAME,
1186 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };
1188 static struct block_defs block_rdif_defs = {
1189 "rdif", { true, true, true }, false, 0,
1190 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1191 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1192 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1193 RDIF_REG_DBG_FORCE_FRAME,
1194 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };
1196 static struct block_defs block_tdif_defs = {
1197 "tdif", { true, true, true }, false, 0,
1198 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1199 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1200 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1201 TDIF_REG_DBG_FORCE_FRAME,
1202 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };
1204 static struct block_defs block_cdu_defs = {
1205 "cdu", { true, true, true }, false, 0,
1206 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1207 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1208 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1209 CDU_REG_DBG_FORCE_FRAME,
1210 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };
1212 static struct block_defs block_ccfc_defs = {
1213 "ccfc", { true, true, true }, false, 0,
1214 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1215 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1216 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1217 CCFC_REG_DBG_FORCE_FRAME,
1218 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };
1220 static struct block_defs block_tcfc_defs = {
1221 "tcfc", { true, true, true }, false, 0,
1222 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1223 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1224 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1225 TCFC_REG_DBG_FORCE_FRAME,
1226 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };
1228 static struct block_defs block_igu_defs = {
1229 "igu", { true, true, true }, false, 0,
1230 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1231 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1232 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1233 IGU_REG_DBG_FORCE_FRAME,
1234 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };
1236 static struct block_defs block_cau_defs = {
1237 "cau", { true, true, true }, false, 0,
1238 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1239 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1240 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1241 CAU_REG_DBG_FORCE_FRAME,
1242 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };
1244 static struct block_defs block_umac_defs = {
1245 "umac", { true, true, true }, false, 0,
1246 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1247 UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1248 UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1249 UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1250 true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };
1252 static struct block_defs block_xmac_defs = {
1253 "xmac", { true, false, false }, false, 0,
1254 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1256 false, false, MAX_DBG_RESET_REGS, 0 };
1258 static struct block_defs block_dbg_defs = {
1259 "dbg", { true, true, true }, false, 0,
1260 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1262 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };
1264 static struct block_defs block_nig_defs = {
1265 "nig", { true, true, true }, false, 0,
1266 { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
1267 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1268 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1269 NIG_REG_DBG_FORCE_FRAME,
1270 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };
1272 static struct block_defs block_wol_defs = {
1273 "wol", { false, true, true }, false, 0,
1274 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1275 WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1276 WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1277 WOL_REG_DBG_FORCE_FRAME_K2_E5,
1278 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };
1280 static struct block_defs block_bmbn_defs = {
1281 "bmbn", { false, true, true }, false, 0,
1282 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
1283 BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1284 BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1285 BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1286 false, false, MAX_DBG_RESET_REGS, 0 };
1288 static struct block_defs block_ipc_defs = {
1289 "ipc", { true, true, true }, false, 0,
1290 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1292 true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };
1294 static struct block_defs block_nwm_defs = {
1295 "nwm", { false, true, true }, false, 0,
1296 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
1297 NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1298 NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1299 NWM_REG_DBG_FORCE_FRAME_K2_E5,
1300 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };
1302 static struct block_defs block_nws_defs = {
1303 "nws", { false, true, true }, false, 0,
1304 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
1305 NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1306 NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1307 NWS_REG_DBG_FORCE_FRAME_K2_E5,
1308 true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };
1310 static struct block_defs block_ms_defs = {
1311 "ms", { false, true, true }, false, 0,
1312 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1313 MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1314 MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1315 MS_REG_DBG_FORCE_FRAME_K2_E5,
1316 true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };
1318 static struct block_defs block_phy_pcie_defs = {
1319 "phy_pcie", { false, true, true }, false, 0,
1320 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
1321 PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1322 PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1323 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1324 false, false, MAX_DBG_RESET_REGS, 0 };
1326 static struct block_defs block_led_defs = {
1327 "led", { false, true, true }, false, 0,
1328 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1330 true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };
1332 static struct block_defs block_avs_wrap_defs = {
1333 "avs_wrap", { false, true, false }, false, 0,
1334 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1336 true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };
1338 /* TODO: add debug bus parameters when E5 RGFS RF is added */
1339 static struct block_defs block_rgfs_defs = {
1340 "rgfs", { false, false, true }, false, 0,
1341 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1343 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 };
1345 static struct block_defs block_rgsrc_defs = {
1346 "rgsrc", { false, false, true }, false, 0,
1347 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
1348 RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1349 RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1350 RGSRC_REG_DBG_FORCE_FRAME_E5,
1351 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 30 };
1353 /* TODO: add debug bus parameters when E5 TGFS RF is added */
1354 static struct block_defs block_tgfs_defs = {
1355 "tgfs", { false, false, true }, false, 0,
1356 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1358 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 };
1360 static struct block_defs block_tgsrc_defs = {
1361 "tgsrc", { false, false, true }, false, 0,
1362 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV },
1363 TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1364 TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1365 TGSRC_REG_DBG_FORCE_FRAME_E5,
1366 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 31 };
1368 static struct block_defs block_ptld_defs = {
1369 "ptld", { false, false, true }, false, 0,
1370 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT },
1371 PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1372 PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1373 PTLD_REG_DBG_FORCE_FRAME_E5,
1374 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 28 };
1376 static struct block_defs block_ypld_defs = {
1377 "ypld", { false, false, true }, false, 0,
1378 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS },
1379 YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1380 YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1381 YPLD_REG_DBG_FORCE_FRAME_E5,
1382 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 27 };
1384 static struct block_defs block_misc_aeu_defs = {
1385 "misc_aeu", { true, true, true }, false, 0,
1386 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1388 false, false, MAX_DBG_RESET_REGS, 0 };
1390 static struct block_defs block_bar0_map_defs = {
1391 "bar0_map", { true, true, true }, false, 0,
1392 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1394 false, false, MAX_DBG_RESET_REGS, 0 };
1397 static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
1402 &block_pglue_b_defs,
1412 &block_pswhst2_defs,
1454 &block_pbf_pb1_defs,
1455 &block_pbf_pb2_defs,
1480 &block_phy_pcie_defs,
1482 &block_avs_wrap_defs,
1483 &block_misc_aeu_defs,
1484 &block_bar0_map_defs,
1489 /* Constraint operation types */
1490 static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
1492 /* DBG_BUS_CONSTRAINT_OP_EQ */
1495 /* DBG_BUS_CONSTRAINT_OP_NE */
1498 /* DBG_BUS_CONSTRAINT_OP_LT */
1501 /* DBG_BUS_CONSTRAINT_OP_LTC */
1504 /* DBG_BUS_CONSTRAINT_OP_LE */
1507 /* DBG_BUS_CONSTRAINT_OP_LEC */
1510 /* DBG_BUS_CONSTRAINT_OP_GT */
1513 /* DBG_BUS_CONSTRAINT_OP_GTC */
1516 /* DBG_BUS_CONSTRAINT_OP_GE */
1519 /* DBG_BUS_CONSTRAINT_OP_GEC */
1523 static const char* s_dbg_target_names[] = {
1525 /* DBG_BUS_TARGET_ID_INT_BUF */
1528 /* DBG_BUS_TARGET_ID_NIG */
1531 /* DBG_BUS_TARGET_ID_PCI */
1535 static struct storm_mode_defs s_storm_mode_defs[] = {
1537 /* DBG_BUS_STORM_MODE_PRINTF */
1538 { "printf", true, 0 },
1540 /* DBG_BUS_STORM_MODE_PRAM_ADDR */
1541 { "pram_addr", true, 1 },
1543 /* DBG_BUS_STORM_MODE_DRA_RW */
1544 { "dra_rw", true, 2 },
1546 /* DBG_BUS_STORM_MODE_DRA_W */
1547 { "dra_w", true, 3 },
1549 /* DBG_BUS_STORM_MODE_LD_ST_ADDR */
1550 { "ld_st_addr", true, 4 },
1552 /* DBG_BUS_STORM_MODE_DRA_FSM */
1553 { "dra_fsm", true, 5 },
1555 /* DBG_BUS_STORM_MODE_RH */
1558 /* DBG_BUS_STORM_MODE_FOC */
1559 { "foc", false, 1 },
1561 /* DBG_BUS_STORM_MODE_EXT_STORE */
1562 { "ext_store", false, 3 }
1565 static struct platform_defs s_platform_defs[] = {
1570 /* PLATFORM_EMUL_FULL */
1571 { "emul_full", 2000 },
1573 /* PLATFORM_EMUL_REDUCED */
1574 { "emul_reduced", 2000 },
1580 static struct grc_param_defs s_grc_param_defs[] = {
1582 /* DBG_GRC_PARAM_DUMP_TSTORM */
1583 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1585 /* DBG_GRC_PARAM_DUMP_MSTORM */
1586 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1588 /* DBG_GRC_PARAM_DUMP_USTORM */
1589 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1591 /* DBG_GRC_PARAM_DUMP_XSTORM */
1592 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1594 /* DBG_GRC_PARAM_DUMP_YSTORM */
1595 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1597 /* DBG_GRC_PARAM_DUMP_PSTORM */
1598 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1600 /* DBG_GRC_PARAM_DUMP_REGS */
1601 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1603 /* DBG_GRC_PARAM_DUMP_RAM */
1604 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1606 /* DBG_GRC_PARAM_DUMP_PBUF */
1607 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1609 /* DBG_GRC_PARAM_DUMP_IOR */
1610 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1612 /* DBG_GRC_PARAM_DUMP_VFC */
1613 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1615 /* DBG_GRC_PARAM_DUMP_CM_CTX */
1616 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1618 /* DBG_GRC_PARAM_DUMP_ILT */
1619 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1621 /* DBG_GRC_PARAM_DUMP_RSS */
1622 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1624 /* DBG_GRC_PARAM_DUMP_CAU */
1625 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1627 /* DBG_GRC_PARAM_DUMP_QM */
1628 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1630 /* DBG_GRC_PARAM_DUMP_MCP */
1631 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1633 /* DBG_GRC_PARAM_RESERVED */
1634 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1636 /* DBG_GRC_PARAM_DUMP_CFC */
1637 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1639 /* DBG_GRC_PARAM_DUMP_IGU */
1640 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1642 /* DBG_GRC_PARAM_DUMP_BRB */
1643 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1645 /* DBG_GRC_PARAM_DUMP_BTB */
1646 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1648 /* DBG_GRC_PARAM_DUMP_BMB */
1649 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1651 /* DBG_GRC_PARAM_DUMP_NIG */
1652 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1654 /* DBG_GRC_PARAM_DUMP_MULD */
1655 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1657 /* DBG_GRC_PARAM_DUMP_PRS */
1658 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1660 /* DBG_GRC_PARAM_DUMP_DMAE */
1661 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1663 /* DBG_GRC_PARAM_DUMP_TM */
1664 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1666 /* DBG_GRC_PARAM_DUMP_SDM */
1667 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1669 /* DBG_GRC_PARAM_DUMP_DIF */
1670 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1672 /* DBG_GRC_PARAM_DUMP_STATIC */
1673 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1675 /* DBG_GRC_PARAM_UNSTALL */
1676 { { 0, 0, 0 }, 0, 1, false, 0, 0 },
1678 /* DBG_GRC_PARAM_NUM_LCIDS */
1679 { { MAX_LCIDS, MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },
1681 /* DBG_GRC_PARAM_NUM_LTIDS */
1682 { { MAX_LTIDS, MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },
1684 /* DBG_GRC_PARAM_EXCLUDE_ALL */
1685 { { 0, 0, 0 }, 0, 1, true, 0, 0 },
1687 /* DBG_GRC_PARAM_CRASH */
1688 { { 0, 0, 0 }, 0, 1, true, 0, 0 },
1690 /* DBG_GRC_PARAM_PARITY_SAFE */
1691 { { 0, 0, 0 }, 0, 1, false, 1, 0 },
1693 /* DBG_GRC_PARAM_DUMP_CM */
1694 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1696 /* DBG_GRC_PARAM_DUMP_PHY */
1697 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1699 /* DBG_GRC_PARAM_NO_MCP */
1700 { { 0, 0, 0 }, 0, 1, false, 0, 0 },
1702 /* DBG_GRC_PARAM_NO_FW_VER */
1703 { { 0, 0, 0 }, 0, 1, false, 0, 0 }
1706 static struct rss_mem_defs s_rss_mem_defs[] = {
1707 { "rss_mem_cid", "rss_cid", 0, 32,
1708 { 256, 320, 512 } },
1710 { "rss_mem_key_msb", "rss_key", 1024, 256,
1711 { 128, 208, 257 } },
1713 { "rss_mem_key_lsb", "rss_key", 2048, 64,
1714 { 128, 208, 257 } },
1716 { "rss_mem_info", "rss_info", 3072, 16,
1717 { 128, 208, 256 } },
1719 { "rss_mem_ind", "rss_ind", 4096, 16,
1720 { 16384, 26624, 32768 } }
1723 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1724 { "vfc_ram_tt1", "vfc_ram", 0, 512 },
1725 { "vfc_ram_mtt2", "vfc_ram", 512, 128 },
1726 { "vfc_ram_stt2", "vfc_ram", 640, 32 },
1727 { "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
1730 static struct big_ram_defs s_big_ram_defs[] = {
1731 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1732 { 4800, 5632, 4416 } },
1734 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1735 { 2880, 3680, 2640 } },
1737 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1738 { 1152, 1152, 1152 } }
1741 static struct reset_reg_defs s_reset_regs_defs[] = {
1743 /* DBG_RESET_REG_MISCS_PL_UA */
1744 { MISCS_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1746 /* DBG_RESET_REG_MISCS_PL_HV */
1747 { MISCS_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x400, 0x600 } },
1749 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1750 { MISCS_REG_RESET_PL_HV_2_K2_E5, { false, true, true }, { 0x0, 0x0, 0x0 } },
1752 /* DBG_RESET_REG_MISC_PL_UA */
1753 { MISC_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1755 /* DBG_RESET_REG_MISC_PL_HV */
1756 { MISC_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x0, 0x0 } },
1758 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1759 { MISC_REG_RESET_PL_PDA_VMAIN_1, { true, true, true }, { 0x4404040, 0x4404040, 0x404040 } },
1761 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1762 { MISC_REG_RESET_PL_PDA_VMAIN_2, { true, true, true }, { 0x7, 0x7c00007, 0x5c08007 } },
1764 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1765 { MISC_REG_RESET_PL_PDA_VAUX, { true, true, true }, { 0x2, 0x2, 0x2 } },
1768 static struct phy_defs s_phy_defs[] = {
1769 { "nw_phy", NWS_REG_NWS_CMU_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
1770 { "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1771 { "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1772 { "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1775 /* The order of indexes that should be applied to a PCI buffer line */
1776 static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
1778 /******************************** Variables **********************************/
1780 /* The version of the calling app */
1781 static u32 s_app_ver;
1783 /**************************** Private Functions ******************************/
1785 static void ecore_static_asserts(void)
1787 CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
1788 CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
1789 CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
1790 CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
1791 CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
1792 CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
1793 CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
1794 CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
1795 CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
1796 CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
1797 CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
1798 CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
1801 /* Reads and returns a single dword from the specified unaligned buffer. */
1802 static u32 ecore_read_unaligned_dword(u8 *buf)
1806 OSAL_MEMCPY((u8*)&dword, buf, sizeof(dword));
1810 /* Returns the difference in bytes between the specified physical addresses.
1811 * Assumes that the first address is bigger then the second, and that the
1812 * difference is a 32-bit value.
1814 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1815 struct dbg_bus_mem_addr *b)
1817 return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1820 /* Sets the value of the specified GRC param */
1821 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1822 enum dbg_grc_params grc_param,
1825 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1827 dev_data->grc.param_val[grc_param] = val;
1830 /* Returns the value of the specified GRC param */
1831 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1832 enum dbg_grc_params grc_param)
1834 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1836 return dev_data->grc.param_val[grc_param];
1839 /* Initializes the GRC parameters */
1840 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1842 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1844 if (!dev_data->grc.params_initialized) {
1845 ecore_dbg_grc_set_params_default(p_hwfn);
1846 dev_data->grc.params_initialized = 1;
1850 /* Initializes debug data for the specified device */
1851 static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
1852 struct ecore_ptt *p_ptt)
1854 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1856 if (dev_data->initialized)
1857 return DBG_STATUS_OK;
1860 return DBG_STATUS_APP_VERSION_NOT_SET;
1862 if (ECORE_IS_E5(p_hwfn->p_dev)) {
1863 dev_data->chip_id = CHIP_E5;
1864 dev_data->mode_enable[MODE_E5] = 1;
1866 else if (ECORE_IS_K2(p_hwfn->p_dev)) {
1867 dev_data->chip_id = CHIP_K2;
1868 dev_data->mode_enable[MODE_K2] = 1;
1870 else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
1871 dev_data->chip_id = CHIP_BB;
1872 dev_data->mode_enable[MODE_BB] = 1;
1875 return DBG_STATUS_UNKNOWN_CHIP;
1879 dev_data->platform_id = PLATFORM_ASIC;
1880 dev_data->mode_enable[MODE_ASIC] = 1;
1882 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
1883 dev_data->platform_id = PLATFORM_ASIC;
1884 dev_data->mode_enable[MODE_ASIC] = 1;
1886 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1887 if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
1888 dev_data->platform_id = PLATFORM_EMUL_FULL;
1889 dev_data->mode_enable[MODE_EMUL_FULL] = 1;
1892 dev_data->platform_id = PLATFORM_EMUL_REDUCED;
1893 dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
1896 else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1897 dev_data->platform_id = PLATFORM_FPGA;
1898 dev_data->mode_enable[MODE_FPGA] = 1;
1901 return DBG_STATUS_UNKNOWN_CHIP;
1905 /* Initializes the GRC parameters */
1906 ecore_dbg_grc_init_params(p_hwfn);
1908 dev_data->initialized = true;
1910 return DBG_STATUS_OK;
1913 static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1914 enum block_id block_id)
1916 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1918 return (struct dbg_bus_block*)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1921 /* Returns OSAL_NULL for signature line, latency line and non-existing lines */
1922 static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
1923 enum block_id block_id)
1925 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1926 struct dbg_bus_block_data *block_bus;
1927 struct dbg_bus_block *block_desc;
1929 block_bus = &dev_data->bus.blocks[block_id];
1930 block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
1932 if (!block_bus->line_num ||
1933 (block_bus->line_num == 1 && block_desc->has_latency_events) ||
1934 block_bus->line_num >= NUM_DBG_LINES(block_desc))
1937 return (struct dbg_bus_line*)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
1940 /* Reads the FW info structure for the specified Storm from the chip,
1941 * and writes it to the specified fw_info pointer.
1943 static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
1944 struct ecore_ptt *p_ptt,
1946 struct fw_info *fw_info)
1948 struct storm_defs *storm = &s_storm_defs[storm_id];
1949 struct fw_info_location fw_info_location;
1952 OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
1953 OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));
1955 /* Read first the address that points to fw_info location.
1956 * The address is located in the last line of the Storm RAM.
1958 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - sizeof(fw_info_location);
1959 dest = (u32*)&fw_info_location;
1961 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
1962 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1964 /* Read FW version info from Storm RAM */
1965 if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
1966 addr = fw_info_location.grc_addr;
1967 dest = (u32*)fw_info;
1968 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
1969 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1973 /* Dumps the specified string to the specified buffer.
1974 * Returns the dumped size in bytes.
1976 static u32 ecore_dump_str(char *dump_buf,
1981 OSAL_STRCPY(dump_buf, str);
1983 return (u32)OSAL_STRLEN(str) + 1;
1986 /* Dumps zeros to align the specified buffer to dwords.
1987 * Returns the dumped size in bytes.
1989 static u32 ecore_dump_align(char *dump_buf,
1993 u8 offset_in_dword, align_size;
1995 offset_in_dword = (u8)(byte_offset & 0x3);
1996 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1998 if (dump && align_size)
1999 OSAL_MEMSET(dump_buf, 0, align_size);
2004 /* Writes the specified string param to the specified buffer.
2005 * Returns the dumped size in dwords.
2007 static u32 ecore_dump_str_param(u32 *dump_buf,
2009 const char *param_name,
2010 const char *param_val)
2012 char *char_buf = (char*)dump_buf;
2015 /* Dump param name */
2016 offset += ecore_dump_str(char_buf + offset, dump, param_name);
2018 /* Indicate a string param value */
2020 *(char_buf + offset) = 1;
2023 /* Dump param value */
2024 offset += ecore_dump_str(char_buf + offset, dump, param_val);
2026 /* Align buffer to next dword */
2027 offset += ecore_dump_align(char_buf + offset, dump, offset);
2029 return BYTES_TO_DWORDS(offset);
2032 /* Writes the specified numeric param to the specified buffer.
2033 * Returns the dumped size in dwords.
2035 static u32 ecore_dump_num_param(u32 *dump_buf,
2037 const char *param_name,
2040 char *char_buf = (char*)dump_buf;
2043 /* Dump param name */
2044 offset += ecore_dump_str(char_buf + offset, dump, param_name);
2046 /* Indicate a numeric param value */
2048 *(char_buf + offset) = 0;
2051 /* Align buffer to next dword */
2052 offset += ecore_dump_align(char_buf + offset, dump, offset);
2054 /* Dump param value (and change offset from bytes to dwords) */
2055 offset = BYTES_TO_DWORDS(offset);
2057 *(dump_buf + offset) = param_val;
2063 /* Reads the FW version and writes it as a param to the specified buffer.
2064 * Returns the dumped size in dwords.
2066 static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
2067 struct ecore_ptt *p_ptt,
2071 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2072 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
2073 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
2074 struct fw_info fw_info = { { 0 }, { 0 } };
2077 if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2078 /* Read FW image/version from PRAM in a non-reset SEMI */
2082 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
2083 struct storm_defs *storm = &s_storm_defs[storm_id];
2085 /* Read FW version/image */
2086 if (dev_data->block_in_reset[storm->block_id])
2089 /* Read FW info for the current Storm */
2090 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
2092 /* Create FW version/image strings */
2093 if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
2094 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
2095 switch (fw_info.ver.image_id) {
2096 case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
2097 case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
2098 case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
2099 default: OSAL_STRCPY(fw_img_str, "unknown"); break;
2106 /* Dump FW version, image and timestamp */
2107 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
2108 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
2109 offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);
2114 /* Reads the MFW version and writes it as a param to the specified buffer.
2115 * Returns the dumped size in dwords.
2117 static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
2118 struct ecore_ptt *p_ptt,
2122 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2123 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2125 if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2126 u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;
2128 /* Find MCP public data GRC address. Needs to be ORed with
2129 * MCP_REG_SCRATCH due to a HW bug.
2131 public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;
2133 /* Find MCP public global section offset */
2134 global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
2135 global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
2136 global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2138 /* Read MFW version from MCP public global section */
2139 mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));
2141 /* Dump MFW version param */
2142 if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2143 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
2146 return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2149 /* Writes a section header to the specified buffer.
2150 * Returns the dumped size in dwords.
2152 static u32 ecore_dump_section_hdr(u32 *dump_buf,
2157 return ecore_dump_num_param(dump_buf, dump, name, num_params);
2160 /* Writes the common global params to the specified buffer.
2161 * Returns the dumped size in dwords.
2163 static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
2164 struct ecore_ptt *p_ptt,
2167 u8 num_specific_global_params)
2169 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2173 /* Dump global params section header */
2174 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2175 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);
2178 offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2179 offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2180 offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
2181 offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
2182 offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
2183 offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);
2188 /* Writes the "last" section (including CRC) to the specified buffer at the
2189 * given offset. Returns the dumped size in dwords.
2191 static u32 ecore_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2193 u32 start_offset = offset;
2195 /* Dump CRC section header */
2196 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2198 /* Calculate CRC32 and add it to the dword after the "last" section */
2200 *(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8*)dump_buf, DWORDS_TO_BYTES(offset));
2204 return offset - start_offset;
2207 /* Update blocks reset state */
2208 static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
2209 struct ecore_ptt *p_ptt)
2211 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2212 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2215 /* Read reset registers */
2216 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2217 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2218 reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);
2220 /* Check if blocks are in reset */
2221 for (i = 0; i < MAX_BLOCK_ID; i++) {
2222 struct block_defs *block = s_block_defs[i];
2224 dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
2228 /* Enable / disable the Debug block */
2229 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2230 struct ecore_ptt *p_ptt,
2233 ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2236 /* Resets the Debug block */
2237 static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
2238 struct ecore_ptt *p_ptt)
2240 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2241 struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2243 dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2244 old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2245 new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);
2247 ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2248 ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2251 static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
2252 struct ecore_ptt *p_ptt,
2253 enum dbg_bus_frame_modes mode)
2255 ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2258 /* Enable / disable Debug Bus clients according to the specified mask
2259 * (1 = enable, 0 = disable).
2261 static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
2262 struct ecore_ptt *p_ptt,
2265 ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2268 /* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID. */
2269 static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
2270 struct ecore_ptt *p_ptt,
2271 enum dbg_storms storm_id,
2272 enum dbg_bus_filter_types filter_type)
2274 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2275 u32 base_addr, sem_filter_params = filter_type;
2276 struct dbg_bus_storm_data *storm_bus;
2277 struct storm_mode_defs *storm_mode;
2278 struct storm_defs *storm;
2280 storm = &s_storm_defs[storm_id];
2281 storm_bus = &dev_data->bus.storms[storm_id];
2282 storm_mode = &s_storm_mode_defs[storm_bus->mode];
2283 base_addr = storm->sem_fast_mem_addr;
2286 if (storm_mode->is_fast_dbg) {
2288 /* Enable fast debug */
2289 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
2290 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
2291 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);
2293 /* Enable all messages except STORE. Must be done after
2294 * enabling SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
2295 * be dropped after the SEMI sync fifo is filled.
2297 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE);
2301 /* Ensable slow debug */
2302 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
2303 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
2304 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
2305 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
2308 /* Config SEM cid filter */
2309 if (storm_bus->cid_filter_en) {
2310 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
2311 sem_filter_params |= SEM_FILTER_CID_EN_MASK;
2314 /* Config SEM eid filter */
2315 if (storm_bus->eid_filter_en) {
2316 const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;
2318 if (storm_bus->eid_range_not_mask) {
2319 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
2320 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
2321 sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
2324 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
2325 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
2326 sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
2330 /* Config accumulaed SEM filter parameters (if any) */
2331 if (sem_filter_params)
2332 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
2335 /* Disables Debug Bus block inputs */
2336 static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2337 struct ecore_ptt *p_ptt,
2338 bool empty_semi_fifos)
2340 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2341 u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2342 bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2345 /* Disable messages output in all Storms */
2346 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2347 struct storm_defs *storm = &s_storm_defs[storm_id];
2349 if (!dev_data->block_in_reset[storm->block_id])
2350 ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE);
2353 /* Try to empty the SEMI sync fifo. Must be done after messages output
2354 * were disabled in all Storms (i.e. SEM_FAST_REG_DBG_MODE6_SRC_DISABLE
2355 * was set to all 1's.
2357 while (num_fifos_to_empty) {
2358 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2359 struct storm_defs *storm = &s_storm_defs[storm_id];
2361 if (is_fifo_empty[storm_id])
2364 /* Check if sync fifo got empty */
2365 if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2366 is_fifo_empty[storm_id] = true;
2367 num_fifos_to_empty--;
2371 /* Check if need to continue polling */
2372 if (num_fifos_to_empty) {
2373 u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
2374 u32 polling_count = 0;
2376 if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2377 OSAL_MSLEEP(polling_ms);
2381 DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2387 /* Disable debug in all Storms */
2388 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2389 struct storm_defs *storm = &s_storm_defs[storm_id];
2390 u32 base_addr = storm->sem_fast_mem_addr;
2392 if (dev_data->block_in_reset[storm->block_id])
2395 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2396 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2397 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2398 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2401 /* Disable all clients */
2402 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2404 /* Disable all blocks */
2405 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2406 struct block_defs *block = s_block_defs[block_id];
2408 if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS && !dev_data->block_in_reset[block_id])
2409 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2412 /* Disable timestamp */
2413 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2415 /* Disable filters and triggers */
2416 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2417 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2419 return DBG_STATUS_OK;
2422 /* Sets a Debug Bus trigger/filter constraint */
2423 static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
2424 struct ecore_ptt *p_ptt,
2437 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2438 u32 reg_offset = constraint_id * BYTES_IN_DWORD;
2439 u8 curr_trigger_state;
2441 /* For trigger only - set register offset according to state */
2443 curr_trigger_state = dev_data->bus.next_trigger_state - 1;
2444 reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
2447 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
2448 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
2449 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
2450 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
2451 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
2452 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
2453 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
2454 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
2455 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
2458 /* Reads the specified DBG Bus internal buffer range and copy it to the
2459 * specified buffer. Returns the dumped size in dwords.
2461 static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
2462 struct ecore_ptt *p_ptt,
2468 u32 line, reg_addr, i, offset = 0;
2471 return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;
2473 for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
2475 line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
2476 for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
2477 dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);
2482 /* Reads the DBG Bus internal buffer and copy its contents to a buffer.
2483 * Returns the dumped size in dwords.
2485 static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
2486 struct ecore_ptt *p_ptt,
2490 u32 last_written_line, offset = 0;
2492 last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);
2494 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
2496 /* Internal buffer was wrapped: first dump from write pointer
2497 * to buffer end, then dump from buffer start to write pointer.
2499 if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
2500 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
2501 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2503 else if (last_written_line) {
2505 /* Internal buffer wasn't wrapped: dump from buffer start until
2508 if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
2509 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2511 DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
2517 /* Reads the specified DBG Bus PCI buffer range and copy it to the specified
2518 * buffer. Returns the dumped size in dwords.
2520 static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
2526 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2529 /* Extract PCI buffer pointer from virtual address */
2530 void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2531 u32 *pci_buf_start = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
2532 u32 *pci_buf, line, i;
2535 return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;
2537 for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
2539 line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
2540 for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
2541 dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;
2546 /* Copies the DBG Bus PCI buffer to the specified buffer.
2547 * Returns the dumped size in dwords.
2549 static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
2550 struct ecore_ptt *p_ptt,
2554 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2555 u32 next_wr_byte_offset, next_wr_line_offset;
2556 struct dbg_bus_mem_addr next_wr_phys_addr;
2557 u32 pci_buf_size_in_lines, offset = 0;
2559 pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;
2561 /* Extract write pointer (physical address) */
2562 next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
2563 next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);
2565 /* Convert write pointer to offset */
2566 next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
2567 if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
2569 next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;
2571 /* PCI buffer wrapped: first dump from write pointer to buffer end. */
2572 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
2573 offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);
2575 /* Dump from buffer start until write pointer */
2576 if (next_wr_line_offset)
2577 offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);
2582 /* Copies the DBG Bus recorded data to the specified buffer.
2583 * Returns the dumped size in dwords.
2585 static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2586 struct ecore_ptt *p_ptt,
2590 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2592 switch (dev_data->bus.target) {
2593 case DBG_BUS_TARGET_ID_INT_BUF:
2594 return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2595 case DBG_BUS_TARGET_ID_PCI:
2596 return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2604 /* Frees the Debug Bus PCI buffer */
2605 static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
2607 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2608 dma_addr_t pci_buf_phys_addr;
2612 /* Extract PCI buffer pointer from virtual address */
2613 virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2614 pci_buf = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
2616 if (!dev_data->bus.pci_buf.size)
2619 OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));
2621 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);
2623 dev_data->bus.pci_buf.size = 0;
2626 /* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
2627 * Returns the dumped size in dwords.
2629 static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
2633 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2634 char storm_name[8] = "?storm";
2635 u32 block_id, offset = 0;
2639 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2640 struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
2641 struct storm_defs *storm = &s_storm_defs[storm_id];
2643 if (!dev_data->bus.storms[storm_id].enabled)
2646 /* Dump section header */
2647 storm_name[0] = storm->letter;
2648 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
2649 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
2650 offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
2651 offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
2655 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2656 struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
2657 struct block_defs *block = s_block_defs[block_id];
2659 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
2662 /* Dump section header */
2663 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
2664 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
2665 offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
2666 offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
2667 offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
2673 /* Dumps the Debug Bus header (params, inputs, data header) to the specified
2674 * buffer. Returns the dumped size in dwords.
2676 static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
2677 struct ecore_ptt *p_ptt,
2681 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2682 char hw_id_mask_str[16];
2685 if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
2686 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");
2688 /* Dump global params */
2689 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
2690 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
2691 offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
2692 offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
2693 offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
2694 offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);
2696 offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);
2698 if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
2699 u32 recorded_dwords = 0;
2702 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);
2704 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
2705 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
2711 static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
2712 u16 *modes_buf_offset)
2714 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2718 /* Get next element from modes tree buffer */
2719 tree_val = ((u8*)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2722 case INIT_MODE_OP_NOT:
2723 return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2724 case INIT_MODE_OP_OR:
2725 case INIT_MODE_OP_AND:
2726 arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2727 arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2728 return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
2729 default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2733 /* Returns true if the specified entity (indicated by GRC param) should be
2734 * included in the dump, false otherwise.
2736 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2737 enum dbg_grc_params grc_param)
2739 return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2742 /* Returns true of the specified Storm should be included in the dump, false
2745 static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2746 enum dbg_storms storm)
2748 return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2751 /* Returns true if the specified memory should be included in the dump, false
2754 static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
2755 enum block_id block_id,
2758 struct block_defs *block = s_block_defs[block_id];
2761 /* Check Storm match */
2762 if (block->associated_to_storm &&
2763 !ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
2766 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2767 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2769 if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
2770 return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
2773 switch (mem_group_id) {
2774 case MEM_GROUP_PXP_ILT:
2775 case MEM_GROUP_PXP_MEM:
2776 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2778 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2779 case MEM_GROUP_PBUF:
2780 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2781 case MEM_GROUP_CAU_MEM:
2782 case MEM_GROUP_CAU_SB:
2783 case MEM_GROUP_CAU_PI:
2784 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2785 case MEM_GROUP_QM_MEM:
2786 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2787 case MEM_GROUP_CFC_MEM:
2788 case MEM_GROUP_CONN_CFC_MEM:
2789 case MEM_GROUP_TASK_CFC_MEM:
2790 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2791 case MEM_GROUP_IGU_MEM:
2792 case MEM_GROUP_IGU_MSIX:
2793 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2794 case MEM_GROUP_MULD_MEM:
2795 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2796 case MEM_GROUP_PRS_MEM:
2797 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2798 case MEM_GROUP_DMAE_MEM:
2799 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2800 case MEM_GROUP_TM_MEM:
2801 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2802 case MEM_GROUP_SDM_MEM:
2803 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2804 case MEM_GROUP_TDIF_CTX:
2805 case MEM_GROUP_RDIF_CTX:
2806 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2807 case MEM_GROUP_CM_MEM:
2808 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2810 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2816 /* Stalls all Storms */
2817 static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2818 struct ecore_ptt *p_ptt,
2824 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2825 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2828 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2829 ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2832 OSAL_MSLEEP(STALL_DELAY_MS);
2835 /* Takes all blocks out of reset */
2836 static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
2837 struct ecore_ptt *p_ptt)
2839 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2840 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2843 /* Fill reset regs values */
2844 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2845 struct block_defs *block = s_block_defs[block_id];
2847 if (block->exists[dev_data->chip_id] && block->has_reset_bit && block->unreset)
2848 reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
2851 /* Write reset registers */
2852 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2853 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2856 reg_val[i] |= s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2859 ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
2863 /* Returns the attention block data of the specified block */
2864 static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2865 enum dbg_attn_type attn_type)
2867 const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block*)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2869 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2872 /* Returns the attention registers of the specified block */
2873 static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2874 enum dbg_attn_type attn_type,
2877 const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2879 *num_attn_regs = block_type_data->num_regs;
2881 return &((const struct dbg_attn_reg*)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2884 /* For each block, clear the status of all parities */
2885 static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
2886 struct ecore_ptt *p_ptt)
2888 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2889 const struct dbg_attn_reg *attn_reg_arr;
2890 u8 reg_idx, num_attn_regs;
2893 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2894 if (dev_data->block_in_reset[block_id])
2897 attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
2899 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2900 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
2901 u16 modes_buf_offset;
2905 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
2906 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
2908 /* If Mode match: clear parity status */
2909 if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
2910 ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
2915 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2916 * the following parameters are dumped:
2917 * - count: no. of dumped entries
2918 * - split: split type
2919 * - id: split ID (dumped only if split_id >= 0)
2920 * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
2921 * and param_val != OSAL_NULL).
2923 static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2925 u32 num_reg_entries,
2926 const char *split_type,
2928 const char *param_name,
2929 const char *param_val)
2931 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2934 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2935 offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2936 offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2938 offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2939 if (param_name && param_val)
2940 offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2945 /* Dumps the GRC registers in the specified address range.
2946 * Returns the dumped size in dwords.
2947 * The addr and len arguments are specified in dwords.
2949 static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2950 struct ecore_ptt *p_ptt,
2955 bool OSAL_UNUSED wide_bus)
2957 u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2962 for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2963 *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, byte_addr);
2968 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2969 * The addr and len arguments are specified in dwords.
2971 static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
2977 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2982 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2983 * The addr and len arguments are specified in dwords.
2985 static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
2986 struct ecore_ptt *p_ptt,
2991 bool OSAL_UNUSED wide_bus)
2995 offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2996 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3001 /* Dumps GRC registers sequence with skip cycle.
3002 * Returns the dumped size in dwords.
3003 * - addr: start GRC address in dwords
3004 * - total_len: total no. of dwords to dump
3005 * - read_len: no. consecutive dwords to read
3006 * - skip_len: no. of dwords to skip (and fill with zeros)
3008 static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
3009 struct ecore_ptt *p_ptt,
3017 u32 offset = 0, reg_offset = 0;
3019 offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
3022 return offset + total_len;
3024 while (reg_offset < total_len) {
3025 u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3027 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3028 reg_offset += curr_len;
3031 if (reg_offset < total_len) {
3032 curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
3033 OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3035 reg_offset += curr_len;
3043 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3044 static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
3045 struct ecore_ptt *p_ptt,
3046 struct dbg_array input_regs_arr,
3049 bool block_enable[MAX_BLOCK_ID],
3050 u32 *num_dumped_reg_entries)
3052 u32 i, offset = 0, input_offset = 0;
3053 bool mode_match = true;
3055 *num_dumped_reg_entries = 0;
3057 while (input_offset < input_regs_arr.size_in_dwords) {
3058 const struct dbg_dump_cond_hdr* cond_hdr = (const struct dbg_dump_cond_hdr*)&input_regs_arr.ptr[input_offset++];
3059 u16 modes_buf_offset;
3062 /* Check mode/block */
3063 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3065 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3066 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3069 if (!mode_match || !block_enable[cond_hdr->block_id]) {
3070 input_offset += cond_hdr->data_size;
3074 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
3075 const struct dbg_dump_reg *reg = (const struct dbg_dump_reg*)&input_regs_arr.ptr[input_offset];
3077 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3078 GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
3079 GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
3080 GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
3081 (*num_dumped_reg_entries)++;
3088 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3089 static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3090 struct ecore_ptt *p_ptt,
3091 struct dbg_array input_regs_arr,
3094 bool block_enable[MAX_BLOCK_ID],
3095 const char *split_type_name,
3097 const char *param_name,
3098 const char *param_val)
3100 u32 num_dumped_reg_entries, offset;
3102 /* Calculate register dump header size (and skip it for now) */
3103 offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3105 /* Dump registers */
3106 offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3108 /* Write register dump header */
3109 if (dump && num_dumped_reg_entries > 0)
3110 ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3112 return num_dumped_reg_entries > 0 ? offset : 0;
3115 /* Dumps registers according to the input registers array. Returns the dumped
3118 static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
3119 struct ecore_ptt *p_ptt,
3122 bool block_enable[MAX_BLOCK_ID],
3123 const char *param_name,
3124 const char *param_val)
3126 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3127 struct chip_platform_defs *chip_platform;
3128 u32 offset = 0, input_offset = 0;
3129 u8 port_id, pf_id, vf_id;
3131 chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];
3134 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping registers...\n");
3136 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
3137 const struct dbg_dump_split_hdr *split_hdr;
3138 struct dbg_array curr_input_regs_arr;
3139 u32 split_data_size;
3142 split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
3143 split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3144 split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3145 curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
3146 curr_input_regs_arr.size_in_dwords = split_data_size;
3148 switch(split_type_id) {
3149 case SPLIT_TYPE_NONE:
3150 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
3153 case SPLIT_TYPE_PORT:
3154 for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
3156 ecore_port_pretend(p_hwfn, p_ptt, port_id);
3157 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
3162 case SPLIT_TYPE_PORT_PF:
3163 for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
3165 ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3166 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
3171 for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
3173 ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
3174 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
3182 input_offset += split_data_size;
3185 /* Pretend to original PF */
3187 ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3192 /* Dump reset registers. Returns the dumped size in dwords. */
3193 static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3194 struct ecore_ptt *p_ptt,
3198 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3199 u32 i, offset = 0, num_regs = 0;
3201 /* Calculate header size */
3202 offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3204 /* Write reset registers */
3205 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
3206 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3209 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
3215 ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3220 /* Dump registers that are modified during GRC Dump and therefore must be
3221 * dumped first. Returns the dumped size in dwords.
3223 static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
3224 struct ecore_ptt *p_ptt,
3228 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3229 u32 block_id, offset = 0, num_reg_entries = 0;
3230 const struct dbg_attn_reg *attn_reg_arr;
3231 u8 storm_id, reg_idx, num_attn_regs;
3233 /* Calculate header size */
3234 offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3236 /* Write parity registers */
3237 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3238 if (dev_data->block_in_reset[block_id] && dump)
3241 attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
3243 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
3244 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
3245 u16 modes_buf_offset;
3249 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3250 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3251 if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
3254 /* Mode match: read & dump registers */
3255 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
3256 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
3257 num_reg_entries += 2;
3261 /* Write Storm stall status registers */
3262 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3263 struct storm_defs *storm = &s_storm_defs[storm_id];
3265 if (dev_data->block_in_reset[storm->block_id] && dump)
3268 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3269 BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
3275 ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);
3280 /* Dumps registers that can't be represented in the debug arrays */
3281 static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3282 struct ecore_ptt *p_ptt,
3288 offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3290 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3293 offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3294 offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3299 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3300 * dwords. The following parameters are dumped:
3301 * - name: dumped only if it's not OSAL_NULL.
3302 * - addr: in dwords, dumped only if name is OSAL_NULL.
3303 * - len: in dwords, always dumped.
3304 * - width: dumped if it's not zero.
3305 * - packed: dumped only if it's not false.
3306 * - mem_group: always dumped.
3307 * - is_storm: true only if the memory is related to a Storm.
3308 * - storm_letter: valid only if is_storm is true.
3311 static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3319 const char *mem_group,
3328 DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3335 /* Dump section header */
3336 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
3342 OSAL_STRCPY(buf, "?STORM_");
3343 buf[0] = storm_letter;
3344 OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3347 OSAL_STRCPY(buf, name);
3350 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
3352 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from %s...\n", len, buf);
3357 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3359 offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3360 if (dump && len > 64)
3361 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", len, addr_in_bytes);
3365 offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3367 /* Dump bit width */
3369 offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3373 offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
3377 OSAL_STRCPY(buf, "?STORM_");
3378 buf[0] = storm_letter;
3379 OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3382 OSAL_STRCPY(buf, mem_group);
3385 offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3390 /* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by address.
3391 * Returns the dumped size in dwords.
3392 * The addr and len arguments are specified in dwords.
3394 static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3395 struct ecore_ptt *p_ptt,
3404 const char *mem_group,
3410 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3411 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3416 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3417 static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
3418 struct ecore_ptt *p_ptt,
3419 struct dbg_array input_mems_arr,
3423 u32 i, offset = 0, input_offset = 0;
3424 bool mode_match = true;
3426 while (input_offset < input_mems_arr.size_in_dwords) {
3427 const struct dbg_dump_cond_hdr* cond_hdr;
3428 u16 modes_buf_offset;
3432 cond_hdr = (const struct dbg_dump_cond_hdr*)&input_mems_arr.ptr[input_offset++];
3433 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3435 /* Check required mode */
3436 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3438 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3439 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3443 input_offset += cond_hdr->data_size;
3447 for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3448 const struct dbg_dump_mem *mem = (const struct dbg_dump_mem*)&input_mems_arr.ptr[input_offset];
3449 u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
3450 bool is_storm = false, mem_wide_bus;
3451 char storm_letter = 'a';
3452 u32 mem_addr, mem_len;
3454 if (mem_group_id >= MEM_GROUPS_NUM) {
3455 DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
3459 if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
3462 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3463 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3464 mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);
3466 /* Update memory length for CCFC/TCFC memories
3467 * according to number of LCIDs/LTIDs.
3469 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3470 if (mem_len % MAX_LCIDS) {
3471 DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
3475 mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
3477 else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3478 if (mem_len % MAX_LTIDS) {
3479 DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
3483 mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
3486 /* If memory is associated with Storm, udpate Storm
3489 if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
3491 storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
3495 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
3496 0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
3503 /* Dumps GRC memories according to the input array dump_mem.
3504 * Returns the dumped size in dwords.
3506 static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3507 struct ecore_ptt *p_ptt,
3511 u32 offset = 0, input_offset = 0;
3513 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3514 const struct dbg_dump_split_hdr *split_hdr;
3515 struct dbg_array curr_input_mems_arr;
3516 u32 split_data_size;
3519 split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3520 split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3521 split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3522 curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3523 curr_input_mems_arr.size_in_dwords = split_data_size;
3525 switch (split_type_id) {
3526 case SPLIT_TYPE_NONE:
3527 offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
3531 DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3535 input_offset += split_data_size;
3541 /* Dumps GRC context data for the specified Storm.
3542 * Returns the dumped size in dwords.
3543 * The lid_size argument is specified in quad-regs.
3545 static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
3546 struct ecore_ptt *p_ptt,
3555 struct storm_defs *storm = &s_storm_defs[storm_id];
3556 u32 i, lid, total_size, offset = 0;
3561 lid_size *= BYTES_IN_DWORD;
3562 total_size = num_lids * lid_size;
3564 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);
3567 return offset + total_size;
3569 /* Dump context data */
3570 for (lid = 0; lid < num_lids; lid++) {
3571 for (i = 0; i < lid_size; i++, offset++) {
3572 ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3573 *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
3580 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3581 static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
3582 struct ecore_ptt *p_ptt,
3589 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3590 struct storm_defs *storm = &s_storm_defs[storm_id];
3592 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3595 /* Dump Conn AG context size */
3596 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3597 storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);
3599 /* Dump Conn ST context size */
3600 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3601 storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);
3603 /* Dump Task AG context size */
3604 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3605 storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);
3607 /* Dump Task ST context size */
3608 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3609 storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
3615 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3616 static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3617 struct ecore_ptt *p_ptt,
3621 char buf[10] = "IOR_SET_?";
3622 u32 addr, offset = 0;
3623 u8 storm_id, set_id;
3625 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3626 struct storm_defs *storm = &s_storm_defs[storm_id];
3628 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3631 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3632 addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
3633 buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3634 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3641 /* Dump VFC CAM. Returns the dumped size in dwords. */
3642 static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
3643 struct ecore_ptt *p_ptt,
3648 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3649 struct storm_defs *storm = &s_storm_defs[storm_id];
3650 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3651 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3652 u32 row, i, offset = 0;
3654 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);
3657 return offset + total_size;
3659 /* Prepare CAM address */
3660 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3662 for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {
3664 /* Write VFC CAM command */
3665 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3666 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);
3668 /* Write VFC CAM address */
3669 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);
3671 /* Read VFC CAM read response */
3672 ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
3678 /* Dump VFC RAM. Returns the dumped size in dwords. */
3679 static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
3680 struct ecore_ptt *p_ptt,
3684 struct vfc_ram_defs *ram_defs)
3686 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3687 struct storm_defs *storm = &s_storm_defs[storm_id];
3688 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3689 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3690 u32 row, i, offset = 0;
3692 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);
3694 /* Prepare RAM address */
3695 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3698 return offset + total_size;
3700 for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {
3702 /* Write VFC RAM command */
3703 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);
3705 /* Write VFC RAM address */
3706 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3707 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);
3709 /* Read VFC RAM read response */
3710 ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
3716 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3717 static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3718 struct ecore_ptt *p_ptt,
3722 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3726 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3727 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3728 !s_storm_defs[storm_id].has_vfc ||
3729 (storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3733 offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3736 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3737 offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3743 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3744 static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
3745 struct ecore_ptt *p_ptt,
3749 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3753 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3754 u32 rss_addr, num_entries, total_dwords;
3755 struct rss_mem_defs *rss_defs;
3758 rss_defs = &s_rss_mem_defs[rss_mem_id];
3759 rss_addr = rss_defs->addr;
3760 num_entries = rss_defs->num_entries[dev_data->chip_id];
3761 total_dwords = (num_entries * rss_defs->entry_width) / 32;
3762 packed = (rss_defs->entry_width == 16);
3764 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
3765 rss_defs->entry_width, packed, rss_defs->type_name, false, 0);
3769 offset += total_dwords;
3773 while (total_dwords) {
3774 u32 num_dwords_to_read = OSAL_MIN_T(u32, RSS_REG_RSS_RAM_DATA_SIZE, total_dwords);
3775 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3776 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), num_dwords_to_read, false);
3777 total_dwords -= num_dwords_to_read;
3785 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3786 static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
3787 struct ecore_ptt *p_ptt,
3792 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3793 u32 total_blocks, ram_size, offset = 0, i;
3794 char mem_name[12] = "???_BIG_RAM";
3795 char type_name[8] = "???_RAM";
3796 struct big_ram_defs *big_ram;
3798 big_ram = &s_big_ram_defs[big_ram_id];
3799 total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3800 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
3802 OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3803 OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3805 /* Dump memory header */
3806 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, BIG_RAM_BLOCK_SIZE_BYTES * 8, false, type_name, false, 0);
3808 /* Read and dump Big RAM data */
3810 return offset + ram_size;
3813 for (i = 0; i < total_blocks / 2; i++) {
3814 ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3815 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), 2 * BIG_RAM_BLOCK_SIZE_DWORDS, false);
3821 static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
3822 struct ecore_ptt *p_ptt,
3826 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3827 bool block_enable[MAX_BLOCK_ID] = { 0 };
3828 bool halted = false;
3832 if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3833 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
3835 DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
3838 /* Dump MCP scratchpad */
3839 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, false, 0, false, "MCP", false, 0);
3841 /* Dump MCP cpu_reg_file */
3842 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);
3844 /* Dump MCP registers */
3845 block_enable[BLOCK_MCP] = true;
3846 offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");
3848 /* Dump required non-MCP registers */
3849 offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
3850 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);
3853 if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
3854 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
3859 /* Dumps the tbus indirect memory for all PHYs. */
3860 static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
3861 struct ecore_ptt *p_ptt,
3865 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3869 for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
3870 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3871 struct phy_defs *phy_defs;
3874 phy_defs = &s_phy_defs[phy_id];
3875 addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
3876 addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
3877 data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
3878 data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
3879 bytes_buf = (u8*)(dump_buf + offset);
3881 if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
3882 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");
3884 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);
3887 offset += PHY_DUMP_SIZE_DWORDS;
3891 for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
3892 ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3893 for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
3894 ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
3895 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
3896 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
3900 offset += PHY_DUMP_SIZE_DWORDS;
3906 static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
3907 struct ecore_ptt *p_ptt,
3908 enum block_id block_id,
3912 u8 force_valid_mask,
3913 u8 force_frame_mask)
3915 struct block_defs *block = s_block_defs[block_id];
3917 ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3918 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3919 ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3920 ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3921 ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3924 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3925 static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
3926 struct ecore_ptt *p_ptt,
3930 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3931 u32 block_id, line_id, offset = 0;
3933 /* Skip static debug if a debug bus recording is in progress */
3934 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3938 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping static debug data...\n");
3940 /* Disable all blocks debug output */
3941 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3942 struct block_defs *block = s_block_defs[block_id];
3944 if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS)
3945 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3948 ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
3949 ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3950 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3951 ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3952 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3955 /* Dump all static debug lines for each relevant block */
3956 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3957 struct block_defs *block = s_block_defs[block_id];
3958 struct dbg_bus_block *block_desc;
3961 if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS)
3964 block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
3965 block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;
3967 /* Dump static section params */
3968 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);
3971 offset += block_dwords;
3975 /* If all lines are invalid - dump zeros */
3976 if (dev_data->block_in_reset[block_id]) {
3977 OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
3978 offset += block_dwords;
3982 /* Enable block's client */
3983 ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
3984 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {
3986 /* Configure debug line ID */
3987 ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);
3989 /* Read debug line info */
3990 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
3993 /* Disable block's client and debug output */
3994 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
3995 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3999 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
4000 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
4006 /* Performs GRC Dump to the specified buffer.
4007 * Returns the dumped size in dwords.
4009 static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
4010 struct ecore_ptt *p_ptt,
4013 u32 *num_dumped_dwords)
4015 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4016 bool is_asic, parities_masked = false;
4017 u8 i, port_mode = 0;
4020 is_asic = dev_data->platform_id == PLATFORM_ASIC;
4022 *num_dumped_dwords = 0;
4026 /* Find port mode */
4027 switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
4028 case 0: port_mode = 1; break;
4029 case 1: port_mode = 2; break;
4030 case 2: port_mode = 4; break;
4033 /* Update reset state */
4034 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4037 /* Dump global params */
4038 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
4039 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
4040 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
4041 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
4042 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);
4044 /* Dump reset registers (dumped before taking blocks out of reset ) */
4045 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4046 offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4048 /* Take all blocks out of reset (using reset registers) */
4050 ecore_grc_unreset_blocks(p_hwfn, p_ptt);
4051 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4054 /* Disable all parities using MFW command */
4055 if (dump && is_asic && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4056 parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
4057 if (!parities_masked) {
4058 DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
4059 if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4060 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4064 /* Dump modified registers (dumped before modifying them) */
4065 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4066 offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4069 if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4070 ecore_grc_stall_storms(p_hwfn, p_ptt, true);
4073 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4074 bool block_enable[MAX_BLOCK_ID];
4076 /* Dump all blocks except MCP */
4077 for (i = 0; i < MAX_BLOCK_ID; i++)
4078 block_enable[i] = true;
4079 block_enable[BLOCK_MCP] = false;
4080 offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);
4082 /* Dump special registers */
4083 offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4087 offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4090 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4091 offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);
4094 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4095 offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);
4097 /* Dump RSS memories */
4098 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4099 offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);
4102 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4103 if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4104 offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);
4107 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4108 offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);
4111 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4112 offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);
4115 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4116 offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);
4118 /* Dump static debug data */
4119 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
4120 offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);
4122 /* Dump last section */
4123 offset += ecore_dump_last_section(dump_buf, offset, dump);
4127 /* Unstall storms */
4128 if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4129 ecore_grc_stall_storms(p_hwfn, p_ptt, false);
4131 /* Clear parity status */
4133 ecore_grc_clear_all_prty(p_hwfn, p_ptt);
4135 /* Enable all parities using MFW command */
4136 if (parities_masked)
4137 ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
4140 *num_dumped_dwords = offset;
4142 return DBG_STATUS_OK;
4145 /* Writes the specified failing Idle Check rule to the specified buffer.
4146 * Returns the dumped size in dwords.
4148 static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
4149 struct ecore_ptt *p_ptt,
4153 const struct dbg_idle_chk_rule *rule,
4155 u32 *cond_reg_values)
4157 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4158 const struct dbg_idle_chk_cond_reg *cond_regs;
4159 const struct dbg_idle_chk_info_reg *info_regs;
4160 u32 i, next_reg_offset = 0, offset = 0;
4161 struct dbg_idle_chk_result_hdr *hdr;
4162 const union dbg_idle_chk_reg *regs;
4165 hdr = (struct dbg_idle_chk_result_hdr*)dump_buf;
4166 regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4167 cond_regs = ®s[0].cond_reg;
4168 info_regs = ®s[rule->num_cond_regs].info_reg;
4170 /* Dump rule data */
4172 OSAL_MEMSET(hdr, 0, sizeof(*hdr));
4173 hdr->rule_id = rule_id;
4174 hdr->mem_entry_id = fail_entry_id;
4175 hdr->severity = rule->severity;
4176 hdr->num_dumped_cond_regs = rule->num_cond_regs;
4179 offset += IDLE_CHK_RESULT_HDR_DWORDS;
4181 /* Dump condition register values */
4182 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4183 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4184 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4186 reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);
4188 /* Write register header */
4190 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
4194 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4195 OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4196 reg_hdr->start_entry = reg->start_entry;
4197 reg_hdr->size = reg->entry_size;
4198 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4199 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4201 /* Write register values */
4202 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4203 dump_buf[offset] = cond_reg_values[next_reg_offset];
4206 /* Dump info register values */
4207 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4208 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4211 /* Check if register's block is in reset */
4213 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4217 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4218 if (block_id >= MAX_BLOCK_ID) {
4219 DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4223 if (!dev_data->block_in_reset[block_id]) {
4224 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4225 bool wide_bus, eval_mode, mode_match = true;
4226 u16 modes_buf_offset;
4229 reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);
4232 eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4234 modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4235 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4241 addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
4242 wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4244 /* Write register header */
4245 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4246 hdr->num_dumped_info_regs++;
4247 OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4248 reg_hdr->size = reg->size;
4249 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);
4251 /* Write register values */
4252 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
4259 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4260 static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
4261 struct ecore_ptt *p_ptt,
4264 const struct dbg_idle_chk_rule *input_rules,
4265 u32 num_input_rules,
4266 u32 *num_failing_rules)
4268 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4269 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4274 *num_failing_rules = 0;
4276 for (i = 0; i < num_input_rules; i++) {
4277 const struct dbg_idle_chk_cond_reg *cond_regs;
4278 const struct dbg_idle_chk_rule *rule;
4279 const union dbg_idle_chk_reg *regs;
4280 u16 num_reg_entries = 1;
4281 bool check_rule = true;
4282 const u32 *imm_values;
4284 rule = &input_rules[i];
4285 regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4286 cond_regs = ®s[0].cond_reg;
4287 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];
4289 /* Check if all condition register blocks are out of reset, and
4290 * find maximal number of entries (all condition registers that
4291 * are memories must have the same size, which is > 1).
4293 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
4294 u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4296 if (block_id >= MAX_BLOCK_ID) {
4297 DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4301 check_rule = !dev_data->block_in_reset[block_id];
4302 if (cond_regs[reg_id].num_entries > num_reg_entries)
4303 num_reg_entries = cond_regs[reg_id].num_entries;
4306 if (!check_rule && dump)
4310 u32 entry_dump_size = ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, 0, OSAL_NULL);
4312 offset += num_reg_entries * entry_dump_size;
4313 (*num_failing_rules) += num_reg_entries;
4317 /* Go over all register entries (number of entries is the same for all
4318 * condition registers).
4320 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4321 u32 next_reg_offset = 0;
4323 /* Read current entry of all condition registers */
4324 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4325 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4326 u32 padded_entry_size, addr;
4329 /* Find GRC address (if it's a memory, the address of the
4330 * specific entry is calculated).
4332 addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
4333 wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4334 if (reg->num_entries > 1 || reg->start_entry > 0) {
4335 padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
4336 addr += (reg->start_entry + entry_id) * padded_entry_size;
4339 /* Read registers */
4340 if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
4341 DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
4345 next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
4348 /* Call rule condition function. if returns true, it's a failure.*/
4349 if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
4350 offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
4351 (*num_failing_rules)++;
4359 /* Performs Idle Check Dump to the specified buffer.
4360 * Returns the dumped size in dwords.
4362 static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
4363 struct ecore_ptt *p_ptt,
4367 u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;
4369 /* Dump global params */
4370 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4371 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");
4373 /* Dump idle check section header with a single parameter */
4374 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4375 num_failing_rules_offset = offset;
4376 offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4378 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4379 const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
4380 bool eval_mode, mode_match = true;
4381 u32 curr_failing_rules;
4382 u16 modes_buf_offset;
4385 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4387 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4388 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4392 offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
4393 num_failing_rules += curr_failing_rules;
4396 input_offset += cond_hdr->data_size;
4399 /* Overwrite num_rules parameter */
4401 ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);
4403 /* Dump last section */
4404 offset += ecore_dump_last_section(dump_buf, offset, dump);
4409 /* Finds the meta data image in NVRAM */
4410 static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
4411 struct ecore_ptt *p_ptt,
4413 u32 *nvram_offset_bytes,
4414 u32 *nvram_size_bytes)
4416 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4417 struct mcp_file_att file_att;
4420 /* Call NVRAM get file command */
4421 nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32*)&file_att);
4423 /* Check response */
4424 if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4425 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4427 /* Update return values */
4428 *nvram_offset_bytes = file_att.nvm_start_addr;
4429 *nvram_size_bytes = file_att.len;
4431 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);
4433 /* Check alignment */
4434 if (*nvram_size_bytes & 0x3)
4435 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4437 return DBG_STATUS_OK;
4440 /* Reads data from NVRAM */
4441 static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4442 struct ecore_ptt *p_ptt,
4443 u32 nvram_offset_bytes,
4444 u32 nvram_size_bytes,
4447 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4448 s32 bytes_left = nvram_size_bytes;
4449 u32 read_offset = 0;
4451 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4454 bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4456 /* Call NVRAM read command */
4457 if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32*)((u8*)ret_buf + read_offset)))
4458 return DBG_STATUS_NVRAM_READ_FAILED;
4460 /* Check response */
4461 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4462 return DBG_STATUS_NVRAM_READ_FAILED;
4464 /* Update read offset */
4465 read_offset += ret_read_size;
4466 bytes_left -= ret_read_size;
4467 } while (bytes_left > 0);
4469 return DBG_STATUS_OK;
4472 /* Get info on the MCP Trace data in the scratchpad:
4473 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4474 * - trace_data_size (OUT): trace data size in bytes (without the header)
4476 static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4477 struct ecore_ptt *p_ptt,
4478 u32 *trace_data_grc_addr,
4479 u32 *trace_data_size)
4481 u32 spad_trace_offsize, signature;
4483 /* Read trace section offsize structure from MCP scratchpad */
4484 spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4486 /* Extract trace section address from offsize (in scratchpad) */
4487 *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4489 /* Read signature from MCP trace section */
4490 signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4492 if (signature != MFW_TRACE_SIGNATURE)
4493 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4495 /* Read trace size from MCP trace section */
4496 *trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4498 return DBG_STATUS_OK;
4501 /* Reads MCP trace meta data image from NVRAM
4502 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4503 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4504 * loaded from file).
4505 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4507 static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4508 struct ecore_ptt *p_ptt,
4509 u32 trace_data_size_bytes,
4510 u32 *running_bundle_id,
4511 u32 *trace_meta_offset,
4512 u32 *trace_meta_size)
4514 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4516 /* Read MCP trace section offsize structure from MCP scratchpad */
4517 spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4519 /* Find running bundle ID */
4520 running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4521 *running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4522 if (*running_bundle_id > 1)
4523 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4525 /* Find image in NVRAM */
4526 nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4527 return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4530 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4531 static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4532 struct ecore_ptt *p_ptt,
4533 u32 nvram_offset_in_bytes,
4537 u8 modules_num, module_len, i, *byte_buf = (u8*)buf;
4538 enum dbg_status status;
4541 /* Read meta data from NVRAM */
4542 status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4543 if (status != DBG_STATUS_OK)
4546 /* Extract and check first signature */
4547 signature = ecore_read_unaligned_dword(byte_buf);
4548 byte_buf += sizeof(signature);
4549 if (signature != NVM_MAGIC_VALUE)
4550 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4552 /* Extract number of modules */
4553 modules_num = *(byte_buf++);
4555 /* Skip all modules */
4556 for (i = 0; i < modules_num; i++) {
4557 module_len = *(byte_buf++);
4558 byte_buf += module_len;
4561 /* Extract and check second signature */
4562 signature = ecore_read_unaligned_dword(byte_buf);
4563 byte_buf += sizeof(signature);
4564 if (signature != NVM_MAGIC_VALUE)
4565 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4567 return DBG_STATUS_OK;
4570 /* Dump MCP Trace */
4571 static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
4572 struct ecore_ptt *p_ptt,
4575 u32 *num_dumped_dwords)
4577 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
4578 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4579 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4580 u32 running_bundle_id, offset = 0;
4581 enum dbg_status status;
4585 *num_dumped_dwords = 0;
4587 mcp_access = dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4589 /* Get trace data info */
4590 status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
4591 if (status != DBG_STATUS_OK)
4594 /* Dump global params */
4595 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4596 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");
4598 /* Halt MCP while reading from scratchpad so the read data will be
4599 * consistent. if halt fails, MCP trace is taken anyway, with a small
4600 * risk that it may be corrupt.
4602 if (dump && mcp_access) {
4603 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
4605 DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
4608 /* Find trace data size */
4609 trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);
4611 /* Dump trace data section header and param */
4612 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
4613 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);
4615 /* Read trace data from scratchpad into dump buffer */
4616 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);
4618 /* Resume MCP (only if halt succeeded) */
4619 if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
4620 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
4622 /* Dump trace meta section header */
4623 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);
4625 /* Read trace meta only if NVRAM access is enabled
4626 * (trace_meta_size_bytes is dword-aligned).
4628 if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
4629 status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
4630 if (status == DBG_STATUS_OK)
4631 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4634 /* Dump trace meta size param */
4635 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);
4637 /* Read trace meta image into dump buffer */
4638 if (dump && trace_meta_size_dwords)
4639 status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
4640 if (status == DBG_STATUS_OK)
4641 offset += trace_meta_size_dwords;
4643 /* Dump last section */
4644 offset += ecore_dump_last_section(dump_buf, offset, dump);
4646 *num_dumped_dwords = offset;
4648 /* If no mcp access, indicate that the dump doesn't contain the meta
4651 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4655 static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
4656 struct ecore_ptt *p_ptt,
4659 u32 *num_dumped_dwords)
4661 u32 dwords_read, size_param_offset, offset = 0;
4664 *num_dumped_dwords = 0;
4666 /* Dump global params */
4667 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4668 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");
4670 /* Dump fifo data section header and param. The size param is 0 for
4671 * now, and is overwritten after reading the FIFO.
4673 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
4674 size_param_offset = offset;
4675 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4678 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4680 /* Pull available data from fifo. Use DMAE since this is
4681 * widebus memory and must be accessed atomically. Test for
4682 * dwords_read not passing buffer size since more entries could
4683 * be added to the buffer as we
4686 for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += REG_FIFO_ELEMENT_DWORDS) {
4687 if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, (u64)(osal_uintptr_t)(&dump_buf[offset]), REG_FIFO_ELEMENT_DWORDS, 0))
4688 return DBG_STATUS_DMAE_FAILED;
4689 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4692 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4696 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4697 * test how much data is available, except for reading it.
4699 offset += REG_FIFO_DEPTH_DWORDS;
4702 /* Dump last section */
4703 offset += ecore_dump_last_section(dump_buf, offset, dump);
4705 *num_dumped_dwords = offset;
4707 return DBG_STATUS_OK;
4711 static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4712 struct ecore_ptt *p_ptt,
4715 u32 *num_dumped_dwords)
4717 u32 dwords_read, size_param_offset, offset = 0;
4720 *num_dumped_dwords = 0;
4722 /* Dump global params */
4723 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4724 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4726 /* Dump fifo data section header and param. The size param is 0 for
4727 * now, and is overwritten after reading the FIFO.
4729 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4730 size_param_offset = offset;
4731 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4734 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4736 /* Pull available data from fifo. Use DMAE since this is
4737 * widebus memory and must be accessed atomically. Test for
4738 * dwords_read not passing buffer size since more entries could
4739 * be added to the buffer as we are emptying it.
4741 for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += IGU_FIFO_ELEMENT_DWORDS) {
4742 if (ecore_dmae_grc2host(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_MEMORY, (u64)(osal_uintptr_t)(&dump_buf[offset]), IGU_FIFO_ELEMENT_DWORDS, 0))
4743 return DBG_STATUS_DMAE_FAILED;
4744 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4747 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4751 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4752 * test how much data is available, except for reading it.
4754 offset += IGU_FIFO_DEPTH_DWORDS;
4757 /* Dump last section */
4758 offset += ecore_dump_last_section(dump_buf, offset, dump);
4760 *num_dumped_dwords = offset;
4762 return DBG_STATUS_OK;
4765 /* Protection Override dump */
4766 static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4767 struct ecore_ptt *p_ptt,
4770 u32 *num_dumped_dwords)
4772 u32 size_param_offset, override_window_dwords, offset = 0;
4774 *num_dumped_dwords = 0;
4776 /* Dump global params */
4777 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4778 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4780 /* Dump data section header and param. The size param is 0 for now,
4781 * and is overwritten after reading the data.
4783 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4784 size_param_offset = offset;
4785 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4788 /* Add override window info to buffer */
4789 override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4790 if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_PROTECTION_OVERRIDE_WINDOW, (u64)(osal_uintptr_t)(dump_buf + offset), override_window_dwords, 0))
4791 return DBG_STATUS_DMAE_FAILED;
4792 offset += override_window_dwords;
4793 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
4796 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4799 /* Dump last section */
4800 offset += ecore_dump_last_section(dump_buf, offset, dump);
4802 *num_dumped_dwords = offset;
4804 return DBG_STATUS_OK;
4807 /* Performs FW Asserts Dump to the specified buffer.
4808 * Returns the dumped size in dwords.
4810 static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
4811 struct ecore_ptt *p_ptt,
4815 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4816 struct fw_asserts_ram_section *asserts;
4817 char storm_letter_str[2] = "?";
4818 struct fw_info fw_info;
4822 /* Dump global params */
4823 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4824 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");
4826 /* Find Storm dump size */
4827 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4828 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
4829 struct storm_defs *storm = &s_storm_defs[storm_id];
4831 if (dev_data->block_in_reset[storm->block_id])
4834 /* Read FW info for the current Storm */
4835 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4837 asserts = &fw_info.fw_asserts_section;
4839 /* Dump FW Asserts section header and params */
4840 storm_letter_str[0] = storm->letter;
4841 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
4842 offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
4843 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);
4845 /* Read and dump FW Asserts data */
4847 offset += asserts->list_element_dword_size;
4851 fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
4852 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4853 next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4854 next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
4855 last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
4856 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
4857 last_list_idx * asserts->list_element_dword_size;
4858 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
4861 /* Dump last section */
4862 offset += ecore_dump_last_section(dump_buf, offset, dump);
4867 /***************************** Public Functions *******************************/
4869 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4871 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr*)bin_ptr;
4874 /* convert binary data to debug arrays */
4875 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4876 s_dbg_arrays[buf_id].ptr = (u32*)(bin_ptr + buf_array[buf_id].offset);
4877 s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4880 return DBG_STATUS_OK;
4883 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4885 if (ver < TOOLS_VERSION)
4886 return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4890 return DBG_STATUS_OK;
4893 u32 ecore_dbg_get_fw_func_ver(void)
4895 return TOOLS_VERSION;
4898 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4900 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4902 return (enum chip_ids)dev_data->chip_id;
4905 enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
4906 struct ecore_ptt *p_ptt,
4912 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4913 enum dbg_status status;
4915 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
4916 if (status != DBG_STATUS_OK)
4919 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);
4921 if (force_hw_dwords &&
4922 force_hw_dwords != 4 &&
4923 force_hw_dwords != 8)
4924 return DBG_STATUS_INVALID_ARGS;
4926 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
4927 return DBG_STATUS_DBG_BUS_IN_USE;
4929 /* Update reset state of all blocks */
4930 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4932 /* Disable all debug inputs */
4933 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
4934 if (status != DBG_STATUS_OK)
4937 /* Reset DBG block */
4938 ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4940 /* Set one-shot / wrap-around */
4941 ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);
4943 /* Init state params */
4944 OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
4945 dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
4946 dev_data->bus.state = DBG_BUS_STATE_READY;
4947 dev_data->bus.one_shot_en = one_shot_en;
4948 dev_data->bus.hw_dwords = force_hw_dwords;
4949 dev_data->bus.grc_input_en = grc_input_en;
4950 dev_data->bus.unify_inputs = unify_inputs;
4951 dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;
4953 /* Init special DBG block */
4955 SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
4957 return DBG_STATUS_OK;
4960 enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
4961 struct ecore_ptt *p_ptt,
4964 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4965 dma_addr_t pci_buf_phys_addr;
4968 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
4970 if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
4971 return DBG_STATUS_OUTPUT_ALREADY_SET;
4972 if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
4973 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
4975 dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
4976 dev_data->bus.pci_buf.size = buf_size_kb * 1024;
4977 if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
4978 return DBG_STATUS_INVALID_ARGS;
4980 pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
4982 return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
4984 OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
4986 dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
4987 dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
4989 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
4990 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
4991 ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
4992 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
4993 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
4994 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
4995 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
4996 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
4997 ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
4999 return DBG_STATUS_OK;
5002 enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
5003 struct ecore_ptt *p_ptt,
5007 u16 data_limit_size_kb,
5008 bool send_to_other_engine,
5009 bool rcv_from_other_engine)
5011 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5013 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);
5015 if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
5016 return DBG_STATUS_OUTPUT_ALREADY_SET;
5017 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5018 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5019 if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
5020 return DBG_STATUS_INVALID_ARGS;
5022 dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
5023 dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;
5025 ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
5026 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);
5028 if (send_to_other_engine)
5029 ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
5031 ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);
5033 if (rcv_from_other_engine) {
5034 ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
5038 /* Configure ethernet header of 14 bytes */
5039 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
5040 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
5041 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
5042 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
5043 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
5044 ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
5045 if (data_limit_size_kb)
5046 ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
5049 return DBG_STATUS_OK;
5052 static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5056 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5057 u8 curr_shifted_enable_mask, shifted_enable_mask;
5060 shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5062 if (dev_data->bus.num_enabled_blocks) {
5063 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5064 struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5066 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5069 curr_shifted_enable_mask =
5070 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5072 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5073 if (shifted_enable_mask & curr_shifted_enable_mask)
5081 enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5082 enum block_id block_id,
5086 u8 force_valid_mask,
5087 u8 force_frame_mask)
5089 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5090 struct block_defs *block = s_block_defs[block_id];
5091 struct dbg_bus_block_data *block_bus;
5092 struct dbg_bus_block *block_desc;
5094 block_bus = &dev_data->bus.blocks[block_id];
5095 block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5097 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5099 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5100 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5101 if (block_id >= MAX_BLOCK_ID)
5102 return DBG_STATUS_INVALID_ARGS;
5103 if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5104 return DBG_STATUS_BLOCK_ALREADY_ENABLED;
5105 if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS ||
5106 line_num >= NUM_DBG_LINES(block_desc) ||
5108 enable_mask > MAX_CYCLE_VALUES_MASK ||
5109 force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5110 force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5111 right_shift > VALUES_PER_CYCLE - 1)
5112 return DBG_STATUS_INVALID_ARGS;
5113 if (dev_data->block_in_reset[block_id])
5114 return DBG_STATUS_BLOCK_IN_RESET;
5115 if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5116 return DBG_STATUS_INPUT_OVERLAP;
5118 dev_data->bus.blocks[block_id].line_num = line_num;
5119 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5120 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5121 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5122 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5124 dev_data->bus.num_enabled_blocks++;
5126 return DBG_STATUS_OK;
5129 enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5130 enum dbg_storms storm,
5131 enum dbg_bus_storm_modes storm_mode)
5133 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5135 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm, storm_mode);
5137 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5138 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5139 if (dev_data->bus.hw_dwords >= 4)
5140 return DBG_STATUS_HW_ONLY_RECORDING;
5141 if (storm >= MAX_DBG_STORMS)
5142 return DBG_STATUS_INVALID_ARGS;
5143 if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5144 return DBG_STATUS_INVALID_ARGS;
5145 if (dev_data->bus.unify_inputs)
5146 return DBG_STATUS_INVALID_ARGS;
5148 if (dev_data->bus.storms[storm].enabled)
5149 return DBG_STATUS_STORM_ALREADY_ENABLED;
5151 dev_data->bus.storms[storm].enabled = true;
5152 dev_data->bus.storms[storm].mode = (u8)storm_mode;
5153 dev_data->bus.storms[storm].hw_id = dev_data->bus.num_enabled_storms;
5155 dev_data->bus.num_enabled_storms++;
5157 return DBG_STATUS_OK;
5160 enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
5161 struct ecore_ptt *p_ptt,
5166 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5168 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);
5170 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5171 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5172 if (valid_mask > 0x7 || frame_mask > 0x7)
5173 return DBG_STATUS_INVALID_ARGS;
5174 if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
5175 return DBG_STATUS_INPUT_OVERLAP;
5177 dev_data->bus.timestamp_input_en = true;
5178 dev_data->bus.num_enabled_blocks++;
5180 SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5182 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
5183 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
5184 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);
5186 return DBG_STATUS_OK;
5189 enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5190 enum dbg_storms storm_id,
5194 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5195 struct dbg_bus_storm_data *storm_bus;
5197 storm_bus = &dev_data->bus.storms[storm_id];
5199 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5201 if (storm_id >= MAX_DBG_STORMS)
5202 return DBG_STATUS_INVALID_ARGS;
5203 if (min_eid > max_eid)
5204 return DBG_STATUS_INVALID_ARGS;
5205 if (!storm_bus->enabled)
5206 return DBG_STATUS_STORM_NOT_ENABLED;
5208 storm_bus->eid_filter_en = 1;
5209 storm_bus->eid_range_not_mask = 1;
5210 storm_bus->eid_filter_params.range.min = min_eid;
5211 storm_bus->eid_filter_params.range.max = max_eid;
5213 return DBG_STATUS_OK;
5216 enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5217 enum dbg_storms storm_id,
5221 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5222 struct dbg_bus_storm_data *storm_bus;
5224 storm_bus = &dev_data->bus.storms[storm_id];
5226 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5228 if (storm_id >= MAX_DBG_STORMS)
5229 return DBG_STATUS_INVALID_ARGS;
5230 if (!storm_bus->enabled)
5231 return DBG_STATUS_STORM_NOT_ENABLED;
5233 storm_bus->eid_filter_en = 1;
5234 storm_bus->eid_range_not_mask = 0;
5235 storm_bus->eid_filter_params.mask.val = eid_val;
5236 storm_bus->eid_filter_params.mask.mask = eid_mask;
5238 return DBG_STATUS_OK;
5241 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5242 enum dbg_storms storm_id,
5245 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5246 struct dbg_bus_storm_data *storm_bus;
5248 storm_bus = &dev_data->bus.storms[storm_id];
5250 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5252 if (storm_id >= MAX_DBG_STORMS)
5253 return DBG_STATUS_INVALID_ARGS;
5254 if (!storm_bus->enabled)
5255 return DBG_STATUS_STORM_NOT_ENABLED;
5257 storm_bus->cid_filter_en = 1;
5258 storm_bus->cid = cid;
5260 return DBG_STATUS_OK;
5263 enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
5264 struct ecore_ptt *p_ptt,
5265 enum block_id block_id,
5268 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5270 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);
5272 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5273 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5274 if (dev_data->bus.filter_en)
5275 return DBG_STATUS_FILTER_ALREADY_ENABLED;
5276 if (block_id >= MAX_BLOCK_ID)
5277 return DBG_STATUS_INVALID_ARGS;
5278 if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5279 return DBG_STATUS_BLOCK_NOT_ENABLED;
5280 if (!dev_data->bus.unify_inputs)
5281 return DBG_STATUS_FILTER_BUG;
5283 dev_data->bus.filter_en = true;
5284 dev_data->bus.next_constraint_id = 0;
5285 dev_data->bus.adding_filter = true;
5287 /* HW ID is set to 0 due to required unifyInputs */
5288 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);
5289 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
5290 if (const_msg_len > 0)
5291 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);
5293 return DBG_STATUS_OK;
5296 enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
5297 struct ecore_ptt *p_ptt,
5298 bool rec_pre_trigger,
5300 bool rec_post_trigger,
5302 bool filter_pre_trigger,
5303 bool filter_post_trigger)
5305 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5306 enum dbg_bus_post_trigger_types post_trigger_type;
5307 enum dbg_bus_pre_trigger_types pre_trigger_type;
5308 struct dbg_bus_data *bus = &dev_data->bus;
5310 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);
5312 if (bus->state != DBG_BUS_STATE_READY)
5313 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5314 if (bus->trigger_en)
5315 return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
5316 if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
5317 return DBG_STATUS_INVALID_ARGS;
5319 bus->trigger_en = true;
5320 bus->filter_pre_trigger = filter_pre_trigger;
5321 bus->filter_post_trigger = filter_post_trigger;
5323 if (rec_pre_trigger) {
5324 pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
5325 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
5328 pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
5331 if (rec_post_trigger) {
5332 post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
5333 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
5336 post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
5339 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
5340 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
5341 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);
5343 return DBG_STATUS_OK;
5346 enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5347 struct ecore_ptt *p_ptt,
5348 enum block_id block_id,
5352 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5353 struct dbg_bus_data *bus = &dev_data->bus;
5354 struct dbg_bus_block_data *block_bus;
5357 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5359 block_bus = &bus->blocks[block_id];
5361 if (!bus->trigger_en)
5362 return DBG_STATUS_TRIGGER_NOT_ENABLED;
5363 if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5364 return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5365 if (block_id >= MAX_BLOCK_ID)
5366 return DBG_STATUS_INVALID_ARGS;
5367 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5368 return DBG_STATUS_BLOCK_NOT_ENABLED;
5370 return DBG_STATUS_INVALID_ARGS;
5372 bus->next_constraint_id = 0;
5373 bus->adding_filter = false;
5375 /* Store block's shifted enable mask */
5376 SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5378 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5380 /* Set trigger state registers */
5381 reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5382 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5383 if (const_msg_len > 0)
5384 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5386 /* Set trigger set registers */
5387 reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5388 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5390 /* Set next state to final state, and overwrite previous next state
5393 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5394 if (bus->next_trigger_state > 0) {
5395 reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5396 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5399 bus->next_trigger_state++;
5401 return DBG_STATUS_OK;
5404 enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
5405 struct ecore_ptt *p_ptt,
5406 enum dbg_bus_constraint_ops constraint_op,
5412 u8 dword_offset_in_cycle,
5415 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5416 struct dbg_bus_data *bus = &dev_data->bus;
5417 u16 dword_offset, range = 0;
5419 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);
5421 if (!bus->filter_en && !dev_data->bus.trigger_en)
5422 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5423 if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
5424 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5425 if (bus->next_constraint_id >= MAX_CONSTRAINTS)
5426 return DBG_STATUS_TOO_MANY_CONSTRAINTS;
5427 if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
5428 return DBG_STATUS_INVALID_ARGS;
5429 if (compare_frame &&
5430 constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
5431 constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
5432 return DBG_STATUS_INVALID_ARGS;
5434 dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;
5436 if (!bus->adding_filter) {
5437 u8 curr_trigger_state_id = bus->next_trigger_state - 1;
5438 struct dbg_bus_trigger_state_data *trigger_state;
5440 trigger_state = &bus->trigger_states[curr_trigger_state_id];
5442 /* Check if the selected dword is enabled in the block */
5443 if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
5444 return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;
5446 /* Add selected dword to trigger state's dword mask */
5447 SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
5450 /* Prepare data mask and range */
5451 if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
5452 constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
5453 data_mask = ~data_mask;
5458 /* Extract lsb and width from mask */
5460 return DBG_STATUS_INVALID_ARGS;
5462 for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
5464 width < 32 - lsb && (data_mask & 1);
5465 width++, data_mask >>= 1) {}
5467 return DBG_STATUS_INVALID_ARGS;
5468 range = (lsb << 5) | (width - 1);
5471 /* Add constraint */
5472 ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
5473 dev_data->bus.next_constraint_id,
5474 s_constraint_op_defs[constraint_op].hw_op_val,
5475 data_val, data_mask, frame_bit,
5476 compare_frame ? 0 : 1, dword_offset, range,
5477 s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
5478 is_mandatory ? 1 : 0);
5480 /* If first constraint, fill other 3 constraints with dummy constraints
5481 * that always match (using the same offset).
5483 if (!dev_data->bus.next_constraint_id) {
5486 for (i = 1; i < MAX_CONSTRAINTS; i++)
5487 ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
5488 i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
5489 0, 1, dword_offset, 0, 0, 1);
5492 bus->next_constraint_id++;
5494 return DBG_STATUS_OK;
5497 /* Configure the DBG block client mask */
5498 static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5499 struct ecore_ptt *p_ptt)
5501 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5502 struct dbg_bus_data *bus = &dev_data->bus;
5503 u32 block_id, client_mask = 0;
5506 /* Update client mask for Storm inputs */
5507 if (bus->num_enabled_storms)
5508 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5509 struct storm_defs *storm = &s_storm_defs[storm_id];
5511 if (bus->storms[storm_id].enabled)
5512 client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5515 /* Update client mask for block inputs */
5516 if (bus->num_enabled_blocks) {
5517 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5518 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5519 struct block_defs *block = s_block_defs[block_id];
5521 if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5522 client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5526 /* Update client mask for GRC input */
5527 if (bus->grc_input_en)
5528 client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5530 /* Update client mask for timestamp input */
5531 if (bus->timestamp_input_en)
5532 client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5534 ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5537 /* Configure the DBG block framing mode */
5538 static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
5539 struct ecore_ptt *p_ptt)
5541 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5542 struct dbg_bus_data *bus = &dev_data->bus;
5543 enum dbg_bus_frame_modes dbg_framing_mode;
5546 if (!bus->hw_dwords && bus->num_enabled_blocks) {
5547 struct dbg_bus_line *line_desc;
5550 /* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
5553 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5554 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5556 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5559 line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
5560 hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;
5562 if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
5563 return DBG_STATUS_NON_MATCHING_LINES;
5565 /* The DBG block doesn't support triggers and
5566 * filters on 256b debug lines.
5568 if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
5569 return DBG_STATUS_NO_FILTER_TRIGGER_64B;
5571 bus->hw_dwords = hw_dwords;
5575 switch (bus->hw_dwords) {
5576 case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5577 case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
5578 case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
5579 default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5581 ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);
5583 return DBG_STATUS_OK;
5586 /* Configure the DBG block Storm data */
5587 static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
5588 struct ecore_ptt *p_ptt)
5590 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5591 struct dbg_bus_data *bus = &dev_data->bus;
5592 u8 storm_id, i, next_storm_id = 0;
5593 u32 storm_id_mask = 0;
5595 /* Check if SEMI sync FIFO is empty */
5596 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5597 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5598 struct storm_defs *storm = &s_storm_defs[storm_id];
5600 if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
5601 return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
5604 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5605 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5607 if (storm_bus->enabled)
5608 storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
5611 ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);
5613 /* Disable storm stall if recording to internal buffer in one-shot */
5614 ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);
5616 /* Configure calendar */
5617 for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {
5619 /* Find next enabled Storm */
5620 for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);
5622 /* Configure calendar slot */
5623 ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
5626 return DBG_STATUS_OK;
5629 /* Assign HW ID to each dword/qword:
5630 * if the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
5631 * Otherwise, we would like to assign a different HW ID to each dword, to avoid
5632 * data synchronization issues. however, we need to check if there is a trigger
5633 * state for which more than one dword has a constraint. if there is, we cannot
5634 * assign a different HW ID to each dword (since a trigger state has a single
5635 * HW ID), so we assign a different HW ID to each block.
5637 static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
5638 u8 hw_ids[VALUES_PER_CYCLE])
5640 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5641 struct dbg_bus_data *bus = &dev_data->bus;
5642 bool hw_id_per_dword = true;
5643 u8 val_id, state_id;
5646 OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);
5648 if (bus->unify_inputs)
5651 if (bus->trigger_en) {
5652 for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
5655 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5656 if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
5660 hw_id_per_dword = false;
5664 if (hw_id_per_dword) {
5666 /* Assign a different HW ID for each dword */
5667 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5668 hw_ids[val_id] = val_id;
5671 u8 shifted_enable_mask, next_hw_id = 0;
5673 /* Assign HW IDs according to blocks enable / */
5674 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5675 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5677 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5680 block_bus->hw_id = next_hw_id++;
5681 if (!block_bus->hw_id)
5684 shifted_enable_mask =
5685 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5687 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5689 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5690 if (shifted_enable_mask & (1 << val_id))
5691 hw_ids[val_id] = block_bus->hw_id;
5696 /* Configure the DBG block HW blocks data */
5697 static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
5698 struct ecore_ptt *p_ptt)
5700 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5701 struct dbg_bus_data *bus = &dev_data->bus;
5702 u8 hw_ids[VALUES_PER_CYCLE];
5703 u8 val_id, state_id;
5705 ecore_assign_hw_ids(p_hwfn, hw_ids);
5707 /* Assign a HW ID to each trigger state */
5708 if (dev_data->bus.trigger_en) {
5709 for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
5710 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
5711 u8 state_data = bus->trigger_states[state_id].data;
5713 if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
5714 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
5721 /* Configure HW ID mask */
5722 dev_data->bus.hw_id_mask = 0;
5723 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5724 bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
5725 ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);
5727 /* Configure additional K2 PCIE registers */
5728 if (dev_data->chip_id == CHIP_K2 &&
5729 (GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
5730 GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
5731 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
5732 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
5736 enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
5737 struct ecore_ptt *p_ptt)
5739 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5740 struct dbg_bus_data *bus = &dev_data->bus;
5741 enum dbg_bus_filter_types filter_type;
5742 enum dbg_status status;
5746 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");
5748 if (bus->state != DBG_BUS_STATE_READY)
5749 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5751 /* Check if any input was enabled */
5752 if (!bus->num_enabled_storms &&
5753 !bus->num_enabled_blocks &&
5754 !bus->rcv_from_other_engine)
5755 return DBG_STATUS_NO_INPUT_ENABLED;
5757 /* Check if too many input types were enabled (storm+dbgmux) */
5758 if (bus->num_enabled_storms && bus->num_enabled_blocks)
5759 return DBG_STATUS_TOO_MANY_INPUTS;
5761 /* Configure framing mode */
5762 if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5765 /* Configure DBG block for Storm inputs */
5766 if (bus->num_enabled_storms)
5767 if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5770 /* Configure DBG block for block inputs */
5771 if (bus->num_enabled_blocks)
5772 ecore_config_block_inputs(p_hwfn, p_ptt);
5774 /* Configure filter type */
5775 if (bus->filter_en) {
5776 if (bus->trigger_en) {
5777 if (bus->filter_pre_trigger)
5778 filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
5780 filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
5783 filter_type = DBG_BUS_FILTER_TYPE_ON;
5787 filter_type = DBG_BUS_FILTER_TYPE_OFF;
5789 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);
5791 /* Restart timestamp */
5792 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);
5794 /* Enable debug block */
5795 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);
5797 /* Configure enabled blocks - must be done before the DBG block is
5800 if (dev_data->bus.num_enabled_blocks) {
5801 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5802 if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
5805 ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
5806 dev_data->bus.blocks[block_id].line_num,
5807 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5808 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
5809 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
5810 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
5814 /* Configure client mask */
5815 ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);
5817 /* Configure enabled Storms - must be done after the DBG block is
5820 if (dev_data->bus.num_enabled_storms)
5821 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
5822 if (dev_data->bus.storms[storm_id].enabled)
5823 ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id, filter_type);
5825 dev_data->bus.state = DBG_BUS_STATE_RECORDING;
5827 return DBG_STATUS_OK;
5830 enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
5831 struct ecore_ptt *p_ptt)
5833 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5834 struct dbg_bus_data *bus = &dev_data->bus;
5835 enum dbg_status status = DBG_STATUS_OK;
5837 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");
5839 if (bus->state != DBG_BUS_STATE_RECORDING)
5840 return DBG_STATUS_RECORDING_NOT_STARTED;
5842 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
5843 if (status != DBG_STATUS_OK)
5846 ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);
5848 OSAL_MSLEEP(FLUSH_DELAY_MS);
5850 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
5852 /* Check if trigger worked */
5853 if (bus->trigger_en) {
5854 u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);
5856 if (trigger_state != MAX_TRIGGER_STATES)
5857 return DBG_STATUS_DATA_DIDNT_TRIGGER;
5860 bus->state = DBG_BUS_STATE_STOPPED;
5865 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5866 struct ecore_ptt *p_ptt,
5869 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5870 struct dbg_bus_data *bus = &dev_data->bus;
5871 enum dbg_status status;
5873 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5877 if (status != DBG_STATUS_OK)
5880 /* Add dump header */
5881 *buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5883 switch (bus->target) {
5884 case DBG_BUS_TARGET_ID_INT_BUF:
5885 *buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5886 case DBG_BUS_TARGET_ID_PCI:
5887 *buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5892 /* Dump last section */
5893 *buf_size += ecore_dump_last_section(OSAL_NULL, 0, false);
5895 return DBG_STATUS_OK;
5898 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5899 struct ecore_ptt *p_ptt,
5901 u32 buf_size_in_dwords,
5902 u32 *num_dumped_dwords)
5904 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5905 u32 min_buf_size_in_dwords, block_id, offset = 0;
5906 struct dbg_bus_data *bus = &dev_data->bus;
5907 enum dbg_status status;
5910 *num_dumped_dwords = 0;
5912 status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5913 if (status != DBG_STATUS_OK)
5916 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5918 if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5919 return DBG_STATUS_RECORDING_NOT_STARTED;
5921 if (bus->state == DBG_BUS_STATE_RECORDING) {
5922 enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5923 if (stop_state != DBG_STATUS_OK)
5927 if (buf_size_in_dwords < min_buf_size_in_dwords)
5928 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5930 if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5931 return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5934 offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5936 /* Dump recorded data */
5937 if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5938 u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5940 if (!recorded_dwords)
5941 return DBG_STATUS_NO_DATA_RECORDED;
5942 if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5943 return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
5944 offset += recorded_dwords;
5947 /* Dump last section */
5948 offset += ecore_dump_last_section(dump_buf, offset, true);
5950 /* If recorded to PCI buffer - free the buffer */
5951 ecore_bus_free_pci_buf(p_hwfn);
5953 /* Clear debug bus parameters */
5954 bus->state = DBG_BUS_STATE_IDLE;
5955 bus->num_enabled_blocks = 0;
5956 bus->num_enabled_storms = 0;
5957 bus->filter_en = bus->trigger_en = 0;
5959 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
5960 SET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
5962 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5963 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5965 storm_bus->enabled = false;
5966 storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
5969 *num_dumped_dwords = offset;
5971 return DBG_STATUS_OK;
5974 enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
5975 enum dbg_grc_params grc_param,
5980 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5982 /* Initializes the GRC parameters (if not initialized). Needed in order
5983 * to set the default parameter values for the first time.
5985 ecore_dbg_grc_init_params(p_hwfn);
5987 if (grc_param >= MAX_DBG_GRC_PARAMS)
5988 return DBG_STATUS_INVALID_ARGS;
5989 if (val < s_grc_param_defs[grc_param].min ||
5990 val > s_grc_param_defs[grc_param].max)
5991 return DBG_STATUS_INVALID_ARGS;
5993 if (s_grc_param_defs[grc_param].is_preset) {
5997 /* Disabling a preset is not allowed. Call
5998 * dbg_grc_set_params_default instead.
6001 return DBG_STATUS_INVALID_ARGS;
6003 /* Update all params with the preset values */
6004 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
6007 if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
6008 preset_val = s_grc_param_defs[i].exclude_all_preset_val;
6009 else if (grc_param == DBG_GRC_PARAM_CRASH)
6010 preset_val = s_grc_param_defs[i].crash_preset_val;
6012 return DBG_STATUS_INVALID_ARGS;
6014 ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
6019 /* Regular param - set its value */
6020 ecore_grc_set_param(p_hwfn, grc_param, val);
6023 return DBG_STATUS_OK;
6026 /* Assign default GRC param values */
6027 void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6029 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6032 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6033 dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
6036 enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6037 struct ecore_ptt *p_ptt,
6040 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6044 if (status != DBG_STATUS_OK)
6047 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6048 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6049 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6051 return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6054 enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6055 struct ecore_ptt *p_ptt,
6057 u32 buf_size_in_dwords,
6058 u32 *num_dumped_dwords)
6060 u32 needed_buf_size_in_dwords;
6061 enum dbg_status status;
6063 *num_dumped_dwords = 0;
6065 status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6066 if (status != DBG_STATUS_OK)
6069 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6070 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6072 /* Doesn't do anything, needed for compile time asserts */
6073 ecore_static_asserts();
6076 status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6078 /* Reveret GRC params to their default */
6079 ecore_dbg_grc_set_params_default(p_hwfn);
6084 enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6085 struct ecore_ptt *p_ptt,
6088 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6089 struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6090 enum dbg_status status;
6094 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6095 if (status != DBG_STATUS_OK)
6098 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6099 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6100 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6102 if (!idle_chk->buf_size_set) {
6103 idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6104 idle_chk->buf_size_set = true;
6107 *buf_size = idle_chk->buf_size;
6109 return DBG_STATUS_OK;
6112 enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6113 struct ecore_ptt *p_ptt,
6115 u32 buf_size_in_dwords,
6116 u32 *num_dumped_dwords)
6118 u32 needed_buf_size_in_dwords;
6119 enum dbg_status status;
6121 *num_dumped_dwords = 0;
6123 status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6124 if (status != DBG_STATUS_OK)
6127 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6128 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6130 /* Update reset state */
6131 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6133 /* Idle Check Dump */
6134 *num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6136 /* Reveret GRC params to their default */
6137 ecore_dbg_grc_set_params_default(p_hwfn);
6139 return DBG_STATUS_OK;
6142 enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6143 struct ecore_ptt *p_ptt,
6146 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6150 if (status != DBG_STATUS_OK)
6153 return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6156 enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6157 struct ecore_ptt *p_ptt,
6159 u32 buf_size_in_dwords,
6160 u32 *num_dumped_dwords)
6162 u32 needed_buf_size_in_dwords;
6163 enum dbg_status status;
6165 status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6166 if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6169 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6170 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6172 /* Update reset state */
6173 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6176 status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6178 /* Reveret GRC params to their default */
6179 ecore_dbg_grc_set_params_default(p_hwfn);
6184 enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6185 struct ecore_ptt *p_ptt,
6188 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6192 if (status != DBG_STATUS_OK)
6195 return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6198 enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6199 struct ecore_ptt *p_ptt,
6201 u32 buf_size_in_dwords,
6202 u32 *num_dumped_dwords)
6204 u32 needed_buf_size_in_dwords;
6205 enum dbg_status status;
6207 *num_dumped_dwords = 0;
6209 status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6210 if (status != DBG_STATUS_OK)
6213 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6214 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6216 /* Update reset state */
6217 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6219 status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6221 /* Reveret GRC params to their default */
6222 ecore_dbg_grc_set_params_default(p_hwfn);
6227 enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6228 struct ecore_ptt *p_ptt,
6231 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6235 if (status != DBG_STATUS_OK)
6238 return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6241 enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6242 struct ecore_ptt *p_ptt,
6244 u32 buf_size_in_dwords,
6245 u32 *num_dumped_dwords)
6247 u32 needed_buf_size_in_dwords;
6248 enum dbg_status status;
6250 *num_dumped_dwords = 0;
6252 status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6253 if (status != DBG_STATUS_OK)
6256 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6257 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6259 /* Update reset state */
6260 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6262 status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6264 /* Reveret GRC params to their default */
6265 ecore_dbg_grc_set_params_default(p_hwfn);
6270 enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6271 struct ecore_ptt *p_ptt,
6274 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6278 if (status != DBG_STATUS_OK)
6281 return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6284 enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6285 struct ecore_ptt *p_ptt,
6287 u32 buf_size_in_dwords,
6288 u32 *num_dumped_dwords)
6290 u32 needed_buf_size_in_dwords;
6291 enum dbg_status status;
6293 *num_dumped_dwords = 0;
6295 status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6296 if (status != DBG_STATUS_OK)
6299 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6300 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6302 /* Update reset state */
6303 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6305 status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6307 /* Reveret GRC params to their default */
6308 ecore_dbg_grc_set_params_default(p_hwfn);
6313 enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6314 struct ecore_ptt *p_ptt,
6317 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6321 if (status != DBG_STATUS_OK)
6324 /* Update reset state */
6325 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6327 *buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6329 return DBG_STATUS_OK;
6332 enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6333 struct ecore_ptt *p_ptt,
6335 u32 buf_size_in_dwords,
6336 u32 *num_dumped_dwords)
6338 u32 needed_buf_size_in_dwords;
6339 enum dbg_status status;
6341 *num_dumped_dwords = 0;
6343 status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6344 if (status != DBG_STATUS_OK)
6347 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6348 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6350 *num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6352 /* Reveret GRC params to their default */
6353 ecore_dbg_grc_set_params_default(p_hwfn);
6355 return DBG_STATUS_OK;
6358 enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
6359 struct ecore_ptt *p_ptt,
6360 enum block_id block_id,
6361 enum dbg_attn_type attn_type,
6363 struct dbg_attn_block_result *results)
6365 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6366 u8 reg_idx, num_attn_regs, num_result_regs = 0;
6367 const struct dbg_attn_reg *attn_reg_arr;
6369 if (status != DBG_STATUS_OK)
6372 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6373 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6375 attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);
6377 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
6378 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
6379 struct dbg_attn_reg_result *reg_result;
6380 u32 sts_addr, sts_val;
6381 u16 modes_buf_offset;
6385 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
6386 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
6387 if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
6390 /* Mode match - read attention status register */
6391 sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
6392 sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
6396 /* Non-zero attention status - add to results */
6397 reg_result = &results->reg_results[num_result_regs];
6398 SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
6399 SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
6400 reg_result->block_attn_offset = reg_data->block_attn_offset;
6401 reg_result->sts_val = sts_val;
6402 reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
6406 results->block_id = (u8)block_id;
6407 results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
6408 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
6409 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
6411 return DBG_STATUS_OK;
6414 enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6415 struct dbg_attn_block_result *results)
6417 enum dbg_attn_type attn_type;
6420 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6421 attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6423 for (i = 0; i < num_regs; i++) {
6424 struct dbg_attn_reg_result *reg_result;
6425 const char *attn_type_str;
6428 reg_result = &results->reg_results[i];
6429 attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6430 sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6431 DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6434 return DBG_STATUS_OK;
6437 bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6438 struct ecore_ptt *p_ptt,
6439 enum block_id block_id)
6441 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6442 struct block_defs *block = s_block_defs[block_id];
6445 if (!block->has_reset_bit)
6448 reset_reg = block->reset_reg;
6450 return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
6451 !(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) : true;