2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
29 * File : ecore_dbg_fw_funcs.c
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
38 #include "ecore_mcp.h"
39 #include "spad_layout.h"
42 #include "ecore_hsi_common.h"
43 #include "ecore_hsi_debug_tools.h"
44 #include "mcp_public.h"
46 #ifndef USE_DBG_BIN_FILE
47 #include "ecore_dbg_values.h"
49 #include "ecore_dbg_fw_funcs.h"
51 /* Memory groups enum */
68 MEM_GROUP_CONN_CFC_MEM,
69 MEM_GROUP_TASK_CFC_MEM,
84 /* Memory groups names */
85 static const char* s_mem_group_names[] = {
116 /* Idle check conditions */
118 #ifndef __PREVENT_COND_ARR__
120 static u32 cond5(const u32 *r, const u32 *imm) {
121 return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
124 static u32 cond7(const u32 *r, const u32 *imm) {
125 return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
128 static u32 cond14(const u32 *r, const u32 *imm) {
129 return ((r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]));
132 static u32 cond6(const u32 *r, const u32 *imm) {
133 return ((r[0] & imm[0]) != imm[1]);
136 static u32 cond9(const u32 *r, const u32 *imm) {
137 return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
140 static u32 cond10(const u32 *r, const u32 *imm) {
141 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
144 static u32 cond4(const u32 *r, const u32 *imm) {
145 return ((r[0] & ~imm[0]) != imm[1]);
148 static u32 cond0(const u32 *r, const u32 *imm) {
149 return ((r[0] & ~r[1]) != imm[0]);
152 static u32 cond1(const u32 *r, const u32 *imm) {
153 return (r[0] != imm[0]);
156 static u32 cond11(const u32 *r, const u32 *imm) {
157 return (r[0] != r[1] && r[2] == imm[0]);
160 static u32 cond12(const u32 *r, const u32 *imm) {
161 return (r[0] != r[1] && r[2] > imm[0]);
164 static u32 cond3(const u32 *r, const u32 *imm) {
165 return (r[0] != r[1]);
168 static u32 cond13(const u32 *r, const u32 *imm) {
169 return (r[0] & imm[0]);
172 static u32 cond8(const u32 *r, const u32 *imm) {
173 return (r[0] < (r[1] - imm[0]));
176 static u32 cond2(const u32 *r, const u32 *imm) {
177 return (r[0] > imm[0]);
180 /* Array of Idle Check conditions */
181 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
199 #endif /* __PREVENT_COND_ARR__ */
202 /******************************* Data Types **********************************/
207 PLATFORM_EMUL_REDUCED,
212 struct chip_platform_defs {
218 /* Chip constant definitions */
221 struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
224 /* Platform constant definitions */
225 struct platform_defs {
230 /* Storm constant definitions.
231 * Addresses are in bytes, sizes are in quad-regs.
235 enum block_id block_id;
236 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
238 u32 sem_fast_mem_addr;
239 u32 sem_frame_mode_addr;
240 u32 sem_slow_enable_addr;
241 u32 sem_slow_mode_addr;
242 u32 sem_slow_mode1_conf_addr;
243 u32 sem_sync_dbg_empty_addr;
244 u32 sem_slow_dbg_empty_addr;
246 u32 cm_conn_ag_ctx_lid_size;
247 u32 cm_conn_ag_ctx_rd_addr;
248 u32 cm_conn_st_ctx_lid_size;
249 u32 cm_conn_st_ctx_rd_addr;
250 u32 cm_task_ag_ctx_lid_size;
251 u32 cm_task_ag_ctx_rd_addr;
252 u32 cm_task_st_ctx_lid_size;
253 u32 cm_task_st_ctx_rd_addr;
256 /* Block constant definitions */
259 bool has_dbg_bus[MAX_CHIP_IDS];
260 bool associated_to_storm;
262 /* Valid only if associated_to_storm is true */
264 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
268 u32 dbg_force_valid_addr;
269 u32 dbg_force_frame_addr;
272 /* If true, block is taken out of reset before dump */
274 enum dbg_reset_regs reset_reg;
276 /* Bit offset in reset register */
280 /* Reset register definitions */
281 struct reset_reg_defs {
284 bool exists[MAX_CHIP_IDS];
287 /* Debug Bus Constraint operation constant definitions */
288 struct dbg_bus_constraint_op_defs {
293 /* Storm Mode definitions */
294 struct storm_mode_defs {
300 struct grc_param_defs {
301 u32 default_val[MAX_CHIP_IDS];
305 u32 exclude_all_preset_val;
306 u32 crash_preset_val;
309 /* address is in 128b units. Width is in bits. */
310 struct rss_mem_defs {
311 const char *mem_name;
312 const char *type_name;
314 u32 num_entries[MAX_CHIP_IDS];
315 u32 entry_width[MAX_CHIP_IDS];
318 struct vfc_ram_defs {
319 const char *mem_name;
320 const char *type_name;
325 struct big_ram_defs {
326 const char *instance_name;
327 enum mem_groups mem_group_id;
328 enum mem_groups ram_mem_group_id;
329 enum dbg_grc_params grc_param;
332 u32 num_of_blocks[MAX_CHIP_IDS];
336 const char *phy_name;
338 /* PHY base GRC address */
341 /* Relative address of indirect TBUS address register (bits 0..7) */
342 u32 tbus_addr_lo_addr;
344 /* Relative address of indirect TBUS address register (bits 8..10) */
345 u32 tbus_addr_hi_addr;
347 /* Relative address of indirect TBUS data register (bits 0..7) */
348 u32 tbus_data_lo_addr;
350 /* Relative address of indirect TBUS data register (bits 8..11) */
351 u32 tbus_data_hi_addr;
354 /******************************** Constants **********************************/
356 #define MAX_LCIDS 320
357 #define MAX_LTIDS 320
359 #define NUM_IOR_SETS 2
360 #define IORS_PER_SET 176
361 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
363 #define BYTES_IN_DWORD sizeof(u32)
366 #define SHR(val, val_width, amount) (((val) | ((val) << (val_width))) >> (amount)) & ((1 << (val_width)) - 1)
368 /* In the macros below, size and offset are specified in bits */
369 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
/* Token-paste helpers: build the <TYPE>_<FIELD>_OFFSET / _SIZE constant
 * names for a field (offsets/sizes are specified in bits).
 */
#define FIELD_BIT_OFFSET(type, field) type##_##field##_##OFFSET
#define FIELD_BIT_SIZE(type, field) type##_##field##_##SIZE
/* Dword index and intra-dword bit shift of a field, from its bit offset. */
#define FIELD_DWORD_OFFSET(type, field) (int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
/* Bit mask of the field within its containing dword. */
#define FIELD_BIT_MASK(type, field) (((1 << FIELD_BIT_SIZE(type, field)) - 1) << FIELD_DWORD_SHIFT(type, field))
/* Read-modify-write a field inside the dword array 'var'. Wrapped in
 * do/while(0) so the two statements stay together under if/else
 * (the original expanded to two bare statements).
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= (~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= (val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)
/* Write every element of 'arr' to the same register address, in order.
 * NOTE(review): both macros rely on a loop counter 'i' that must already be
 * declared at the call site — callers must provide it.
 */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) ecore_wr(dev, ptt, addr, (arr)[i])
/* Read 'arr_size' consecutive values from the same register address into 'arr'. */
#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) (arr)[i] = ecore_rd(dev, ptt, addr)
382 #define CHECK_ARR_SIZE(arr, size) OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))
384 #ifndef DWORDS_TO_BYTES
385 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
387 #ifndef BYTES_TO_DWORDS
388 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
391 /* extra lines include a signature line + optional latency events line */
392 #ifndef NUM_DBG_LINES
/* Extra debug lines: one signature line plus an optional latency-events
 * line. The block_desc argument is parenthesized before dereferencing so
 * the macros are safe for any pointer-valued expression.
 */
#define NUM_EXTRA_DBG_LINES(block_desc) (1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
397 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
398 #define RAM_LINES_TO_BYTES(lines) DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
400 #define REG_DUMP_LEN_SHIFT 24
401 #define MEM_DUMP_ENTRY_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
403 #define IDLE_CHK_RULE_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
405 #define IDLE_CHK_RESULT_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
407 #define IDLE_CHK_RESULT_REG_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
409 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
411 /* The sizes and offsets below are specified in bits */
412 #define VFC_CAM_CMD_STRUCT_SIZE 64
413 #define VFC_CAM_CMD_ROW_OFFSET 48
414 #define VFC_CAM_CMD_ROW_SIZE 9
415 #define VFC_CAM_ADDR_STRUCT_SIZE 16
416 #define VFC_CAM_ADDR_OP_OFFSET 0
417 #define VFC_CAM_ADDR_OP_SIZE 4
418 #define VFC_CAM_RESP_STRUCT_SIZE 256
419 #define VFC_RAM_ADDR_STRUCT_SIZE 16
420 #define VFC_RAM_ADDR_OP_OFFSET 0
421 #define VFC_RAM_ADDR_OP_SIZE 2
422 #define VFC_RAM_ADDR_ROW_OFFSET 2
423 #define VFC_RAM_ADDR_ROW_SIZE 10
424 #define VFC_RAM_RESP_STRUCT_SIZE 256
426 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
427 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
428 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
429 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
430 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
431 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
433 #define NUM_VFC_RAM_TYPES 4
435 #define VFC_CAM_NUM_ROWS 512
437 #define VFC_OPCODE_CAM_RD 14
438 #define VFC_OPCODE_RAM_RD 0
440 #define NUM_RSS_MEM_TYPES 5
442 #define NUM_BIG_RAM_TYPES 3
443 #define BIG_RAM_BLOCK_SIZE_BYTES 128
444 #define BIG_RAM_BLOCK_SIZE_DWORDS BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
446 #define NUM_PHY_TBUS_ADDRESSES 2048
447 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
449 #define SEM_FAST_MODE6_SRC_ENABLE 0x10
450 #define SEM_FAST_MODE6_SRC_DISABLE 0x3f
452 #define SEM_SLOW_MODE1_DATA_ENABLE 0x1
454 #define VALUES_PER_CYCLE 4
455 #define MAX_CYCLE_VALUES_MASK ((1 << VALUES_PER_CYCLE) - 1)
457 #define MAX_DWORDS_PER_CYCLE 8
461 #define NUM_CALENDAR_SLOTS 16
463 #define MAX_TRIGGER_STATES 3
464 #define TRIGGER_SETS_PER_STATE 2
465 #define MAX_CONSTRAINTS 4
467 #define SEM_FILTER_CID_EN_MASK 0x008
468 #define SEM_FILTER_EID_MASK_EN_MASK 0x010
469 #define SEM_FILTER_EID_RANGE_EN_MASK 0x110
471 #define CHUNK_SIZE_IN_DWORDS 64
472 #define CHUNK_SIZE_IN_BYTES DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)
474 #define INT_BUF_NUM_OF_LINES 192
475 #define INT_BUF_LINE_SIZE_IN_DWORDS 16
476 #define INT_BUF_SIZE_IN_DWORDS (INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
477 #define INT_BUF_SIZE_IN_CHUNKS (INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)
479 #define PCI_BUF_LINE_SIZE_IN_DWORDS 8
480 #define PCI_BUF_LINE_SIZE_IN_BYTES DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)
482 #define TARGET_EN_MASK_PCI 0x3
483 #define TARGET_EN_MASK_NIG 0x4
485 #define PCI_REQ_CREDIT 1
486 #define PCI_PHYS_ADDR_TYPE 0
488 #define OPAQUE_FID(pci_func) ((pci_func << 4) | 0xff00)
490 #define RESET_REG_UNRESET_OFFSET 4
492 #define PCI_PKT_SIZE_IN_CHUNKS 1
493 #define PCI_PKT_SIZE_IN_BYTES (PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)
495 #define NIG_PKT_SIZE_IN_CHUNKS 4
497 #define FLUSH_DELAY_MS 500
498 #define STALL_DELAY_MS 500
500 #define SRC_MAC_ADDR_LO16 0x0a0b
501 #define SRC_MAC_ADDR_HI32 0x0c0d0e0f
502 #define ETH_TYPE 0x1000
504 #define STATIC_DEBUG_LINE_DWORDS 9
506 #define NUM_COMMON_GLOBAL_PARAMS 8
508 #define FW_IMG_KUKU 0
509 #define FW_IMG_MAIN 1
512 #ifndef REG_FIFO_ELEMENT_DWORDS
513 #define REG_FIFO_ELEMENT_DWORDS 2
515 #define REG_FIFO_DEPTH_ELEMENTS 32
516 #define REG_FIFO_DEPTH_DWORDS (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
518 #ifndef IGU_FIFO_ELEMENT_DWORDS
519 #define IGU_FIFO_ELEMENT_DWORDS 4
521 #define IGU_FIFO_DEPTH_ELEMENTS 64
522 #define IGU_FIFO_DEPTH_DWORDS (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
524 #define SEMI_SYNC_FIFO_POLLING_DELAY_MS 5
525 #define SEMI_SYNC_FIFO_POLLING_COUNT 20
527 #ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
528 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
530 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
531 #define PROTECTION_OVERRIDE_DEPTH_DWORDS (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * PROTECTION_OVERRIDE_ELEMENT_DWORDS)
533 #define MCP_SPAD_TRACE_OFFSIZE_ADDR (MCP_REG_SCRATCH + OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))
535 #define EMPTY_FW_VERSION_STR "???_???_???_???"
536 #define EMPTY_FW_IMAGE_STR "???????????????"
539 /***************************** Constant Arrays *******************************/
547 #ifdef USE_DBG_BIN_FILE
548 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
550 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {
552 /* BIN_BUF_DBG_MODE_TREE */
553 { (const u32 *)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},
555 /* BIN_BUF_DBG_DUMP_REG */
556 { dump_reg, OSAL_ARRAY_SIZE(dump_reg) },
558 /* BIN_BUF_DBG_DUMP_MEM */
559 { dump_mem, OSAL_ARRAY_SIZE(dump_mem) },
561 /* BIN_BUF_DBG_IDLE_CHK_REGS */
562 { idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },
564 /* BIN_BUF_DBG_IDLE_CHK_IMMS */
565 { idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },
567 /* BIN_BUF_DBG_IDLE_CHK_RULES */
568 { idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },
570 /* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
573 /* BIN_BUF_DBG_ATTN_BLOCKS */
574 { attn_block, OSAL_ARRAY_SIZE(attn_block) },
576 /* BIN_BUF_DBG_ATTN_REGSS */
577 { attn_reg, OSAL_ARRAY_SIZE(attn_reg) },
579 /* BIN_BUF_DBG_ATTN_INDEXES */
582 /* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
585 /* BIN_BUF_DBG_BUS_BLOCKS */
586 { dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },
588 /* BIN_BUF_DBG_BUS_LINES */
589 { dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },
591 /* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
594 /* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
597 /* BIN_BUF_DBG_PARSING_STRINGS */
602 /* Chip constant definitions array */
603 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
607 { { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
610 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
613 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
616 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },
621 { { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
624 { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
627 { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
630 { MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } }
633 /* Storm constant definitions array */
634 static struct storm_defs s_storm_defs[] = {
638 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
639 TSEM_REG_FAST_MEMORY,
640 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
641 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
642 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
643 TCM_REG_CTX_RBC_ACCS,
644 4, TCM_REG_AGG_CON_CTX,
645 16, TCM_REG_SM_CON_CTX,
646 2, TCM_REG_AGG_TASK_CTX,
647 4, TCM_REG_SM_TASK_CTX },
651 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM }, false,
652 MSEM_REG_FAST_MEMORY,
653 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
654 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
655 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
656 MCM_REG_CTX_RBC_ACCS,
657 1, MCM_REG_AGG_CON_CTX,
658 10, MCM_REG_SM_CON_CTX,
659 2, MCM_REG_AGG_TASK_CTX,
660 7, MCM_REG_SM_TASK_CTX },
664 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
665 USEM_REG_FAST_MEMORY,
666 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
667 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
668 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
669 UCM_REG_CTX_RBC_ACCS,
670 2, UCM_REG_AGG_CON_CTX,
671 13, UCM_REG_SM_CON_CTX,
672 3, UCM_REG_AGG_TASK_CTX,
673 3, UCM_REG_SM_TASK_CTX },
677 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
678 XSEM_REG_FAST_MEMORY,
679 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
680 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
681 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
682 XCM_REG_CTX_RBC_ACCS,
683 9, XCM_REG_AGG_CON_CTX,
684 15, XCM_REG_SM_CON_CTX,
690 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY }, false,
691 YSEM_REG_FAST_MEMORY,
692 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
693 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
694 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
695 YCM_REG_CTX_RBC_ACCS,
696 2, YCM_REG_AGG_CON_CTX,
697 3, YCM_REG_SM_CON_CTX,
698 2, YCM_REG_AGG_TASK_CTX,
699 12, YCM_REG_SM_TASK_CTX },
703 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
704 PSEM_REG_FAST_MEMORY,
705 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
706 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
707 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
708 PCM_REG_CTX_RBC_ACCS,
710 10, PCM_REG_SM_CON_CTX,
715 /* Block definitions array */
/* "grc": dbg client RBCN on both chips; reset reg MISC_PL_UA, bit 1 */
static struct block_defs block_grc_defs = {
"grc", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_UA, 1 };
/* "miscs": no debug bus; no reset register */
static struct block_defs block_miscs_defs = {
"miscs", { false, false }, false, 0,
{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
false, false, MAX_DBG_RESET_REGS, 0 };
/* "misc": no debug bus; no reset register */
static struct block_defs block_misc_defs = {
"misc", { false, false }, false, 0,
{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
false, false, MAX_DBG_RESET_REGS, 0 };
/* "dbu": no debug bus; no reset register */
static struct block_defs block_dbu_defs = {
"dbu", { false, false }, false, 0,
{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
false, false, MAX_DBG_RESET_REGS, 0 };
/* "pglue_b": dbg client RBCH on both chips; reset reg MISCS_PL_HV, bit 1 */
static struct block_defs block_pglue_b_defs = {
"pglue_b", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };
/* "cnig": dbg bus on second chip only (client RBCW); reset reg MISCS_PL_HV, bit 0 */
static struct block_defs block_cnig_defs = {
"cnig", { false, true }, false, 0,
{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
CNIG_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };
/* "cpmu": no debug bus; reset reg MISCS_PL_HV, bit 8 */
static struct block_defs block_cpmu_defs = {
"cpmu", { false, false }, false, 0,
{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };
/* "ncsi": dbg client RBCZ on both chips; reset reg MISCS_PL_HV, bit 5 */
static struct block_defs block_ncsi_defs = {
"ncsi", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };
/* "opte": no debug bus; reset reg MISCS_PL_HV, bit 4 */
static struct block_defs block_opte_defs = {
"opte", { false, false }, false, 0,
{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };
/* "bmb": dbg clients RBCZ/RBCB; reset reg MISCS_PL_UA, bit 7 */
static struct block_defs block_bmb_defs = {
"bmb", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB },
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };
/* "pcie": dbg bus on second chip only (client RBCH); no reset register */
static struct block_defs block_pcie_defs = {
"pcie", { false, true }, false, 0,
{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0 };
/* "mcp": no debug bus; no reset register */
static struct block_defs block_mcp_defs = {
"mcp", { false, false }, false, 0,
{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
false, false, MAX_DBG_RESET_REGS, 0 };
/* "mcp2": dbg client RBCZ on both chips; no reset register */
static struct block_defs block_mcp2_defs = {
"mcp2", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
false, false, MAX_DBG_RESET_REGS, 0 };
/* "pswhst": dbg client RBCP on both chips; reset reg MISC_PL_HV, bit 0 */
static struct block_defs block_pswhst_defs = {
"pswhst", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
/* "pswhst2": dbg client RBCP on both chips; reset reg MISC_PL_HV, bit 0 */
static struct block_defs block_pswhst2_defs = {
"pswhst2", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
/* "pswrd": dbg client RBCP on both chips; reset reg MISC_PL_HV, bit 2 */
static struct block_defs block_pswrd_defs = {
"pswrd", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
/* "pswrd2": dbg client RBCP on both chips; reset reg MISC_PL_HV, bit 2 */
static struct block_defs block_pswrd2_defs = {
"pswrd2", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
/* "pswwr": dbg client RBCP on both chips; reset reg MISC_PL_HV, bit 3 */
static struct block_defs block_pswwr_defs = {
"pswwr", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
/* "pswwr2": no debug bus; reset reg MISC_PL_HV, bit 3 */
static struct block_defs block_pswwr2_defs = {
"pswwr2", { false, false }, false, 0,
{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
/* "pswrq": dbg client RBCP on both chips; reset reg MISC_PL_HV, bit 1 */
static struct block_defs block_pswrq_defs = {
"pswrq", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
/* "pswrq2": dbg client RBCP on both chips; reset reg MISC_PL_HV, bit 1 */
static struct block_defs block_pswrq2_defs = {
"pswrq2", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
/* "pglcs": dbg bus on second chip only (client RBCH); reset reg MISCS_PL_HV, bit 2 */
static struct block_defs block_pglcs_defs = {
"pglcs", { false, true }, false, 0,
{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };
/* "ptu": dbg client RBCP on both chips; reset reg MISC_PL_PDA_VMAIN_2, bit 20 */
static struct block_defs block_ptu_defs ={
"ptu", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };
/* "dmae": dbg client RBCP on both chips; reset reg MISC_PL_PDA_VMAIN_1, bit 28 */
static struct block_defs block_dmae_defs = {
"dmae", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };
/* "tcm": tied to TSTORM; dbg client RBCT; reset reg MISC_PL_PDA_VMAIN_1, bit 5 */
static struct block_defs block_tcm_defs = {
"tcm", { true, true }, true, DBG_TSTORM_ID,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };
/* "mcm": tied to MSTORM; dbg clients RBCT/RBCM; reset reg MISC_PL_PDA_VMAIN_2, bit 3 */
static struct block_defs block_mcm_defs = {
"mcm", { true, true }, true, DBG_MSTORM_ID,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };
/* "ucm": tied to USTORM; dbg client RBCU; reset reg MISC_PL_PDA_VMAIN_1, bit 8 */
static struct block_defs block_ucm_defs = {
"ucm", { true, true }, true, DBG_USTORM_ID,
{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };
/* "xcm": tied to XSTORM; dbg client RBCX; reset reg MISC_PL_PDA_VMAIN_1, bit 19 */
static struct block_defs block_xcm_defs = {
"xcm", { true, true }, true, DBG_XSTORM_ID,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };
/* "ycm": tied to YSTORM; dbg clients RBCX/RBCY; reset reg MISC_PL_PDA_VMAIN_2, bit 5 */
static struct block_defs block_ycm_defs = {
"ycm", { true, true }, true, DBG_YSTORM_ID,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };
/* "pcm": tied to PSTORM; dbg client RBCS; reset reg MISC_PL_PDA_VMAIN_2, bit 4 */
static struct block_defs block_pcm_defs = {
"pcm", { true, true }, true, DBG_PSTORM_ID,
{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };
/* "qm": dbg clients RBCP/RBCQ; reset reg MISC_PL_PDA_VMAIN_1, bit 16 */
static struct block_defs block_qm_defs = {
"qm", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ },
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };
/* "tm": dbg client RBCS on both chips; reset reg MISC_PL_PDA_VMAIN_1, bit 17 */
static struct block_defs block_tm_defs = {
"tm", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };
/* "dorq": dbg clients RBCX/RBCY; reset reg MISC_PL_PDA_VMAIN_1, bit 18 */
static struct block_defs block_dorq_defs = {
"dorq", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };
/* "brb": dbg client RBCR on both chips; reset reg MISC_PL_PDA_VMAIN_1, bit 0 */
static struct block_defs block_brb_defs = {
"brb", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };
/* "src": dbg client RBCF on both chips; reset reg MISC_PL_PDA_VMAIN_1, bit 2 */
static struct block_defs block_src_defs = {
"src", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };
/* "prs": dbg client RBCR on both chips; reset reg MISC_PL_PDA_VMAIN_1, bit 1 */
static struct block_defs block_prs_defs = {
"prs", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };
/* "tsdm": tied to TSTORM; dbg client RBCT; reset reg MISC_PL_PDA_VMAIN_1, bit 3 */
static struct block_defs block_tsdm_defs = {
"tsdm", { true, true }, true, DBG_TSTORM_ID,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };
/* "msdm": tied to MSTORM; dbg clients RBCT/RBCM; reset reg MISC_PL_PDA_VMAIN_2, bit 6 */
static struct block_defs block_msdm_defs = {
"msdm", { true, true }, true, DBG_MSTORM_ID,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };
/* "usdm": tied to USTORM; dbg client RBCU; reset reg MISC_PL_PDA_VMAIN_1, bit 7 */
static struct block_defs block_usdm_defs = {
"usdm", { true, true }, true, DBG_USTORM_ID,
{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
/* "xsdm": tied to XSTORM; dbg client RBCX; reset reg MISC_PL_PDA_VMAIN_1, bit 20 */
static struct block_defs block_xsdm_defs = {
"xsdm", { true, true }, true, DBG_XSTORM_ID,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };
/* "ysdm": tied to YSTORM; dbg clients RBCX/RBCY; reset reg MISC_PL_PDA_VMAIN_2, bit 8 */
static struct block_defs block_ysdm_defs = {
"ysdm", { true, true }, true, DBG_YSTORM_ID,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };
/* "psdm": tied to PSTORM; dbg client RBCS; reset reg MISC_PL_PDA_VMAIN_2, bit 7 */
static struct block_defs block_psdm_defs = {
"psdm", { true, true }, true, DBG_PSTORM_ID,
{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };
/* "tsem": tied to TSTORM; dbg client RBCT; reset reg MISC_PL_PDA_VMAIN_1, bit 4 */
static struct block_defs block_tsem_defs = {
"tsem", { true, true }, true, DBG_TSTORM_ID,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };
/* "msem": tied to MSTORM; dbg clients RBCT/RBCM; reset reg MISC_PL_PDA_VMAIN_2, bit 9 */
static struct block_defs block_msem_defs = {
"msem", { true, true }, true, DBG_MSTORM_ID,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };
/* "usem": tied to USTORM; dbg client RBCU; reset reg MISC_PL_PDA_VMAIN_1, bit 9 */
static struct block_defs block_usem_defs = {
"usem", { true, true }, true, DBG_USTORM_ID,
{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };
/* "xsem": tied to XSTORM; dbg client RBCX; reset reg MISC_PL_PDA_VMAIN_1, bit 21 */
static struct block_defs block_xsem_defs = {
"xsem", { true, true }, true, DBG_XSTORM_ID,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };
/* "ysem": tied to YSTORM; dbg clients RBCX/RBCY; reset reg MISC_PL_PDA_VMAIN_2, bit 11 */
static struct block_defs block_ysem_defs = {
"ysem", { true, true }, true, DBG_YSTORM_ID,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };
/* "psem": tied to PSTORM; dbg client RBCS; reset reg MISC_PL_PDA_VMAIN_2, bit 10 */
static struct block_defs block_psem_defs = {
"psem", { true, true }, true, DBG_PSTORM_ID,
{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };
/* "rss": dbg client RBCT on both chips; reset reg MISC_PL_PDA_VMAIN_2, bit 18 */
static struct block_defs block_rss_defs = {
"rss", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };
/* "tmld": dbg clients RBCT/RBCM; reset reg MISC_PL_PDA_VMAIN_2, bit 13 */
static struct block_defs block_tmld_defs = {
"tmld", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };
/* "muld": dbg client RBCU on both chips; reset reg MISC_PL_PDA_VMAIN_2, bit 14 */
static struct block_defs block_muld_defs = {
"muld", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };
/* "yuld": dbg client RBCU; BB/K2-specific regs; reset reg MISC_PL_PDA_VMAIN_2, bit 15 */
static struct block_defs block_yuld_defs = {
"yuld", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
YULD_REG_DBG_FORCE_FRAME_BB_K2,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };
/* "xyld": dbg client RBCX on both chips; reset reg MISC_PL_PDA_VMAIN_2, bit 12 */
static struct block_defs block_xyld_defs = {
"xyld", { true, true }, false, 0,
{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };
1127 static struct block_defs block_prm_defs = {
1128 "prm", { true, true }, false, 0,
1129 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
1130 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1131 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1132 PRM_REG_DBG_FORCE_FRAME,
1133 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };
1135 static struct block_defs block_pbf_pb1_defs = {
1136 "pbf_pb1", { true, true }, false, 0,
1137 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
1138 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1139 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1140 PBF_PB1_REG_DBG_FORCE_FRAME,
1141 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };
1143 static struct block_defs block_pbf_pb2_defs = {
1144 "pbf_pb2", { true, true }, false, 0,
1145 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
1146 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1147 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1148 PBF_PB2_REG_DBG_FORCE_FRAME,
1149 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };
1151 static struct block_defs block_rpb_defs = {
1152 "rpb", { true, true }, false, 0,
1153 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
1154 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1155 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1156 RPB_REG_DBG_FORCE_FRAME,
1157 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };
1159 static struct block_defs block_btb_defs = {
1160 "btb", { true, true }, false, 0,
1161 { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV },
1162 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1163 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1164 BTB_REG_DBG_FORCE_FRAME,
1165 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };
1167 static struct block_defs block_pbf_defs = {
1168 "pbf", { true, true }, false, 0,
1169 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
1170 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1171 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1172 PBF_REG_DBG_FORCE_FRAME,
1173 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };
1175 static struct block_defs block_rdif_defs = {
1176 "rdif", { true, true }, false, 0,
1177 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
1178 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1179 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1180 RDIF_REG_DBG_FORCE_FRAME,
1181 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };
1183 static struct block_defs block_tdif_defs = {
1184 "tdif", { true, true }, false, 0,
1185 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1186 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1187 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1188 TDIF_REG_DBG_FORCE_FRAME,
1189 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };
1191 static struct block_defs block_cdu_defs = {
1192 "cdu", { true, true }, false, 0,
1193 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1194 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1195 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1196 CDU_REG_DBG_FORCE_FRAME,
1197 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };
1199 static struct block_defs block_ccfc_defs = {
1200 "ccfc", { true, true }, false, 0,
1201 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1202 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1203 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1204 CCFC_REG_DBG_FORCE_FRAME,
1205 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };
1207 static struct block_defs block_tcfc_defs = {
1208 "tcfc", { true, true }, false, 0,
1209 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1210 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1211 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1212 TCFC_REG_DBG_FORCE_FRAME,
1213 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };
1215 static struct block_defs block_igu_defs = {
1216 "igu", { true, true }, false, 0,
1217 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1218 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1219 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1220 IGU_REG_DBG_FORCE_FRAME,
1221 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };
1223 static struct block_defs block_cau_defs = {
1224 "cau", { true, true }, false, 0,
1225 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1226 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1227 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1228 CAU_REG_DBG_FORCE_FRAME,
1229 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };
1231 static struct block_defs block_umac_defs = {
1232 "umac", { false, true }, false, 0,
1233 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
1234 UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1235 UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1236 UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1237 true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };
1239 static struct block_defs block_xmac_defs = {
1240 "xmac", { false, false }, false, 0,
1241 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1243 false, false, MAX_DBG_RESET_REGS, 0 };
1245 static struct block_defs block_dbg_defs = {
1246 "dbg", { false, false }, false, 0,
1247 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1249 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };
1251 static struct block_defs block_nig_defs = {
1252 "nig", { true, true }, false, 0,
1253 { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
1254 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1255 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1256 NIG_REG_DBG_FORCE_FRAME,
1257 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };
1259 static struct block_defs block_wol_defs = {
1260 "wol", { false, true }, false, 0,
1261 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
1262 WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1263 WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1264 WOL_REG_DBG_FORCE_FRAME_K2_E5,
1265 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };
1267 static struct block_defs block_bmbn_defs = {
1268 "bmbn", { false, true }, false, 0,
1269 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB },
1270 BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1271 BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1272 BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1273 false, false, MAX_DBG_RESET_REGS, 0 };
1275 static struct block_defs block_ipc_defs = {
1276 "ipc", { false, false }, false, 0,
1277 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1279 true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };
1281 static struct block_defs block_nwm_defs = {
1282 "nwm", { false, true }, false, 0,
1283 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
1284 NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1285 NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1286 NWM_REG_DBG_FORCE_FRAME_K2_E5,
1287 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };
1289 static struct block_defs block_nws_defs = {
1290 "nws", { false, true }, false, 0,
1291 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
1292 NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1293 NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1294 NWS_REG_DBG_FORCE_FRAME_K2_E5,
1295 true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };
1297 static struct block_defs block_ms_defs = {
1298 "ms", { false, true }, false, 0,
1299 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
1300 MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1301 MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1302 MS_REG_DBG_FORCE_FRAME_K2_E5,
1303 true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };
1305 static struct block_defs block_phy_pcie_defs = {
1306 "phy_pcie", { false, true }, false, 0,
1307 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
1308 PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1309 PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1310 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1311 false, false, MAX_DBG_RESET_REGS, 0 };
1313 static struct block_defs block_led_defs = {
1314 "led", { false, false }, false, 0,
1315 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1317 true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };
1319 static struct block_defs block_avs_wrap_defs = {
1320 "avs_wrap", { false, false }, false, 0,
1321 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1323 true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };
1325 static struct block_defs block_rgfs_defs = {
1326 "rgfs", { false, false }, false, 0,
1327 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1329 false, false, MAX_DBG_RESET_REGS, 0 };
1331 static struct block_defs block_rgsrc_defs = {
1332 "rgsrc", { false, false }, false, 0,
1333 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1335 false, false, MAX_DBG_RESET_REGS, 0 };
1337 static struct block_defs block_tgfs_defs = {
1338 "tgfs", { false, false }, false, 0,
1339 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1341 false, false, MAX_DBG_RESET_REGS, 0 };
1343 static struct block_defs block_tgsrc_defs = {
1344 "tgsrc", { false, false }, false, 0,
1345 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1347 false, false, MAX_DBG_RESET_REGS, 0 };
1349 static struct block_defs block_ptld_defs = {
1350 "ptld", { false, false }, false, 0,
1351 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1353 false, false, MAX_DBG_RESET_REGS, 0 };
1355 static struct block_defs block_ypld_defs = {
1356 "ypld", { false, false }, false, 0,
1357 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1359 false, false, MAX_DBG_RESET_REGS, 0 };
1361 static struct block_defs block_misc_aeu_defs = {
1362 "misc_aeu", { false, false }, false, 0,
1363 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1365 false, false, MAX_DBG_RESET_REGS, 0 };
1367 static struct block_defs block_bar0_map_defs = {
1368 "bar0_map", { false, false }, false, 0,
1369 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1371 false, false, MAX_DBG_RESET_REGS, 0 };
1374 static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
1379 &block_pglue_b_defs,
1389 &block_pswhst2_defs,
1431 &block_pbf_pb1_defs,
1432 &block_pbf_pb2_defs,
1457 &block_phy_pcie_defs,
1459 &block_avs_wrap_defs,
1460 &block_misc_aeu_defs,
1461 &block_bar0_map_defs,
1466 /* Constraint operation types */
1467 static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
1469 /* DBG_BUS_CONSTRAINT_OP_EQ */
1472 /* DBG_BUS_CONSTRAINT_OP_NE */
1475 /* DBG_BUS_CONSTRAINT_OP_LT */
1478 /* DBG_BUS_CONSTRAINT_OP_LTC */
1481 /* DBG_BUS_CONSTRAINT_OP_LE */
1484 /* DBG_BUS_CONSTRAINT_OP_LEC */
1487 /* DBG_BUS_CONSTRAINT_OP_GT */
1490 /* DBG_BUS_CONSTRAINT_OP_GTC */
1493 /* DBG_BUS_CONSTRAINT_OP_GE */
1496 /* DBG_BUS_CONSTRAINT_OP_GEC */
1500 static const char* s_dbg_target_names[] = {
1502 /* DBG_BUS_TARGET_ID_INT_BUF */
1505 /* DBG_BUS_TARGET_ID_NIG */
1508 /* DBG_BUS_TARGET_ID_PCI */
1512 static struct storm_mode_defs s_storm_mode_defs[] = {
1514 /* DBG_BUS_STORM_MODE_PRINTF */
1515 { "printf", true, 0 },
1517 /* DBG_BUS_STORM_MODE_PRAM_ADDR */
1518 { "pram_addr", true, 1 },
1520 /* DBG_BUS_STORM_MODE_DRA_RW */
1521 { "dra_rw", true, 2 },
1523 /* DBG_BUS_STORM_MODE_DRA_W */
1524 { "dra_w", true, 3 },
1526 /* DBG_BUS_STORM_MODE_LD_ST_ADDR */
1527 { "ld_st_addr", true, 4 },
1529 /* DBG_BUS_STORM_MODE_DRA_FSM */
1530 { "dra_fsm", true, 5 },
1532 /* DBG_BUS_STORM_MODE_RH */
1535 /* DBG_BUS_STORM_MODE_FOC */
1536 { "foc", false, 1 },
1538 /* DBG_BUS_STORM_MODE_EXT_STORE */
1539 { "ext_store", false, 3 }
1542 static struct platform_defs s_platform_defs[] = {
1547 /* PLATFORM_EMUL_FULL */
1548 { "emul_full", 2000 },
1550 /* PLATFORM_EMUL_REDUCED */
1551 { "emul_reduced", 2000 },
1557 static struct grc_param_defs s_grc_param_defs[] = {
1559 /* DBG_GRC_PARAM_DUMP_TSTORM */
1560 { { 1, 1 }, 0, 1, false, 1, 1 },
1562 /* DBG_GRC_PARAM_DUMP_MSTORM */
1563 { { 1, 1 }, 0, 1, false, 1, 1 },
1565 /* DBG_GRC_PARAM_DUMP_USTORM */
1566 { { 1, 1 }, 0, 1, false, 1, 1 },
1568 /* DBG_GRC_PARAM_DUMP_XSTORM */
1569 { { 1, 1 }, 0, 1, false, 1, 1 },
1571 /* DBG_GRC_PARAM_DUMP_YSTORM */
1572 { { 1, 1 }, 0, 1, false, 1, 1 },
1574 /* DBG_GRC_PARAM_DUMP_PSTORM */
1575 { { 1, 1 }, 0, 1, false, 1, 1 },
1577 /* DBG_GRC_PARAM_DUMP_REGS */
1578 { { 1, 1 }, 0, 1, false, 0, 1 },
1580 /* DBG_GRC_PARAM_DUMP_RAM */
1581 { { 1, 1 }, 0, 1, false, 0, 1 },
1583 /* DBG_GRC_PARAM_DUMP_PBUF */
1584 { { 1, 1 }, 0, 1, false, 0, 1 },
1586 /* DBG_GRC_PARAM_DUMP_IOR */
1587 { { 0, 0 }, 0, 1, false, 0, 1 },
1589 /* DBG_GRC_PARAM_DUMP_VFC */
1590 { { 0, 0 }, 0, 1, false, 0, 1 },
1592 /* DBG_GRC_PARAM_DUMP_CM_CTX */
1593 { { 1, 1 }, 0, 1, false, 0, 1 },
1595 /* DBG_GRC_PARAM_DUMP_ILT */
1596 { { 1, 1 }, 0, 1, false, 0, 1 },
1598 /* DBG_GRC_PARAM_DUMP_RSS */
1599 { { 1, 1 }, 0, 1, false, 0, 1 },
1601 /* DBG_GRC_PARAM_DUMP_CAU */
1602 { { 1, 1 }, 0, 1, false, 0, 1 },
1604 /* DBG_GRC_PARAM_DUMP_QM */
1605 { { 1, 1 }, 0, 1, false, 0, 1 },
1607 /* DBG_GRC_PARAM_DUMP_MCP */
1608 { { 1, 1 }, 0, 1, false, 0, 1 },
1610 /* DBG_GRC_PARAM_RESERVED */
1611 { { 1, 1 }, 0, 1, false, 0, 1 },
1613 /* DBG_GRC_PARAM_DUMP_CFC */
1614 { { 1, 1 }, 0, 1, false, 0, 1 },
1616 /* DBG_GRC_PARAM_DUMP_IGU */
1617 { { 1, 1 }, 0, 1, false, 0, 1 },
1619 /* DBG_GRC_PARAM_DUMP_BRB */
1620 { { 0, 0 }, 0, 1, false, 0, 1 },
1622 /* DBG_GRC_PARAM_DUMP_BTB */
1623 { { 0, 0 }, 0, 1, false, 0, 1 },
1625 /* DBG_GRC_PARAM_DUMP_BMB */
1626 { { 0, 0 }, 0, 1, false, 0, 1 },
1628 /* DBG_GRC_PARAM_DUMP_NIG */
1629 { { 1, 1 }, 0, 1, false, 0, 1 },
1631 /* DBG_GRC_PARAM_DUMP_MULD */
1632 { { 1, 1 }, 0, 1, false, 0, 1 },
1634 /* DBG_GRC_PARAM_DUMP_PRS */
1635 { { 1, 1 }, 0, 1, false, 0, 1 },
1637 /* DBG_GRC_PARAM_DUMP_DMAE */
1638 { { 1, 1 }, 0, 1, false, 0, 1 },
1640 /* DBG_GRC_PARAM_DUMP_TM */
1641 { { 1, 1 }, 0, 1, false, 0, 1 },
1643 /* DBG_GRC_PARAM_DUMP_SDM */
1644 { { 1, 1 }, 0, 1, false, 0, 1 },
1646 /* DBG_GRC_PARAM_DUMP_DIF */
1647 { { 1, 1 }, 0, 1, false, 0, 1 },
1649 /* DBG_GRC_PARAM_DUMP_STATIC */
1650 { { 1, 1 }, 0, 1, false, 0, 1 },
1652 /* DBG_GRC_PARAM_UNSTALL */
1653 { { 0, 0 }, 0, 1, false, 0, 0 },
1655 /* DBG_GRC_PARAM_NUM_LCIDS */
1656 { { MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },
1658 /* DBG_GRC_PARAM_NUM_LTIDS */
1659 { { MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },
1661 /* DBG_GRC_PARAM_EXCLUDE_ALL */
1662 { { 0, 0 }, 0, 1, true, 0, 0 },
1664 /* DBG_GRC_PARAM_CRASH */
1665 { { 0, 0 }, 0, 1, true, 0, 0 },
1667 /* DBG_GRC_PARAM_PARITY_SAFE */
1668 { { 0, 0 }, 0, 1, false, 1, 0 },
1670 /* DBG_GRC_PARAM_DUMP_CM */
1671 { { 1, 1 }, 0, 1, false, 0, 1 },
1673 /* DBG_GRC_PARAM_DUMP_PHY */
1674 { { 1, 1 }, 0, 1, false, 0, 1 },
1676 /* DBG_GRC_PARAM_NO_MCP */
1677 { { 0, 0 }, 0, 1, false, 0, 0 },
1679 /* DBG_GRC_PARAM_NO_FW_VER */
1680 { { 0, 0 }, 0, 1, false, 0, 0 }
1683 static struct rss_mem_defs s_rss_mem_defs[] = {
1684 { "rss_mem_cid", "rss_cid", 0,
1688 { "rss_mem_key_msb", "rss_key", 1024,
1692 { "rss_mem_key_lsb", "rss_key", 2048,
1696 { "rss_mem_info", "rss_info", 3072,
1700 { "rss_mem_ind", "rss_ind", 4096,
/* VFC RAM regions dumped by the VFC dump. Positional fields (confirm
 * against struct vfc_ram_defs): memory name, type name, base row, and
 * number of rows. The regions are laid out back-to-back (0, 512, 640, 672).
 */
1705 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1706 { "vfc_ram_tt1", "vfc_ram", 0, 512 },
1707 { "vfc_ram_mtt2", "vfc_ram", 512, 128 },
1708 { "vfc_ram_stt2", "vfc_ram", 640, 32 },
1709 { "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
1712 static struct big_ram_defs s_big_ram_defs[] = {
1713 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1716 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1719 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
/* Reset register descriptors, indexed by enum dbg_reset_regs. Positional
 * fields (confirm against struct reset_reg_defs): register address, the
 * value written on unreset, and per-chip existence flags { BB, K2 }.
 * Note that only MISCS_REG_RESET_PL_HV_2_K2_E5 does not exist on BB.
 */
1723 static struct reset_reg_defs s_reset_regs_defs[] = {
1725 /* DBG_RESET_REG_MISCS_PL_UA */
1726 { MISCS_REG_RESET_PL_UA, 0x0, { true, true } },
1728 /* DBG_RESET_REG_MISCS_PL_HV */
1729 { MISCS_REG_RESET_PL_HV, 0x0, { true, true } },
1731 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1732 { MISCS_REG_RESET_PL_HV_2_K2_E5, 0x0, { false, true } },
1734 /* DBG_RESET_REG_MISC_PL_UA */
1735 { MISC_REG_RESET_PL_UA, 0x0, { true, true } },
1737 /* DBG_RESET_REG_MISC_PL_HV */
1738 { MISC_REG_RESET_PL_HV, 0x0, { true, true } },
1740 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1741 { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, { true, true } },
1743 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1744 { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, { true, true } },
1746 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1747 { MISC_REG_RESET_PL_PDA_VAUX, 0x2, { true, true } },
/* PHY debug access descriptors. Judging by the register names, each entry
 * lists a PHY name, its CMU base register, the TBUS address registers
 * (low byte / high byte) and the TBUS data registers (low / high) used to
 * read PHY internals - TODO confirm field names against struct phy_defs.
 */
1750 static struct phy_defs s_phy_defs[] = {
1751 { "nw_phy", NWS_REG_NWS_CMU_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
1752 { "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1753 { "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1754 { "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1757 /* The order of indexes that should be applied to a PCI buffer line */
/* Each adjacent pair of dwords is swapped: 1,0 / 3,2 / 5,4 / 7,6 */
1758 static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
1760 /******************************** Variables **********************************/
1762 /* The version of the calling app */
1763 static u32 s_app_ver;
1765 /**************************** Private Functions ******************************/
/* Sanity checks that every debug table has exactly one entry per value of
 * its corresponding enum (CHECK_ARR_SIZE presumably expands to a
 * build-time assertion - confirm against its definition). A mismatch here
 * would otherwise silently corrupt dumps at runtime.
 */
1767 static void ecore_static_asserts(void)
1769 CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
1770 CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
1771 CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
1772 CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
1773 CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
1774 CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
1775 CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
1776 CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
1777 CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
1778 CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
1779 CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
1780 CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
1783 /* Reads and returns a single dword from the specified unaligned buffer. */
1784 static u32 ecore_read_unaligned_dword(u8 *buf)
1788 OSAL_MEMCPY((u8*)&dword, buf, sizeof(dword));
1792 /* Returns the difference in bytes between the specified physical addresses.
1793 * Assumes that the first address is bigger then the second, and that the
1794 * difference is a 32-bit value.
1796 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1797 struct dbg_bus_mem_addr *b)
1799 return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1802 /* Sets the value of the specified GRC param */
1803 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1804 enum dbg_grc_params grc_param,
1807 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1809 dev_data->grc.param_val[grc_param] = val;
1812 /* Returns the value of the specified GRC param */
1813 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1814 enum dbg_grc_params grc_param)
1816 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1818 return dev_data->grc.param_val[grc_param];
1821 /* Initializes the GRC parameters */
1822 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1824 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1826 if (!dev_data->grc.params_initialized) {
1827 ecore_dbg_grc_set_params_default(p_hwfn);
1828 dev_data->grc.params_initialized = 1;
1832 /* Initializes debug data for the specified device */
/* Detects the chip (BB B0 / K2) and the platform (ASIC, full/reduced
 * emulation, FPGA), records the matching mode bits in dev_data, and
 * initializes the GRC dump parameters. Idempotent: returns DBG_STATUS_OK
 * immediately once dev_data->initialized is set.
 * NOTE(review): a few lines of this function are not visible in this
 * extract (e.g. the check that precedes the APP_VERSION_NOT_SET return,
 * presumably on s_app_ver, and what look like ASIC_ONLY preprocessor
 * guards around the first platform assignment) - confirm against the
 * full source.
 */
1833 static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
1834 struct ecore_ptt *p_ptt)
1836 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1838 if (dev_data->initialized)
1839 return DBG_STATUS_OK;
/* The calling application must set its version before init */
1842 return DBG_STATUS_APP_VERSION_NOT_SET;
/* Chip detection - only K2 and BB B0 are supported */
1844 if (ECORE_IS_K2(p_hwfn->p_dev)) {
1845 dev_data->chip_id = CHIP_K2;
1846 dev_data->mode_enable[MODE_K2] = 1;
1848 else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
1849 dev_data->chip_id = CHIP_BB;
1850 dev_data->mode_enable[MODE_BB] = 1;
1853 return DBG_STATUS_UNKNOWN_CHIP;
/* Platform detection */
1857 dev_data->platform_id = PLATFORM_ASIC;
1858 dev_data->mode_enable[MODE_ASIC] = 1;
1860 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
1861 dev_data->platform_id = PLATFORM_ASIC;
1862 dev_data->mode_enable[MODE_ASIC] = 1;
1864 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
/* Bit 29 of the ECO_RESERVED scratch register selects full emulation */
1865 if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
1866 dev_data->platform_id = PLATFORM_EMUL_FULL;
1867 dev_data->mode_enable[MODE_EMUL_FULL] = 1;
1870 dev_data->platform_id = PLATFORM_EMUL_REDUCED;
1871 dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
1874 else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1875 dev_data->platform_id = PLATFORM_FPGA;
1876 dev_data->mode_enable[MODE_FPGA] = 1;
1879 return DBG_STATUS_UNKNOWN_CHIP;
1883 /* Initializes the GRC parameters */
1884 ecore_dbg_grc_init_params(p_hwfn);
1886 dev_data->initialized = true;
1888 return DBG_STATUS_OK;
1891 static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1892 enum block_id block_id)
1894 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1896 return (struct dbg_bus_block*)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1899 /* Returns OSAL_NULL for signature line, latency line and non-existing lines */
/* Returns the descriptor of the debug line currently selected for the
 * given block. Line 0 is the signature line and (when the block has
 * latency events) line 1 is the latency line; neither has a descriptor.
 */
1900 static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
1901 enum block_id block_id)
1903 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1904 struct dbg_bus_block_data *block_bus;
1905 struct dbg_bus_block *block_desc;
1907 block_bus = &dev_data->bus.blocks[block_id];
1908 block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
/* Signature line, latency line, or out-of-range line number */
1910 if (!block_bus->line_num ||
1911 (block_bus->line_num == 1 && block_desc->has_latency_events) ||
1912 block_bus->line_num >= NUM_DBG_LINES(block_desc))
/* Index into the flat line table, skipping the extra (non-described) lines */
1915 return (struct dbg_bus_line*)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
1918 /* Reads the FW info structure for the specified Storm from the chip,
1919 * and writes it to the specified fw_info pointer.
/* Both fw_info and the intermediate fw_info_location are zeroed up front,
 * so on any failure (e.g. an out-of-range size read from the RAM) the
 * caller sees an all-zero fw_info rather than stale data.
 */
1921 static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
1922 struct ecore_ptt *p_ptt,
1924 struct fw_info *fw_info)
1926 struct storm_defs *storm = &s_storm_defs[storm_id];
1927 struct fw_info_location fw_info_location;
1930 OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
1931 OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));
1933 /* Read first the address that points to fw_info location.
1934 * The address is located in the last line of the Storm RAM.
1936 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - sizeof(fw_info_location);
1937 dest = (u32*)&fw_info_location;
/* Copy the location struct one dword at a time via GRC reads */
1939 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
1940 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1942 /* Read FW version info from Storm RAM */
/* The size sanity check guards against garbage in an uninitialized RAM */
1943 if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
1944 addr = fw_info_location.grc_addr;
1945 dest = (u32*)fw_info;
1946 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
1947 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1951 /* Dumps the specified string to the specified buffer.
1952 * Returns the dumped size in bytes.
1954 static u32 ecore_dump_str(char *dump_buf,
1959 OSAL_STRCPY(dump_buf, str);
1961 return (u32)OSAL_STRLEN(str) + 1;
1964 /* Dumps zeros to align the specified buffer to dwords.
1965 * Returns the dumped size in bytes.
1967 static u32 ecore_dump_align(char *dump_buf,
1971 u8 offset_in_dword, align_size;
1973 offset_in_dword = (u8)(byte_offset & 0x3);
1974 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1976 if (dump && align_size)
1977 OSAL_MEMSET(dump_buf, 0, align_size);
1982 /* Writes the specified string param to the specified buffer.
1983 * Returns the dumped size in dwords.
/* Record layout: NUL-terminated param name, a one-byte type tag (1 =
 * string value), the NUL-terminated value string, then zero padding to
 * the next dword boundary. When dump is false only the size is computed.
 */
1985 static u32 ecore_dump_str_param(u32 *dump_buf,
1987 const char *param_name,
1988 const char *param_val)
1990 char *char_buf = (char*)dump_buf;
1993 /* Dump param name */
1994 offset += ecore_dump_str(char_buf + offset, dump, param_name);
1996 /* Indicate a string param value */
1998 *(char_buf + offset) = 1;
2001 /* Dump param value */
2002 offset += ecore_dump_str(char_buf + offset, dump, param_val);
2004 /* Align buffer to next dword */
2005 offset += ecore_dump_align(char_buf + offset, dump, offset);
2007 return BYTES_TO_DWORDS(offset);
2010 /* Writes the specified numeric param to the specified buffer.
2011 * Returns the dumped size in dwords.
/* Record layout: NUL-terminated param name, a one-byte type tag (0 =
 * numeric value), zero padding to a dword boundary, then the 32-bit
 * value itself. When dump is false only the size is computed.
 */
2013 static u32 ecore_dump_num_param(u32 *dump_buf,
2015 const char *param_name,
2018 char *char_buf = (char*)dump_buf;
2021 /* Dump param name */
2022 offset += ecore_dump_str(char_buf + offset, dump, param_name);
2024 /* Indicate a numeric param value */
2026 *(char_buf + offset) = 0;
2029 /* Align buffer to next dword */
2030 offset += ecore_dump_align(char_buf + offset, dump, offset);
2032 /* Dump param value (and change offset from bytes to dwords) */
2033 offset = BYTES_TO_DWORDS(offset);
2035 *(dump_buf + offset) = param_val;
2041 /* Reads the FW version and writes it as a param to the specified buffer.
2042 * Returns the dumped size in dwords.
/* Scans the Storms for one whose SEM block is out of reset, reads its
 * fw_info, and dumps "fw-version", "fw-image" and "fw-timestamp" params.
 * The empty-version/image strings are dumped when dump is false, when the
 * NO_FW_VER GRC param is set, or when no readable Storm is found.
 */
2044 static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
2045 struct ecore_ptt *p_ptt,
2049 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2050 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
2051 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
2052 struct fw_info fw_info = { { 0 }, { 0 } };
2055 if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2056 /* Read FW image/version from PRAM in a non-reset SEMI */
2060 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
2061 struct storm_defs *storm = &s_storm_defs[storm_id];
2063 /* Read FW version/image */
/* A Storm held in reset cannot be read - skip it */
2064 if (dev_data->block_in_reset[storm->block_id])
2067 /* Read FW info for the current Storm */
2068 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
2070 /* Create FW version/image strings */
2071 if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
2072 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
2073 switch (fw_info.ver.image_id) {
2074 case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
2075 case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
2076 case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
2077 default: OSAL_STRCPY(fw_img_str, "unknown"); break;
2084 /* Dump FW version, image and timestamp */
2085 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
2086 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
2087 offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);
2092 /* Reads the MFW version and writes it as a param to the specified buffer.
2093 * Returns the dumped size in dwords.
/* Reads the management FW version out of the MCP public-data "global"
 * section in scratchpad memory and dumps it as the "mfw-version" string
 * param. The empty-version string is dumped when dump is false, on
 * emulation platforms, or when the NO_FW_VER GRC param is set.
 */
2095 static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
2096 struct ecore_ptt *p_ptt,
2100 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2101 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2104 is_emul = dev_data->platform_id == PLATFORM_EMUL_FULL || dev_data->platform_id == PLATFORM_EMUL_REDUCED;
2106 if (dump && !is_emul && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2107 u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;
2109 /* Find MCP public data GRC address. Needs to be ORed with
2110 * MCP_REG_SCRATCH due to a HW bug.
2112 public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;
2114 /* Find MCP public global section offset */
2115 global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
2116 global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
/* offsize values are in units of 4 bytes, relative to the scratchpad */
2117 global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2119 /* Read MFW version from MCP public global section */
2120 mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));
2122 /* Dump MFW version param */
/* The version dword packs major_minor_rev_eng as one byte each (MSB first) */
2123 if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2124 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
2127 return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2130 /* Writes a section header to the specified buffer.
2131 * Returns the dumped size in dwords.
/* A section header is simply a numeric param whose name is the section
 * name and whose value is the section's number of params.
 */
2133 static u32 ecore_dump_section_hdr(u32 *dump_buf,
2138 return ecore_dump_num_param(dump_buf, dump, name, num_params);
2141 /* Writes the common global params to the specified buffer.
2142 * Returns the dumped size in dwords.
/* Dumps the "global_params" section header followed by the params shared
 * by all dump types: fw-version/image/timestamp, mfw-version,
 * tools-version, chip, platform and pci-func. The caller declares how
 * many dump-specific params it will append via num_specific_global_params
 * so the header's param count is correct.
 */
2144 static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
2145 struct ecore_ptt *p_ptt,
2148 u8 num_specific_global_params)
2150 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2154 /* Dump global params section header */
2155 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2156 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);
2159 offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2160 offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2161 offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
2162 offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
2163 offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
2164 offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);
2169 /* Writes the "last" section (including CRC) to the specified buffer at the
2170 * given offset. Returns the dumped size in dwords.
/* The CRC32 (bit-inverted, seeded with 0xffffffff) is computed over the
 * entire dump from its start up to and including the "last" section
 * header, and is stored in the dword that follows the header.
 */
2172 static u32 ecore_dump_last_section(struct ecore_hwfn *p_hwfn,
2177 u32 start_offset = offset;
2179 /* Dump CRC section header */
2180 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2182 /* Calculate CRC32 and add it to the dword after the "last" section */
2184 *(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8*)dump_buf, DWORDS_TO_BYTES(offset));
2188 return offset - start_offset;
/* Update blocks reset state: reads all chip reset registers and refreshes
 * dev_data->block_in_reset[] for every HW block.
 */
static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };

	/* Read reset registers (only those that exist on this chip) */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
			reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);

	/* Check if blocks are in reset: a block is in reset when it has a
	 * reset bit and that bit is CLEAR in its reset register.
	 */
	for (i = 0; i < MAX_BLOCK_ID; i++) {
		struct block_defs *block = s_block_defs[i];

		dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
2212 /* Enable / disable the Debug block */
2213 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2214 struct ecore_ptt *p_ptt,
2217 ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
/* Resets the Debug block */
static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];

	/* Locate the reset register of the DBG block and compute a value
	 * with the block's reset bit cleared.
	 */
	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
	old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
	new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);

	/* Pulse the reset bit: write the cleared value, then restore the
	 * original register value (taking the block out of reset again).
	 */
	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2235 static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
2236 struct ecore_ptt *p_ptt,
2237 enum dbg_bus_frame_modes mode)
2239 ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
/* Enable / disable Debug Bus clients according to the specified mask
 * (one bit per client: 1 = enable, 0 = disable).
 */
static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
/* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID.
 * Programs the Storm's SEMI fast/slow debug registers according to the
 * configured storm mode, then applies the optional CID / event-ID filters.
 */
static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum dbg_storms storm_id,
enum dbg_bus_filter_types filter_type)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	/* sem_filter_params starts as the filter type and accumulates the
	 * CID/EID filter enable flags set below.
	 */
	u32 base_addr, sem_filter_params = filter_type;
	struct dbg_bus_storm_data *storm_bus;
	struct storm_mode_defs *storm_mode;
	struct storm_defs *storm;

	/* Look up static Storm definitions and per-Storm bus configuration */
	storm = &s_storm_defs[storm_id];
	storm_bus = &dev_data->bus.storms[storm_id];
	storm_mode = &s_storm_mode_defs[storm_bus->mode];
	base_addr = storm->sem_fast_mem_addr;

	if (storm_mode->is_fast_dbg) {

		/* Enable fast debug */
		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);

		/* Enable all messages except STORE. Must be done after
		 * enabling SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
		 * be dropped after the SEMI sync fifo is filled.
		 */
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE);

		/* Enable slow debug */
		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);

	/* Config SEM cid filter */
	if (storm_bus->cid_filter_en) {
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
		sem_filter_params |= SEM_FILTER_CID_EN_MASK;

	/* Config SEM eid filter: either a [min, max] event-ID range or a
	 * value+mask match, selected by eid_range_not_mask.
	 */
	if (storm_bus->eid_filter_en) {
		const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;

		if (storm_bus->eid_range_not_mask) {
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
			sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;

			/* Note: the mask is bitwise-inverted before writing */
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
			sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;

	/* Config accumulated SEM filter parameters (if any) */
	if (sem_filter_params)
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
/* Disables Debug Bus block inputs.
 * Stops Storm message output, optionally waits for the SEMI sync FIFOs to
 * drain, then disables Storm debug, bus clients, block inputs, timestamps,
 * filters and triggers. Always returns DBG_STATUS_OK.
 */
static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool empty_semi_fifos)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
	bool is_fifo_empty[MAX_DBG_STORMS] = { false };

	/* Disable messages output in all Storms (skipping Storms whose
	 * block is in reset - their registers are inaccessible).
	 */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (!dev_data->block_in_reset[storm->block_id])
			ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE);

	/* Try to empty the SEMI sync fifo. Must be done after messages output
	 * were disabled in all Storms (i.e. SEM_FAST_REG_DBG_MODE6_SRC_DISABLE
	 * was set to all 1's.
	 */
	while (num_fifos_to_empty) {
		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
			struct storm_defs *storm = &s_storm_defs[storm_id];

			if (is_fifo_empty[storm_id])

			/* Check if sync fifo got empty (a Storm in reset is
			 * treated as empty).
			 */
			if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
				is_fifo_empty[storm_id] = true;
				num_fifos_to_empty--;

		/* Check if need to continue polling */
		if (num_fifos_to_empty) {
			/* delay_factor scales the delay per platform (e.g. emulation) */
			u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
			/* NOTE(review): polling_count is (re)declared to 0 here on
			 * every outer-loop iteration, so the bound check below never
			 * advances - verify the retry limit against the original code.
			 */
			u32 polling_count = 0;

			if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
				OSAL_MSLEEP(polling_ms);

				DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");

	/* Disable debug in all Storms */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 base_addr = storm->sem_fast_mem_addr;

		if (dev_data->block_in_reset[storm->block_id])

		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);

	/* Disable all clients */
	ecore_bus_enable_clients(p_hwfn, p_ptt, 0);

	/* Disable all blocks (only blocks with Debug Bus support on this
	 * chip that are not in reset).
	 */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct block_defs *block = s_block_defs[block_id];

		if (block->has_dbg_bus[dev_data->chip_id] && !dev_data->block_in_reset[block_id])
			ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);

	/* Disable timestamp */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);

	/* Disable filters and triggers */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);

	return DBG_STATUS_OK;
/* Sets a Debug Bus trigger/filter constraint.
 * The same nine constraint registers exist for both the filter and each
 * trigger state; is_filter selects which register set is written, and
 * reg_offset (derived from the constraint ID, and for triggers also from
 * the current trigger state) selects the constraint slot within the set.
 */
static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_offset = constraint_id * BYTES_IN_DWORD;
	u8 curr_trigger_state;

	/* For trigger only - set register offset according to state */
	curr_trigger_state = dev_data->bus.next_trigger_state - 1;
	reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;

	/* Program operation, data/frame value+mask, dword offset, range,
	 * cyclic and must bits for the selected constraint slot.
	 */
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
/* Reads the specified DBG Bus internal buffer range and copy it to the
 * specified buffer. Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	u32 line, reg_addr, i, offset = 0;

		/* Not dumping - just report the required size */
		return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;

	/* Read the buffer line by line; the dwords of each line are stored
	 * in reverse order (index INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i).
	 */
	for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
		line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
		for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
			dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);
/* Reads the DBG Bus internal buffer and copy its contents to a buffer.
 * Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	u32 last_written_line, offset = 0;

	last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);

	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {

		/* Internal buffer was wrapped: first dump from write pointer
		 * to buffer end, then dump from buffer start to write pointer.
		 */
		if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
		offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
	else if (last_written_line) {

		/* Internal buffer wasn't wrapped: dump from buffer start until
		 * the write pointer (valid only when the read pointer is 0).
		 */
		if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);

			DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
/* Reads the specified DBG Bus PCI buffer range and copy it to the specified
 * buffer. Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	/* Extract PCI buffer pointer from virtual address */
	void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
	u32 *pci_buf_start = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
	u32 *pci_buf, line, i;

		/* Not dumping - just report the required size */
		return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;

	/* Copy line by line; dwords within a line are reordered according to
	 * the s_pci_buf_line_ind[] lookup table.
	 */
	for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
		line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
		for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
			dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;
/* Copies the DBG Bus PCI buffer to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 next_wr_byte_offset, next_wr_line_offset;
	struct dbg_bus_mem_addr next_wr_phys_addr;
	u32 pci_buf_size_in_lines, offset = 0;

	pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* Extract write pointer (physical address) - lo dword then hi dword */
	next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
	next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);

	/* Convert write pointer to offset within the buffer; the offset must
	 * be line-aligned and inside the buffer to be valid.
	 */
	next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
	if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)

	next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* PCI buffer wrapped: first dump from write pointer to buffer end. */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);

	/* Dump from buffer start until write pointer */
	if (next_wr_line_offset)
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);
/* Copies the DBG Bus recorded data to the specified buffer.
 * Dispatches to the internal-buffer or PCI-buffer dump routine according
 * to the configured recording target. Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	switch (dev_data->bus.target) {
	case DBG_BUS_TARGET_ID_INT_BUF:
		return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
	case DBG_BUS_TARGET_ID_PCI:
		return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
/* Frees the Debug Bus PCI buffer (no-op if no buffer was allocated,
 * i.e. pci_buf.size is 0). Clears pci_buf.size afterwards so a double
 * free is harmless.
 */
static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	dma_addr_t pci_buf_phys_addr;

	/* Extract PCI buffer pointer from virtual address */
	virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
	pci_buf = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);

	if (!dev_data->bus.pci_buf.size)

	/* Copy the stored physical address into the OSAL dma_addr_t type
	 * expected by the free routine.
	 */
	OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));

	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);

	dev_data->bus.pci_buf.size = 0;
/* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
 * Each enabled Storm/block gets its own "bus_input" section.
 * Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	/* "?storm" template - the first character is patched per Storm */
	char storm_name[8] = "?storm";
	u32 block_id, offset = 0;

	/* Dump one section per enabled Storm */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (!dev_data->bus.storms[storm_id].enabled)

		/* Dump section header */
		storm_name[0] = storm->letter;
		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);

	/* Dump one section per enabled block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
		struct block_defs *block = s_block_defs[block_id];

		if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))

		/* Dump section header */
		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
		offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
/* Dumps the Debug Bus header (params, inputs, data header) to the specified
 * buffer. Returns the dumped size in dwords.
 */
static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char hw_id_mask_str[16];

	/* Format the HW ID mask as a hex string */
	if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
		DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");

	/* Dump global params, followed by 5 bus-specific params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
	offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
	offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);

	offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);

	/* For non-NIG targets, add a "bus_data" section whose size param is
	 * computed by a dry-run (dump=false) of the data dump.
	 */
	if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
		u32 recorded_dwords = 0;

			recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);

		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
/* Evaluates the modes tree at *modes_buf_offset and returns true if the
 * currently-enabled modes match. The tree is a prefix expression: NOT/OR/AND
 * operator bytes recurse into their operands; any other byte is a mode
 * index (offset by MAX_INIT_MODE_OPS) looked up in dev_data->mode_enable.
 * *modes_buf_offset is advanced past the consumed sub-expression.
 */
static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
u16 *modes_buf_offset)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	/* Get next element from modes tree buffer */
	tree_val = ((u8*)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];

	case INIT_MODE_OP_NOT:
		return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
	case INIT_MODE_OP_OR:
	case INIT_MODE_OP_AND:
		/* Evaluate both operands before combining (both must be
		 * consumed to keep the buffer offset correct).
		 */
		arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
		arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
		return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
	default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2717 /* Returns true if the specified entity (indicated by GRC param) should be
2718 * included in the dump, false otherwise.
2720 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2721 enum dbg_grc_params grc_param)
2723 return ecore_grc_get_param(p_hwfn, grc_param) > 0;
/* Returns true if the specified Storm should be included in the dump, false
 * otherwise. Storm IDs map directly onto per-Storm GRC params.
 */
static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
enum dbg_storms storm)
	return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
/* Returns true if the specified memory should be included in the dump, false
 * otherwise. The decision combines: (1) the Storm the block belongs to,
 * (2) Big-RAM membership, and (3) a per-memory-group GRC param.
 */
static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
enum block_id block_id,
	struct block_defs *block = s_block_defs[block_id];

	/* Check Storm match */
	if (block->associated_to_storm &&
		!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))

	/* Big-RAM memories are controlled by their dedicated GRC param */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
			return ecore_grc_is_included(p_hwfn, big_ram->grc_param);

	/* Map the remaining memory groups to their GRC dump params */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_QM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
/* Stalls or un-stalls all Storms that are included in the dump, then waits
 * STALL_DELAY_MS for the stall to take effect.
 */
static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))

		/* Write the Storm's stall register (common to BB/K2) */
		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
		ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);

	OSAL_MSLEEP(STALL_DELAY_MS);
/* Takes all blocks out of reset by setting their reset bits in the chip's
 * reset registers (write-to-set via RESET_REG_UNRESET_OFFSET).
 */
static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };

	/* Fill reset regs values - one bit per block marked for unreset */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct block_defs *block = s_block_defs[block_id];

		if (block->has_reset_bit && block->unreset)
			reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);

	/* Write reset registers (only those that exist on this chip),
	 * OR-ing in each register's default unreset value.
	 */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])

		reg_val[i] |= s_reset_regs_defs[i].unreset_val;

			ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
/* Returns the attention block data of the specified block, for the given
 * attention type (e.g. parity/interrupt), out of the binary debug arrays.
 */
static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
enum dbg_attn_type attn_type)
	const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block*)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;

	return &base_attn_block_arr[block_id].per_type_data[attn_type];
/* Returns the attention registers of the specified block for the given
 * attention type. The number of registers is returned via num_attn_regs.
 */
static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
enum dbg_attn_type attn_type,
	const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);

	*num_attn_regs = block_type_data->num_regs;

	/* The block's registers are a sub-range of the global attn regs array */
	return &((const struct dbg_attn_reg*)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
/* For each block, clear the status of all parities.
 * Parity status registers are clear-on-read, so reading sts_clr_address
 * is sufficient to clear them.
 */
static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 reg_idx, num_attn_regs;

	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		/* Skip blocks in reset - their registers are inaccessible */
		if (dev_data->block_in_reset[block_id])

		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
			u16 modes_buf_offset;

			/* Check mode: some attn registers are valid only in
			 * specific chip/feature modes.
			 */
			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);

			/* If Mode match: clear parity status */
			if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
				ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
/* Dumps GRC registers section header. Returns the dumped size in dwords.
 * The following parameters are dumped:
 * - count: no. of dumped entries
 * - split: split type
 * - id: split ID (dumped only if split_id >= 0)
 * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
 *   and param_val != OSAL_NULL).
 */
static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
u32 num_reg_entries,
const char *split_type,
const char *param_name,
const char *param_val)
	/* 2 mandatory params (count + split), plus the optional id and the
	 * optional user param.
	 */
	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);

	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
	if (param_name && param_val)
		offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
/* Dumps the GRC registers in the specified address range.
 * Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */
static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;

	/* Read the range one dword at a time into the dump buffer */
	for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
		*(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, byte_addr);
/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 * The header is a single dword: address in the low bits, length shifted
 * into the REG_DUMP_LEN field.
 */
static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
/* Dumps GRC registers sequence (header + register values).
 * Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */
static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
/* Dumps GRC registers sequence with skip cycle.
 * Returns the dumped size in dwords.
 * - addr: start GRC address in dwords
 * - total_len: total no. of dwords to dump
 * - read_len: no. consecutive dwords to read
 * - skip_len: no. of dwords to skip (and fill with zeros)
 */
static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
	u32 offset = 0, reg_offset = 0;

	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);

		/* Not dumping - just account for the full entry size */
		return offset + total_len;

	/* Alternate between reading read_len dwords and zero-filling
	 * skip_len dwords until total_len dwords were produced.
	 */
	while (reg_offset < total_len) {
		u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);

		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
		reg_offset += curr_len;

		if (reg_offset < total_len) {
			/* NOTE(review): clamping the skip to
			 * "total_len - skip_len" looks suspicious - the remaining
			 * space is "total_len - reg_offset". Verify against the
			 * original/upstream code before changing.
			 */
			curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));

			reg_offset += curr_len;
/* Dumps GRC registers entries. Returns the dumped size in dwords.
 * Iterates the input registers array: each group is preceded by a condition
 * header (mode + block); groups whose mode/block don't match are skipped,
 * the rest are dumped register by register. The number of dumped entries
 * is returned via num_dumped_reg_entries.
 */
static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct dbg_array input_regs_arr,
bool block_enable[MAX_BLOCK_ID],
u32 *num_dumped_reg_entries)
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	while (input_offset < input_regs_arr.size_in_dwords) {
		const struct dbg_dump_cond_hdr* cond_hdr = (const struct dbg_dump_cond_hdr*)&input_regs_arr.ptr[input_offset++];
		u16 modes_buf_offset;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;

			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);

		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			/* Skip this group's data */
			input_offset += cond_hdr->data_size;

		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg = (const struct dbg_dump_reg*)&input_regs_arr.ptr[input_offset];

			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
						GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
						GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
						GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
			(*num_dumped_reg_entries)++;
/* Dumps the GRC registers of one split (header + entries).
 * Returns the dumped size in dwords, or 0 if no register was dumped.
 */
static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct dbg_array input_regs_arr,
bool block_enable[MAX_BLOCK_ID],
const char *split_type_name,
const char *param_name,
const char *param_val)
	u32 num_dumped_reg_entries, offset;

	/* Calculate register dump header size (and skip it for now) - the
	 * entry count isn't known until after the entries were dumped.
	 */
	offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);

	/* Dump registers */
	offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);

	/* Write register dump header back-patched with the real count */
	if (dump && num_dumped_reg_entries > 0)
		ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);

	return num_dumped_reg_entries > 0 ? offset : 0;
/* Dumps registers according to the input registers array.
 * The input is organized as split headers (none/port/PF/VF); for each split
 * the function pretends to the relevant port/function, dumps that split's
 * registers, and finally restores the original PF pretend.
 * Returns the dumped size in dwords.
 */
static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool block_enable[MAX_BLOCK_ID],
const char *param_name,
const char *param_val)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct chip_platform_defs *chip_platform;
	u32 offset = 0, input_offset = 0;
	u8 port_id, pf_id, vf_id;

	/* Port/PF/VF counts depend on the chip + platform combination */
	chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping registers...\n");

	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct dbg_array curr_input_regs_arr;
		u32 split_data_size;

		/* Parse the split header and set up a view of its data */
		split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
		curr_input_regs_arr.size_in_dwords = split_data_size;

		switch(split_type_id) {
		case SPLIT_TYPE_NONE:
			/* Engine-wide registers; split ID is -1 */
			offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);

		case SPLIT_TYPE_PORT:
			/* Dump once per port, pretending to each port */
			for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
					ecore_port_pretend(p_hwfn, p_ptt, port_id);
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);

		case SPLIT_TYPE_PORT_PF:
			/* Dump once per PF, pretending to each PF's FID */
			for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
					ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);

			/* Dump once per VF, pretending to each VF's FID */
			for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
					ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);

		input_offset += split_data_size;

	/* Pretend to original PF */
		ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3176 /* Dump reset registers. Returns the dumped size in dwords. */
3177 static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3178 struct ecore_ptt *p_ptt,
3182 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3183 u32 i, offset = 0, num_regs = 0;
3185 /* Calculate header size (dump=false: sizing pass only, written below) */
3186 offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3188 /* Write reset registers */
3189 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
/* Skip reset registers that don't exist on the current chip */
3190 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3193 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
/* Rewrite the header now that the real register count is known */
3199 ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3204 /* Dump registers that are modified during GRC Dump and therefore must be
3205 * dumped first. Returns the dumped size in dwords.
3207 static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
3208 struct ecore_ptt *p_ptt,
3212 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3213 u32 block_id, offset = 0, num_reg_entries = 0;
3214 const struct dbg_attn_reg *attn_reg_arr;
3215 u8 storm_id, reg_idx, num_attn_regs;
3217 /* Calculate header size (placeholder; rewritten at the end) */
3218 offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3220 /* Write parity registers */
3221 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
/* Blocks in reset can't be read during an actual dump */
3222 if (dev_data->block_in_reset[block_id] && dump)
3225 attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
3227 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
3228 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
3229 u16 modes_buf_offset;
3233 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3234 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
/* Skip registers whose mode expression doesn't match this device */
3235 if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
3238 /* Mode match: read & dump registers (mask + status, hence +2 below) */
3239 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
3240 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
3241 num_reg_entries += 2;
3245 /* Write Storm stall status registers */
3246 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3247 struct storm_defs *storm = &s_storm_defs[storm_id];
3249 if (dev_data->block_in_reset[storm->block_id] && dump)
3252 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3253 BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
/* Rewrite the header with the final entry count */
3259 ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);
3264 /* Dumps registers that can't be represented in the debug arrays */
3265 static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3266 struct ecore_ptt *p_ptt,
/* Exactly 2 entries are dumped (RDIF + TDIF), so the header count is fixed */
3272 offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3274 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3277 offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3278 offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3283 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3284 * dwords. The following parameters are dumped:
3285 * - name: dumped only if it's not OSAL_NULL.
3286 * - addr: in dwords, dumped only if name is OSAL_NULL.
3287 * - len: in dwords, always dumped.
3288 * - width: dumped if it's not zero.
3289 * - packed: dumped only if it's not false.
3290 * - mem_group: always dumped.
3291 * - is_storm: true only if the memory is related to a Storm.
3292 * - storm_letter: valid only if is_storm is true.
3295 static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3303 const char *mem_group,
/* A zero-length memory indicates a corrupted debug array entry */
3312 DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3319 /* Dump section header */
3320 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
/* Storm memories: build "<letter>STORM_<name>" by overwriting the '?'
 * placeholder with the Storm letter.
 */
3326 OSAL_STRCPY(buf, "?STORM_");
3327 buf[0] = storm_letter;
3328 OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3331 OSAL_STRCPY(buf, name);
3334 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
3336 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from %s...\n", len, buf);
/* No name given: identify the memory by its byte address instead */
3341 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3343 offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3344 if (dump && len > 64)
3345 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", len, addr_in_bytes);
3349 offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3351 /* Dump bit width */
3353 offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3357 offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
/* Same "?STORM_" prefixing scheme for the memory-group type string */
3361 OSAL_STRCPY(buf, "?STORM_");
3362 buf[0] = storm_letter;
3363 OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3366 OSAL_STRCPY(buf, mem_group);
3369 offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3374 /* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by address.
3375 * Returns the dumped size in dwords.
3376 * The addr and len arguments are specified in dwords.
3378 static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3379 struct ecore_ptt *p_ptt,
3388 const char *mem_group,
/* Header first, then the raw data read over the given dword range */
3394 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3395 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3400 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3401 static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
3402 struct ecore_ptt *p_ptt,
3403 struct dbg_array input_mems_arr,
3407 u32 i, offset = 0, input_offset = 0;
3408 bool mode_match = true;
/* Each iteration consumes one condition header plus its memory entries */
3410 while (input_offset < input_mems_arr.size_in_dwords) {
3411 const struct dbg_dump_cond_hdr* cond_hdr;
3412 u16 modes_buf_offset;
3416 cond_hdr = (const struct dbg_dump_cond_hdr*)&input_mems_arr.ptr[input_offset++];
3417 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3419 /* Check required mode */
3420 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3422 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3423 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
/* Mode mismatch: skip the whole group of entries */
3427 input_offset += cond_hdr->data_size;
3431 for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3432 const struct dbg_dump_mem *mem = (const struct dbg_dump_mem*)&input_mems_arr.ptr[input_offset];
3433 u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
3434 bool is_storm = false, mem_wide_bus;
3435 char storm_letter = 'a';
3436 u32 mem_addr, mem_len;
3438 if (mem_group_id >= MEM_GROUPS_NUM) {
3439 DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
/* Honor the GRC params filtering for this block/memory group */
3443 if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
3446 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3447 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3448 mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);
3450 /* Update memory length for CCFC/TCFC memories
3451 * according to number of LCIDs/LTIDs.
3453 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3454 if (mem_len % MAX_LCIDS) {
3455 DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
/* Scale length from MAX_LCIDS down to the configured LCID count */
3459 mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
3461 else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3462 if (mem_len % MAX_LTIDS) {
3463 DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
3467 mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
3470 /* If memory is associated with Storm, update Storm
3473 if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
3475 storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
/* name=OSAL_NULL: the memory is identified by address in the header */
3479 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
3480 0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
3487 /* Dumps GRC memories according to the input array dump_mem.
3488 * Returns the dumped size in dwords.
3490 static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3491 struct ecore_ptt *p_ptt,
3495 u32 offset = 0, input_offset = 0;
/* Walk the binary DUMP_MEM array split by split (same layout as DUMP_REG) */
3497 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3498 const struct dbg_dump_split_hdr *split_hdr;
3499 struct dbg_array curr_input_mems_arr;
3500 u32 split_data_size;
3503 split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3504 split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3505 split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3506 curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3507 curr_input_mems_arr.size_in_dwords = split_data_size;
3509 switch (split_type_id) {
3510 case SPLIT_TYPE_NONE:
3511 offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
/* Only engine-wide (non-split) memories are supported */
3515 DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3519 input_offset += split_data_size;
3525 /* Dumps GRC context data for the specified Storm.
3526 * Returns the dumped size in dwords.
3527 * The lid_size argument is specified in quad-regs.
3529 static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
3530 struct ecore_ptt *p_ptt,
3539 struct storm_defs *storm = &s_storm_defs[storm_id];
3540 u32 i, lid, total_size, offset = 0;
/* Convert lid_size from quad-regs to dwords */
3545 lid_size *= BYTES_IN_DWORD;
3546 total_size = num_lids * lid_size;
/* bit width = lid_size dwords * 32 bits */
3548 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);
/* Sizing pass: reserve space without touching the hardware */
3551 return offset + total_size;
3553 /* Dump context data */
3554 for (lid = 0; lid < num_lids; lid++) {
3555 for (i = 0; i < lid_size; i++, offset++) {
/* Select (dword index, lid) in the CM context window, then read it */
3556 ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3557 *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
3564 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3565 static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
3566 struct ecore_ptt *p_ptt,
/* For every included Storm, dump its four CM context types:
 * Conn AG/ST (sized by LCIDs) and Task AG/ST (sized by LTIDs).
 */
3573 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3574 struct storm_defs *storm = &s_storm_defs[storm_id];
3576 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3579 /* Dump Conn AG context size */
3580 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3581 storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);
3583 /* Dump Conn ST context size */
3584 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3585 storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);
3587 /* Dump Task AG context size */
3588 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3589 storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);
3591 /* Dump Task ST context size */
3592 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3593 storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
3599 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3600 static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3601 struct ecore_ptt *p_ptt,
/* Name template; the trailing '?' is replaced with the set digit below */
3605 char buf[10] = "IOR_SET_?";
3606 u32 addr, offset = 0;
3607 u8 storm_id, set_id;
3609 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3610 struct storm_defs *storm = &s_storm_defs[storm_id];
3612 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3615 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
/* IOR sets live in the Storm register file at a per-set offset */
3616 addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
3617 buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3618 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3625 /* Dump VFC CAM. Returns the dumped size in dwords. */
3626 static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
3627 struct ecore_ptt *p_ptt,
3632 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3633 struct storm_defs *storm = &s_storm_defs[storm_id];
3634 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3635 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3636 u32 row, i, offset = 0;
3638 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);
/* Sizing pass: just reserve the space */
3641 return offset + total_size;
3643 /* Prepare CAM address */
3644 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
/* Read the CAM row-by-row through the SEM fast-memory VFC interface:
 * write command, write address, then read the response dwords.
 */
3646 for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {
3648 /* Write VFC CAM command */
3649 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3650 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);
3652 /* Write VFC CAM address */
3653 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);
3655 /* Read VFC CAM read response */
3656 ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
3662 /* Dump VFC RAM. Returns the dumped size in dwords. */
3663 static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
3664 struct ecore_ptt *p_ptt,
3668 struct vfc_ram_defs *ram_defs)
3670 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3671 struct storm_defs *storm = &s_storm_defs[storm_id];
3672 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3673 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3674 u32 row, i, offset = 0;
3676 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);
3678 /* Prepare RAM address */
3679 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
/* Sizing pass: just reserve the space */
3682 return offset + total_size;
/* Read the requested row range through the VFC interface: command,
 * then address (with the row), then the response dwords.
 */
3684 for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {
3686 /* Write VFC RAM command */
3687 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);
3689 /* Write VFC RAM address */
3690 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3691 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);
3693 /* Read VFC RAM read response */
3694 ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
3700 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3701 static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3702 struct ecore_ptt *p_ptt,
3706 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3710 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
/* Skip storms that are excluded, lack a VFC, or (PSTORM only)
 * when not running on a real ASIC.
 */
3711 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3712 !s_storm_defs[storm_id].has_vfc ||
3713 (storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3717 offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3720 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3721 offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3727 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3728 static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
3729 struct ecore_ptt *p_ptt,
3733 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3737 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3738 u32 rss_addr, num_entries, entry_width, total_dwords, i;
3739 struct rss_mem_defs *rss_defs;
3742 rss_defs = &s_rss_mem_defs[rss_mem_id];
3743 rss_addr = rss_defs->addr;
3744 num_entries = rss_defs->num_entries[dev_data->chip_id];
3745 entry_width = rss_defs->entry_width[dev_data->chip_id];
/* Total size in dwords: entries * bits-per-entry / 32 */
3746 total_dwords = (num_entries * entry_width) / 32;
3747 packed = (entry_width == 16);
3749 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
3750 entry_width, packed, rss_defs->type_name, false, 0);
/* Sizing pass: skip the hardware reads */
3754 offset += total_dwords;
/* Read the RSS RAM indirectly: select the row via RAM_ADDR, then
 * read RSS_REG_RSS_RAM_DATA_SIZE dwords from RAM_DATA.
 */
3758 for (i = 0; i < total_dwords; i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
3759 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3760 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), RSS_REG_RSS_RAM_DATA_SIZE, false);
3767 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3768 static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
3769 struct ecore_ptt *p_ptt,
3774 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3775 u32 total_blocks, ram_size, offset = 0, i;
/* "???" placeholders are overwritten with the instance name below */
3776 char mem_name[12] = "???_BIG_RAM";
3777 char type_name[8] = "???_RAM";
3778 struct big_ram_defs *big_ram;
3780 big_ram = &s_big_ram_defs[big_ram_id];
3781 total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3782 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
/* NOTE(review): length arg is OSAL_STRLEN(src), so OSAL_STRNCPY here is
 * an unbounded prefix overwrite, not a bounded copy - relies on
 * instance_name fitting the "???" prefix; confirm instance_name is 3 chars.
 */
3784 OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3785 OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3787 /* Dump memory header */
3788 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, BIG_RAM_BLOCK_SIZE_BYTES * 8, false, type_name, false, 0);
3790 /* Read and dump Big RAM data */
3792 return offset + ram_size;
/* Each address selects two consecutive blocks, hence total_blocks / 2
 * iterations of 2 * BIG_RAM_BLOCK_SIZE_DWORDS reads.
 */
3795 for (i = 0; i < total_blocks / 2; i++) {
3796 ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3797 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), 2 * BIG_RAM_BLOCK_SIZE_DWORDS, false);
/* Dumps the MCP (management CPU) memories and registers.
 * Returns the dumped size in dwords. Halts the MCP around the dump
 * (unless DBG_GRC_PARAM_NO_MCP is set) and resumes it afterwards.
 */
3803 static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
3804 struct ecore_ptt *p_ptt,
3808 bool block_enable[MAX_BLOCK_ID] = { 0 };
3809 bool halted = false;
/* Halt the MCP so its state is stable while being read */
3813 if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3814 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
3816 DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
3819 /* Dump MCP scratchpad */
3820 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, false, 0, false, "MCP", false, 0);
3822 /* Dump MCP cpu_reg_file */
3823 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);
3825 /* Dump MCP registers */
3826 block_enable[BLOCK_MCP] = true;
3827 offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");
3829 /* Dump required non-MCP registers */
3830 offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
3831 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);
/* Resume the MCP only if we were the ones that halted it */
3834 if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
3835 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
3840 /* Dumps the tbus indirect memory for all PHYs. */
3841 static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
3842 struct ecore_ptt *p_ptt,
3846 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3850 for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
3851 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3852 struct phy_defs *phy_defs;
/* Resolve this PHY's tbus address/data register addresses */
3855 phy_defs = &s_phy_defs[phy_id];
3856 addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
3857 addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
3858 data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
3859 data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
/* Data is written byte-wise into the dword dump buffer */
3860 bytes_buf = (u8*)(dump_buf + offset);
3862 if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
3863 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");
3865 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);
/* Sizing pass: reserve space only */
3868 offset += PHY_DUMP_SIZE_DWORDS;
/* Walk the tbus address space: hi selects a 256-entry page, lo selects
 * the entry; each entry yields one lo byte and one hi byte.
 */
3872 for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
3873 ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3874 for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
3875 ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
3876 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
3877 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
3881 offset += PHY_DUMP_SIZE_DWORDS;
/* Configures a single debug-bus line for the given block: selects the line,
 * sets the enable mask, right shift, and force valid/frame masks.
 */
3887 static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
3888 struct ecore_ptt *p_ptt,
3889 enum block_id block_id,
3893 u8 force_valid_mask,
3894 u8 force_frame_mask)
3896 struct block_defs *block = s_block_defs[block_id];
3898 ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3899 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3900 ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3901 ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3902 ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3905 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3906 static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
3907 struct ecore_ptt *p_ptt,
3911 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3912 u32 block_id, line_id, offset = 0;
3914 /* Skip static debug if a debug bus recording is in progress */
3915 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3919 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping static debug data...\n");
3921 /* Disable all blocks debug output */
3922 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3923 struct block_defs *block = s_block_defs[block_id];
3925 if (block->has_dbg_bus[dev_data->chip_id])
3926 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
/* Configure the debug bus for static-line capture */
3929 ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
3930 ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3931 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3932 ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3933 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3936 /* Dump all static debug lines for each relevant block */
3937 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3938 struct block_defs *block = s_block_defs[block_id];
3939 struct dbg_bus_block *block_desc;
3942 if (!block->has_dbg_bus[dev_data->chip_id])
3945 block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
3946 block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;
3948 /* Dump static section params */
3949 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);
/* Sizing pass: reserve space only */
3952 offset += block_dwords;
3956 /* If all lines are invalid - dump zeros */
3957 if (dev_data->block_in_reset[block_id]) {
3958 OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
3959 offset += block_dwords;
3963 /* Enable block's client */
3964 ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
3965 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {
3967 /* Configure debug line ID (enable mask 0xf, no shift/forcing) */
3968 ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);
3970 /* Read debug line info */
3971 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
3974 /* Disable block's client and debug output */
3975 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
3976 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
/* Restore the debug bus to its disabled state */
3980 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3981 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
3987 /* Performs GRC Dump to the specified buffer.
3988 * Returns the dumped size in dwords.
3990 static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
3991 struct ecore_ptt *p_ptt,
3994 u32 *num_dumped_dwords)
3996 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3997 bool is_emul, parities_masked = false;
3998 u8 i, port_mode = 0;
4001 is_emul = dev_data->platform_id == PLATFORM_EMUL_FULL || dev_data->platform_id == PLATFORM_EMUL_REDUCED;
4003 *num_dumped_dwords = 0;
4009 /* Find port mode */
4010 switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
4011 case 0: port_mode = 1; break;
4012 case 1: port_mode = 2; break;
4013 case 2: port_mode = 4; break;
4016 /* Update reset state */
4017 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4020 /* Dump global params */
4021 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
4022 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
4023 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
4024 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
4025 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);
4027 /* Dump reset registers (dumped before taking blocks out of reset ) */
4028 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4029 offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4031 /* Take all blocks out of reset (using reset registers) */
4033 ecore_grc_unreset_blocks(p_hwfn, p_ptt);
4034 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4037 /* Disable all parities using MFW command */
4038 if (dump && !is_emul && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4039 parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
4040 if (!parities_masked) {
4041 DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
/* In parity-safe mode, an unmasked-parity dump is not allowed */
4042 if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4043 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4047 /* Dump modified registers (dumped before modifying them) */
4048 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4049 offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
/* Stall storms so their IOR/VFC state is stable while being read */
4052 if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4053 ecore_grc_stall_storms(p_hwfn, p_ptt, true);
4056 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4057 bool block_enable[MAX_BLOCK_ID];
4059 /* Dump all blocks except MCP */
4060 for (i = 0; i < MAX_BLOCK_ID; i++)
4061 block_enable[i] = true;
4062 block_enable[BLOCK_MCP] = false;
4063 offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);
4065 /* Dump special registers */
4066 offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4070 offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4073 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4074 offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);
4077 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4078 offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);
4080 /* Dump RSS memories */
4081 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4082 offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);
/* Dump Big RAMs, each gated by its own GRC param */
4085 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4086 if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4087 offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);
4090 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4091 offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);
4094 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4095 offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);
/* PHY tbus dump is only supported on K2 ASIC */
4098 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4099 offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);
4101 /* Dump static debug data */
4102 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
4103 offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);
4105 /* Dump last section */
4106 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4110 /* Unstall storms */
4111 if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4112 ecore_grc_stall_storms(p_hwfn, p_ptt, false);
4114 /* Clear parity status */
4116 ecore_grc_clear_all_prty(p_hwfn, p_ptt);
4118 /* Enable all parities using MFW command */
4119 if (parities_masked)
4120 ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
4123 *num_dumped_dwords = offset;
4127 return DBG_STATUS_OK;
4130 /* Writes the specified failing Idle Check rule to the specified buffer.
4131 * Returns the dumped size in dwords. */
/* NOTE(review): this extract elides several original lines (parameter-list
 * entries such as dump_buf/dump/rule_id/fail_entry_id, the !dump size-only
 * branch, the final return and closing braces). Also "®s" below is mojibake
 * for "&regs" — restore from the upstream ecore/qed sources before building.
 */
4133 static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
4134 struct ecore_ptt *p_ptt,
4138 const struct dbg_idle_chk_rule *rule,
4140 u32 *cond_reg_values)
4142 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4143 const struct dbg_idle_chk_cond_reg *cond_regs;
4144 const struct dbg_idle_chk_info_reg *info_regs;
4145 u32 i, next_reg_offset = 0, offset = 0;
4146 struct dbg_idle_chk_result_hdr *hdr;
4147 const union dbg_idle_chk_reg *regs;
/* The rule's registers live at rule->reg_offset inside the idle-check regs
 * binary array: condition registers first, then info registers.
 */
4150 hdr = (struct dbg_idle_chk_result_hdr*)dump_buf;
4151 regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4152 cond_regs = ®s[0].cond_reg;
4153 info_regs = ®s[rule->num_cond_regs].info_reg;
4155 /* Dump rule data */
4157 OSAL_MEMSET(hdr, 0, sizeof(*hdr));
4158 hdr->rule_id = rule_id;
4159 hdr->mem_entry_id = fail_entry_id;
4160 hdr->severity = rule->severity;
4161 hdr->num_dumped_cond_regs = rule->num_cond_regs;
4164 offset += IDLE_CHK_RESULT_HDR_DWORDS;
4166 /* Dump condition register values */
4167 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4168 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4169 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4171 reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);
4173 /* Write register header */
/* NOTE(review): two consecutive "offset +=" lines — the elided lines in
 * between presumably guard the first one with !dump (size-estimation pass);
 * confirm against upstream.
 */
4175 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
4179 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4180 OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4181 reg_hdr->start_entry = reg->start_entry;
4182 reg_hdr->size = reg->entry_size;
/* A condition register with multiple entries or a nonzero start entry is
 * treated as a memory.
 */
4183 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4184 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4186 /* Write register values */
4187 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4188 dump_buf[offset] = cond_reg_values[next_reg_offset];
4191 /* Dump info register values */
4192 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4193 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4196 /* Check if register's block is in reset */
4198 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4202 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4203 if (block_id >= MAX_BLOCK_ID) {
4204 DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
/* Info registers are only read when their block is out of reset. */
4208 if (!dev_data->block_in_reset[block_id]) {
4209 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4210 bool wide_bus, eval_mode, mode_match = true;
4211 u16 modes_buf_offset;
4214 reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);
4217 eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4219 modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4220 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4226 addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
4227 wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4229 /* Write register header */
4230 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4231 hdr->num_dumped_info_regs++;
4232 OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4233 reg_hdr->size = reg->size;
/* Info register IDs continue after the condition register IDs. */
4234 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);
4236 /* Write register values */
4237 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
4244 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
/* NOTE(review): this extract elides original lines (parameter-list entries
 * dump_buf/dump, loop-variable declarations, branch bodies, closing braces,
 * and the final return). "®s" below is mojibake for "&regs".
 */
4245 static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
4246 struct ecore_ptt *p_ptt,
4249 const struct dbg_idle_chk_rule *input_rules,
4250 u32 num_input_rules,
4251 u32 *num_failing_rules)
4253 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Scratch buffer for the values of one entry of all condition registers. */
4254 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4259 *num_failing_rules = 0;
4261 for (i = 0; i < num_input_rules; i++) {
4262 const struct dbg_idle_chk_cond_reg *cond_regs;
4263 const struct dbg_idle_chk_rule *rule;
4264 const union dbg_idle_chk_reg *regs;
4265 u16 num_reg_entries = 1;
4266 bool check_rule = true;
4267 const u32 *imm_values;
4269 rule = &input_rules[i];
4270 regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4271 cond_regs = ®s[0].cond_reg;
4272 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];
4274 /* Check if all condition register blocks are out of reset, and
4275 * find maximal number of entries (all condition registers that
4276 * are memories must have the same size, which is > 1). */
4278 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
4279 u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4281 if (block_id >= MAX_BLOCK_ID) {
4282 DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4286 check_rule = !dev_data->block_in_reset[block_id];
4287 if (cond_regs[reg_id].num_entries > num_reg_entries)
4288 num_reg_entries = cond_regs[reg_id].num_entries;
/* Rules whose blocks are in reset are skipped when dumping (elided body). */
4291 if (!check_rule && dump)
4294 /* Go over all register entries (number of entries is the same for all
4295 * condition registers). */
4297 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4298 u32 next_reg_offset = 0;
/* Dump a failure record without values (OSAL_NULL, dump=false) —
 * presumably the elided guard above selects this path; confirm upstream.
 */
4301 offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, entry_id, OSAL_NULL);
4302 (*num_failing_rules)++;
4306 /* Read current entry of all condition registers */
4307 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4308 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4309 u32 padded_entry_size, addr;
4312 /* Find GRC address (if it's a memory, the address of the
4313 * specific entry is calculated). */
4315 addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
4316 wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
/* Memory entries are padded to a power-of-two stride. */
4317 if (reg->num_entries > 1 || reg->start_entry > 0) {
4318 padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
4319 addr += (reg->start_entry + entry_id) * padded_entry_size;
4322 /* Read registers */
4323 if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
4324 DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
4328 next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
4331 /* Call rule condition function. if returns true, it's a failure.*/
4332 if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
4333 offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
4334 (*num_failing_rules)++;
4343 /* Performs Idle Check Dump to the specified buffer.
4344 * Returns the dumped size in dwords. */
/* NOTE(review): this extract elides original lines (dump_buf/dump parameters,
 * the mode-match branch body, closing braces, final return). */
4346 static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
4347 struct ecore_ptt *p_ptt,
4351 u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;
4353 /* Dump global params */
4354 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4355 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");
4357 /* Dump idle check section header with a single parameter */
4358 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
/* Remember where "num_rules" was written so it can be patched afterwards. */
4359 num_failing_rules_offset = offset;
4360 offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
/* Walk the binary idle-check rules array: each cond header is followed by
 * data_size dwords of rule entries. */
4362 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4363 const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
4364 bool eval_mode, mode_match = true;
4365 u32 curr_failing_rules;
4366 u16 modes_buf_offset;
4369 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4371 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4372 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4376 offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
4377 num_failing_rules += curr_failing_rules;
4380 input_offset += cond_hdr->data_size;
4383 /* Overwrite num_rules parameter */
4385 ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);
4387 /* Dump last section */
4388 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4393 /* Finds the meta data image in NVRAM */
/* Looks up an NVRAM file of the given image type via the MCP
 * NVM_GET_FILE_ATT command and returns its offset and size in bytes.
 * NOTE(review): the image_type parameter line and the nvm_result declaration
 * are elided in this extract.
 */
4394 static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
4395 struct ecore_ptt *p_ptt,
4397 u32 *nvram_offset_bytes,
4398 u32 *nvram_size_bytes)
4400 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4401 struct mcp_file_att file_att;
4404 /* Call NVRAM get file command */
4405 nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32*)&file_att);
4407 /* Check response */
4408 if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4409 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4411 /* Update return values */
4412 *nvram_offset_bytes = file_att.nvm_start_addr;
4413 *nvram_size_bytes = file_att.len;
4415 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);
4417 /* Check alignment */
/* The image must be dword-aligned so it can be copied as u32s. */
4418 if (*nvram_size_bytes & 0x3)
4419 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4421 return DBG_STATUS_OK;
4424 /* Reads data from NVRAM */
/* Copies nvram_size_bytes from NVRAM into ret_buf in MCP_DRV_NVM_BUF_LEN
 * sized chunks via the MCP NVM_READ_NVRAM command.
 * NOTE(review): the ret_buf parameter line and the opening "do {" of the
 * read loop are elided in this extract (the closing "while" is visible).
 */
4425 static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4426 struct ecore_ptt *p_ptt,
4427 u32 nvram_offset_bytes,
4428 u32 nvram_size_bytes,
4431 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4432 s32 bytes_left = nvram_size_bytes;
4433 u32 read_offset = 0;
4435 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4438 bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4440 /* Call NVRAM read command */
/* The command parameter packs the byte offset with the chunk length. */
4441 if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32*)((u8*)ret_buf + read_offset)))
4442 return DBG_STATUS_NVRAM_READ_FAILED;
4444 /* Check response */
4445 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4446 return DBG_STATUS_NVRAM_READ_FAILED;
4448 /* Update read offset */
4449 read_offset += ret_read_size;
4450 bytes_left -= ret_read_size;
4451 } while (bytes_left > 0);
4453 return DBG_STATUS_OK;
4456 /* Get info on the MCP Trace data in the scratchpad:
4457 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4458 * - trace_data_size (OUT): trace data size in bytes (without the header)
4460 static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4461 struct ecore_ptt *p_ptt,
4462 u32 *trace_data_grc_addr,
4463 u32 *trace_data_size)
4465 u32 spad_trace_offsize, signature;
4467 /* Read trace section offsize structure from MCP scratchpad */
4468 spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4470 /* Extract trace section address from offsize (in scratchpad) */
4471 *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4473 /* Read signature from MCP trace section */
4474 signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4476 if (signature != MFW_TRACE_SIGNATURE)
4477 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4479 /* Read trace size from MCP trace section */
4480 *trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4482 return DBG_STATUS_OK;
4485 /* Reads MCP trace meta data image from NVRAM
4486 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4487 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4488 * loaded from file).
4489 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4491 static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4492 struct ecore_ptt *p_ptt,
4493 u32 trace_data_size_bytes,
4494 u32 *running_bundle_id,
4495 u32 *trace_meta_offset,
4496 u32 *trace_meta_size)
4498 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4500 /* Read MCP trace section offsize structure from MCP scratchpad */
4501 spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4503 /* Find running bundle ID */
4504 running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4505 *running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4506 if (*running_bundle_id > 1)
4507 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4509 /* Find image in NVRAM */
4510 nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4511 return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4514 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4515 static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4516 struct ecore_ptt *p_ptt,
4517 u32 nvram_offset_in_bytes,
4521 u8 modules_num, module_len, i, *byte_buf = (u8*)buf;
4522 enum dbg_status status;
4525 /* Read meta data from NVRAM */
4526 status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4527 if (status != DBG_STATUS_OK)
4530 /* Extract and check first signature */
4531 signature = ecore_read_unaligned_dword(byte_buf);
4532 byte_buf += sizeof(signature);
4533 if (signature != NVM_MAGIC_VALUE)
4534 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4536 /* Extract number of modules */
4537 modules_num = *(byte_buf++);
4539 /* Skip all modules */
4540 for (i = 0; i < modules_num; i++) {
4541 module_len = *(byte_buf++);
4542 byte_buf += module_len;
4545 /* Extract and check second signature */
4546 signature = ecore_read_unaligned_dword(byte_buf);
4547 byte_buf += sizeof(signature);
4548 if (signature != NVM_MAGIC_VALUE)
4549 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4551 return DBG_STATUS_OK;
4554 /* Dump MCP Trace */
/* Dumps the MCP trace (scratchpad data + NVRAM meta image) into dump_buf.
 * NOTE(review): this extract elides the dump_buf/dump parameters, the
 * declarations of the local bools (halted, mcp_access — inferred from use),
 * the "if (!halted)" guard before the halt-failure notice, and braces.
 */
4555 static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
4556 struct ecore_ptt *p_ptt,
4559 u32 *num_dumped_dwords)
4561 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
4562 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4563 u32 running_bundle_id, offset = 0;
4564 enum dbg_status status;
4568 *num_dumped_dwords = 0;
/* MCP access may be disabled via a GRC parameter (crash-dump scenarios). */
4570 mcp_access = !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4572 /* Get trace data info */
4573 status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
4574 if (status != DBG_STATUS_OK)
4577 /* Dump global params */
4578 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4579 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");
4581 /* Halt MCP while reading from scratchpad so the read data will be
4582 * consistent. if halt fails, MCP trace is taken anyway, with a small
4583 * risk that it may be corrupt. */
4585 if (dump && mcp_access) {
4586 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
4588 DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
4591 /* Find trace data size */
4592 trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);
4594 /* Dump trace data section header and param */
4595 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
4596 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);
4598 /* Read trace data from scratchpad into dump buffer */
4599 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);
4601 /* Resume MCP (only if halt succeeded) */
4602 if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
4603 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
4605 /* Dump trace meta section header */
4606 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);
4608 /* Read trace meta only if NVRAM access is enabled
4609 * (trace_meta_size_bytes is dword-aligned). */
4611 if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
4612 status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
4613 if (status == DBG_STATUS_OK)
4614 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4617 /* Dump trace meta size param */
4618 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);
4620 /* Read trace meta image into dump buffer */
4621 if (dump && trace_meta_size_dwords)
4622 status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
4623 if (status == DBG_STATUS_OK)
4624 offset += trace_meta_size_dwords;
4626 /* Dump last section */
4627 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4629 *num_dumped_dwords = offset;
4631 /* If no mcp access, indicate that the dump doesn't contain the meta data */
4634 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dumps the GRC trace (reg) FIFO into dump_buf via DMAE.
 * NOTE(review): this extract elides the dump_buf/dump parameters, the
 * fifo_has_data declaration, and the "if (dump)" / "else" framing around
 * the read loop vs. the worst-case size estimate.
 */
4638 static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
4639 struct ecore_ptt *p_ptt,
4642 u32 *num_dumped_dwords)
4644 u32 dwords_read, size_param_offset, offset = 0;
4647 *num_dumped_dwords = 0;
4649 /* Dump global params */
4650 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4651 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");
4653 /* Dump fifo data section header and param. The size param is 0 for
4654 * now, and is overwritten after reading the FIFO. */
4656 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
4657 size_param_offset = offset;
4658 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4661 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4663 /* Pull available data from fifo. Use DMAE since this is
4664 * widebus memory and must be accessed atomically. Test for
4665 * dwords_read not passing buffer size since more entries could
4666 * be added to the buffer as we are emptying it. */
4669 for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += REG_FIFO_ELEMENT_DWORDS) {
4670 if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, (u64)(osal_uintptr_t)(&dump_buf[offset]), REG_FIFO_ELEMENT_DWORDS, 0))
4671 return DBG_STATUS_DMAE_FAILED;
4672 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
/* Patch the real element count back into the size parameter. */
4675 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4679 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4680 * test how much data is available, except for reading it. */
4682 offset += REG_FIFO_DEPTH_DWORDS;
4685 /* Dump last section */
4686 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4688 *num_dumped_dwords = offset;
4690 return DBG_STATUS_OK;
/* Dumps the IGU error-handling FIFO into dump_buf via DMAE.
 * NOTE(review): same elisions as ecore_reg_fifo_dump (parameters,
 * fifo_has_data declaration, if (dump)/else framing).
 */
4694 static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4695 struct ecore_ptt *p_ptt,
4698 u32 *num_dumped_dwords)
4700 u32 dwords_read, size_param_offset, offset = 0;
4703 *num_dumped_dwords = 0;
4705 /* Dump global params */
4706 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4707 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4709 /* Dump fifo data section header and param. The size param is 0 for
4710 * now, and is overwritten after reading the FIFO. */
4712 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4713 size_param_offset = offset;
4714 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4717 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4719 /* Pull available data from fifo. Use DMAE since this is
4720 * widebus memory and must be accessed atomically. Test for
4721 * dwords_read not passing buffer size since more entries could
4722 * be added to the buffer as we are emptying it. */
4724 for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += IGU_FIFO_ELEMENT_DWORDS) {
4725 if (ecore_dmae_grc2host(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_MEMORY, (u64)(osal_uintptr_t)(&dump_buf[offset]), IGU_FIFO_ELEMENT_DWORDS, 0))
4726 return DBG_STATUS_DMAE_FAILED;
4727 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
/* Patch the real element count back into the size parameter. */
4730 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4734 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4735 * test how much data is available, except for reading it. */
4737 offset += IGU_FIFO_DEPTH_DWORDS;
4740 /* Dump last section */
4741 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4743 *num_dumped_dwords = offset;
4745 return DBG_STATUS_OK;
4748 /* Protection Override dump */
/* Dumps the GRC protection-override window via DMAE.
 * NOTE(review): this extract elides the dump_buf/dump parameters and the
 * "if (dump)" / "else" framing around the DMAE read vs. the worst-case
 * size estimate.
 */
4749 static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4750 struct ecore_ptt *p_ptt,
4753 u32 *num_dumped_dwords)
4755 u32 size_param_offset, override_window_dwords, offset = 0;
4757 *num_dumped_dwords = 0;
4759 /* Dump global params */
4760 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4761 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4763 /* Dump data section header and param. The size param is 0 for now,
4764 * and is overwritten after reading the data. */
4766 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4767 size_param_offset = offset;
4768 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4771 /* Add override window info to buffer */
/* Window length in dwords = valid-entry count * element size. */
4772 override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4773 if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_PROTECTION_OVERRIDE_WINDOW, (u64)(osal_uintptr_t)(dump_buf + offset), override_window_dwords, 0))
4774 return DBG_STATUS_DMAE_FAILED;
4775 offset += override_window_dwords;
4776 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
4779 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4782 /* Dump last section */
4783 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4785 *num_dumped_dwords = offset;
4787 return DBG_STATUS_OK;
4790 /* Performs FW Asserts Dump to the specified buffer.
4791 * Returns the dumped size in dwords. */
/* NOTE(review): this extract elides the dump_buf/dump parameters, offset and
 * storm_id declarations, the "if (!dump)" size-only branch, the final
 * return, and closing braces.
 */
4793 static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
4794 struct ecore_ptt *p_ptt,
4798 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4799 struct fw_asserts_ram_section *asserts;
4800 char storm_letter_str[2] = "?";
4801 struct fw_info fw_info;
4805 /* Dump global params */
4806 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4807 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");
4809 /* Find Storm dump size */
4810 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4811 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
4812 struct storm_defs *storm = &s_storm_defs[storm_id];
/* Storms whose block is in reset are skipped (branch body elided). */
4814 if (dev_data->block_in_reset[storm->block_id])
4817 /* Read FW info for the current Storm */
4818 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4820 asserts = &fw_info.fw_asserts_section;
4822 /* Dump FW Asserts section header and params */
4823 storm_letter_str[0] = storm->letter;
4824 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
4825 offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
4826 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);
4828 /* Read and dump FW Asserts data */
4830 offset += asserts->list_element_dword_size;
/* Compute the RAM address of the most recent assert-list element:
 * next_list_idx == 0 wraps to the last element of the list.
 */
4834 fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
4835 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4836 next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4837 next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
4838 last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
4839 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
4840 last_list_idx * asserts->list_element_dword_size;
4841 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
4844 /* Dump last section */
4845 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4850 /***************************** Public Functions *******************************/
4852 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4854 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr*)bin_ptr;
4857 /* convert binary data to debug arrays */
4858 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4859 s_dbg_arrays[buf_id].ptr = (u32*)(bin_ptr + buf_array[buf_id].offset);
4860 s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4863 return DBG_STATUS_OK;
/* Validates the debug application version against this module's
 * TOOLS_VERSION.
 * NOTE(review): a source line is elided between the check and the final
 * return — upstream stores the accepted version (s_app_ver = ver) there;
 * confirm against the upstream ecore/qed sources.
 */
4866 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4868 if (ver < TOOLS_VERSION)
4869 return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4873 return DBG_STATUS_OK;
4876 u32 ecore_dbg_get_fw_func_ver(void)
4878 return TOOLS_VERSION;
4881 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4883 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4885 return (enum chip_ids)dev_data->chip_id;
/* Resets the debug bus: validates arguments, disables all inputs, resets the
 * DBG block and re-initializes the driver's bus state.
 * NOTE(review): this extract elides the one_shot_en/force_hw_dwords/
 * unify_inputs/grc_input_en parameter lines (names inferred from use),
 * the "return status;" bodies of the two error checks, and a guard —
 * presumably "if (grc_input_en)" — before the final SET_FIELD; confirm
 * against upstream.
 */
4888 enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
4889 struct ecore_ptt *p_ptt,
4895 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4896 enum dbg_status status;
4898 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
4899 if (status != DBG_STATUS_OK)
4902 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);
/* HW dwords can only be forced to 0 (auto), 4 or 8 */
4904 if (force_hw_dwords &&
4905 force_hw_dwords != 4 &&
4906 force_hw_dwords != 8)
4907 return DBG_STATUS_INVALID_ARGS;
4909 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
4910 return DBG_STATUS_DBG_BUS_IN_USE;
4912 /* Update reset state of all blocks */
4913 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4915 /* Disable all debug inputs */
4916 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
4917 if (status != DBG_STATUS_OK)
4920 /* Reset DBG block */
4921 ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4923 /* Set one-shot / wrap-around */
4924 ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);
4926 /* Init state params */
4927 OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
4928 dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
4929 dev_data->bus.state = DBG_BUS_STATE_READY;
4930 dev_data->bus.one_shot_en = one_shot_en;
4931 dev_data->bus.hw_dwords = force_hw_dwords;
4932 dev_data->bus.grc_input_en = grc_input_en;
4933 dev_data->bus.unify_inputs = unify_inputs;
4934 dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;
4936 /* Init special DBG block */
4938 SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
4940 return DBG_STATUS_OK;
/* Configures the debug bus to stream into a DMA-coherent PCI buffer of
 * buf_size_kb kilobytes and programs the DBG block's PCI target registers.
 * NOTE(review): this extract elides the buf_size_kb parameter line, the
 * "void *pci_buf;" declaration, and the "if (!pci_buf)" line before the
 * allocation-failure return (all inferred from use); confirm upstream.
 */
4943 enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
4944 struct ecore_ptt *p_ptt,
4947 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4948 dma_addr_t pci_buf_phys_addr;
4951 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
4953 if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
4954 return DBG_STATUS_OUTPUT_ALREADY_SET;
4955 if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
4956 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
4958 dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
4959 dev_data->bus.pci_buf.size = buf_size_kb * 1024;
/* Buffer must be a whole number of PCI packets */
4960 if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
4961 return DBG_STATUS_INVALID_ARGS;
4963 pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
4965 return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
4967 OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
/* Record the virtual address split into lo/hi dwords */
4969 dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
4970 dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
/* Program the DBG block's PCI target registers */
4972 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
4973 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
4974 ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
4975 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
4976 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
4977 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
4978 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
4979 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
4980 ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
4982 return DBG_STATUS_OK;
/* Configures the debug bus to stream out through the NIG (network) target,
 * optionally to/from the other engine, with an Ethernet framing header.
 * NOTE(review): this extract elides the port_id/dest_addr_lo32/
 * dest_addr_hi16 parameter lines (names inferred from use) and the early
 * "return DBG_STATUS_OK;" + closing brace of the rcv_from_other_engine
 * branch; confirm upstream.
 */
4985 enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
4986 struct ecore_ptt *p_ptt,
4990 u16 data_limit_size_kb,
4991 bool send_to_other_engine,
4992 bool rcv_from_other_engine)
4994 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4996 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);
4998 if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
4999 return DBG_STATUS_OUTPUT_ALREADY_SET;
5000 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5001 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
/* Port must exist on this chip/platform; TX and RX cross-engine modes are
 * mutually exclusive.
 */
5002 if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
5003 return DBG_STATUS_INVALID_ARGS;
5005 dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
5006 dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;
5008 ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
5009 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);
5011 if (send_to_other_engine)
5012 ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
5014 ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);
5016 if (rcv_from_other_engine) {
5017 ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
5021 /* Configure ethernet header of 14 bytes */
5022 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
5023 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
5024 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
5025 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
5026 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
5027 ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
/* Optional cap on the amount of data streamed to the network */
5028 if (data_limit_size_kb)
5029 ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
5032 return DBG_STATUS_OK;
/* Checks whether the given (shifted) enable mask overlaps the enable mask of
 * any already-enabled debug bus block.
 * NOTE(review): this extract elides the enable_mask/right_shift parameter
 * lines, the block_id declaration, the loop's "continue;", and the final
 * return-true/return-false lines and closing braces.
 */
5035 static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5039 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5040 u8 curr_shifted_enable_mask, shifted_enable_mask;
/* Normalize the candidate mask by its right shift before comparing */
5043 shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5045 if (dev_data->bus.num_enabled_blocks) {
5046 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5047 struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
/* Blocks with an empty enable mask are not enabled — skip them. */
5049 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5052 curr_shifted_enable_mask =
5053 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5055 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5056 if (shifted_enable_mask & curr_shifted_enable_mask)
/* Enables a block on the debug bus: validates the request, records the
 * block's line/mask/shift settings and bumps the enabled-block counter.
 * NOTE(review): this extract elides the line_num/enable_mask/right_shift
 * parameter lines (names inferred from use) and one condition line inside
 * the big validity check; confirm against upstream.
 */
5064 enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5065 struct ecore_ptt *p_ptt,
5066 enum block_id block_id,
5070 u8 force_valid_mask,
5071 u8 force_frame_mask)
5073 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5074 struct block_defs *block = s_block_defs[block_id];
5075 struct dbg_bus_block_data *block_bus;
5076 struct dbg_bus_block *block_desc;
5078 block_bus = &dev_data->bus.blocks[block_id];
5079 block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5081 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5083 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5084 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5085 if (block_id >= MAX_BLOCK_ID)
5086 return DBG_STATUS_INVALID_ARGS;
5087 if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5088 return DBG_STATUS_BLOCK_ALREADY_ENABLED;
/* The block must have a debug bus on this chip, and the line number and
 * all masks/shift must be within range.
 */
5089 if (!block->has_dbg_bus[dev_data->chip_id] ||
5090 line_num >= NUM_DBG_LINES(block_desc) ||
5092 enable_mask > MAX_CYCLE_VALUES_MASK ||
5093 force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5094 force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5095 right_shift > VALUES_PER_CYCLE - 1)
5096 return DBG_STATUS_INVALID_ARGS;
5097 if (dev_data->block_in_reset[block_id])
5098 return DBG_STATUS_BLOCK_IN_RESET;
/* Unless inputs are unified, the mask must not overlap other blocks. */
5099 if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5100 return DBG_STATUS_INPUT_OVERLAP;
5102 dev_data->bus.blocks[block_id].line_num = line_num;
5103 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5104 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5105 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5106 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5108 dev_data->bus.num_enabled_blocks++;
5110 return DBG_STATUS_OK;
/* Enables debug bus recording for the specified Storm in the given mode.
 * Rejects the request when the bus is not ready, recording is HW-only
 * (hw_dwords >= 4), arguments are out of range, inputs are unified, or the
 * Storm is already enabled. On success, marks the Storm enabled, stores its
 * mode, assigns it the next sequential HW ID and bumps the Storm counter.
 */
5113 enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5114 enum dbg_storms storm,
5115 enum dbg_bus_storm_modes storm_mode)
5117 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5119 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm, storm_mode);
5121 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5122 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5123 if (dev_data->bus.hw_dwords >= 4)
5124 return DBG_STATUS_HW_ONLY_RECORDING;
5125 if (storm >= MAX_DBG_STORMS)
5126 return DBG_STATUS_INVALID_ARGS;
5127 if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5128 return DBG_STATUS_INVALID_ARGS;
5129 if (dev_data->bus.unify_inputs)
5130 return DBG_STATUS_INVALID_ARGS;
5132 if (dev_data->bus.storms[storm].enabled)
5133 return DBG_STATUS_STORM_ALREADY_ENABLED;
/* HW ID is assigned in enable order */
5135 dev_data->bus.storms[storm].enabled = true;
5136 dev_data->bus.storms[storm].mode = (u8)storm_mode;
5137 dev_data->bus.storms[storm].hw_id = dev_data->bus.num_enabled_storms;
5139 dev_data->bus.num_enabled_storms++;
5141 return DBG_STATUS_OK;
/* Enables the timestamp input on the debug bus.
 * The timestamp is accounted as a block input on BLOCK_DBG (its enable mask
 * is set to 0x1 and the enabled-blocks counter is incremented), then the
 * valid/frame enable masks and tick length are written to the DBG registers.
 * NOTE(review): the parameter lines (valid_mask, frame_mask, tick_len) are
 * missing from this listing — confirm against the complete file.
 */
5144 enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
5145 struct ecore_ptt *p_ptt,
5150 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5152 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);
5154 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5155 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5156 if (valid_mask > 0x7 || frame_mask > 0x7)
5157 return DBG_STATUS_INVALID_ARGS;
/* The timestamp occupies dword 0, so it must not overlap enabled blocks */
5158 if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
5159 return DBG_STATUS_INPUT_OVERLAP;
5161 dev_data->bus.timestamp_input_en = true;
5162 dev_data->bus.num_enabled_blocks++;
5164 SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5166 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
5167 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
5168 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);
5170 return DBG_STATUS_OK;
/* Adds an event-ID range SEM filter to the given (already enabled) Storm.
 * Only events whose EID falls in [min_eid, max_eid] will be recorded.
 * NOTE(review): the min_eid/max_eid parameter lines are missing from this
 * listing — confirm against the complete file.
 */
5173 enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5174 enum dbg_storms storm_id,
5178 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5179 struct dbg_bus_storm_data *storm_bus;
5181 storm_bus = &dev_data->bus.storms[storm_id];
5183 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5185 if (storm_id >= MAX_DBG_STORMS)
5186 return DBG_STATUS_INVALID_ARGS;
5187 if (min_eid > max_eid)
5188 return DBG_STATUS_INVALID_ARGS;
5189 if (!storm_bus->enabled)
5190 return DBG_STATUS_STORM_NOT_ENABLED;
/* eid_range_not_mask = 1 selects range filtering (vs. val/mask filtering) */
5192 storm_bus->eid_filter_en = 1;
5193 storm_bus->eid_range_not_mask = 1;
5194 storm_bus->eid_filter_params.range.min = min_eid;
5195 storm_bus->eid_filter_params.range.max = max_eid;
5197 return DBG_STATUS_OK;
/* Adds an event-ID value/mask SEM filter to the given (already enabled)
 * Storm. Only events whose EID matches eid_val under eid_mask are recorded.
 * NOTE(review): the eid_val/eid_mask parameter lines are missing from this
 * listing — confirm against the complete file.
 */
5200 enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5201 enum dbg_storms storm_id,
5205 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5206 struct dbg_bus_storm_data *storm_bus;
5208 storm_bus = &dev_data->bus.storms[storm_id];
5210 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5212 if (storm_id >= MAX_DBG_STORMS)
5213 return DBG_STATUS_INVALID_ARGS;
5214 if (!storm_bus->enabled)
5215 return DBG_STATUS_STORM_NOT_ENABLED;
/* eid_range_not_mask = 0 selects val/mask filtering (vs. range filtering) */
5217 storm_bus->eid_filter_en = 1;
5218 storm_bus->eid_range_not_mask = 0;
5219 storm_bus->eid_filter_params.mask.val = eid_val;
5220 storm_bus->eid_filter_params.mask.mask = eid_mask;
5222 return DBG_STATUS_OK;
/* Adds a connection-ID SEM filter to the given (already enabled) Storm.
 * Only events belonging to the specified CID will be recorded.
 * NOTE(review): the cid parameter line is missing from this listing —
 * confirm against the complete file.
 */
5225 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5226 enum dbg_storms storm_id,
5229 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5230 struct dbg_bus_storm_data *storm_bus;
5232 storm_bus = &dev_data->bus.storms[storm_id];
5234 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5236 if (storm_id >= MAX_DBG_STORMS)
5237 return DBG_STATUS_INVALID_ARGS;
5238 if (!storm_bus->enabled)
5239 return DBG_STATUS_STORM_NOT_ENABLED;
5241 storm_bus->cid_filter_en = 1;
5242 storm_bus->cid = cid;
5244 return DBG_STATUS_OK;
/* Enables filtering on the debug bus for the given (already enabled) block.
 * Requires unified inputs. Marks filtering active, resets the constraint
 * counter, and programs the filter HW ID and optional constant message
 * length. Subsequent constraints added while adding_filter is set apply to
 * the filter rather than to a trigger state.
 * NOTE(review): the const_msg_len parameter line is missing from this
 * listing — confirm against the complete file.
 */
5247 enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
5248 struct ecore_ptt *p_ptt,
5249 enum block_id block_id,
5252 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5254 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);
5256 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5257 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5258 if (dev_data->bus.filter_en)
5259 return DBG_STATUS_FILTER_ALREADY_ENABLED;
5260 if (block_id >= MAX_BLOCK_ID)
5261 return DBG_STATUS_INVALID_ARGS;
5262 if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5263 return DBG_STATUS_BLOCK_NOT_ENABLED;
5264 if (!dev_data->bus.unify_inputs)
5265 return DBG_STATUS_FILTER_BUG;
5267 dev_data->bus.filter_en = true;
5268 dev_data->bus.next_constraint_id = 0;
5269 dev_data->bus.adding_filter = true;
5271 /* HW ID is set to 0 due to required unifyInputs */
5272 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);
/* HW expects message length minus one; enable bit is written separately */
5273 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
5274 if (const_msg_len > 0)
5275 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);
5277 return DBG_STATUS_OK;
/* Enables the debug bus trigger and configures the pre/post-trigger
 * recording windows. rec_pre_trigger selects whether data before the trigger
 * is kept (pre_chunks chunks, or from zero), rec_post_trigger whether data
 * after the trigger is kept (post_cycles cycles, or unlimited when 0).
 * filter_pre/post_trigger control whether filtering applies in each window.
 * NOTE(review): the pre_chunks/post_cycles parameter lines and the else
 * branches' opening lines are missing from this listing — confirm against
 * the complete file.
 */
5280 enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
5281 struct ecore_ptt *p_ptt,
5282 bool rec_pre_trigger,
5284 bool rec_post_trigger,
5286 bool filter_pre_trigger,
5287 bool filter_post_trigger)
5289 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5290 enum dbg_bus_post_trigger_types post_trigger_type;
5291 enum dbg_bus_pre_trigger_types pre_trigger_type;
5292 struct dbg_bus_data *bus = &dev_data->bus;
5294 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);
5296 if (bus->state != DBG_BUS_STATE_READY)
5297 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5298 if (bus->trigger_en)
5299 return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
5300 if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
5301 return DBG_STATUS_INVALID_ARGS;
5303 bus->trigger_en = true;
5304 bus->filter_pre_trigger = filter_pre_trigger;
5305 bus->filter_post_trigger = filter_post_trigger;
5307 if (rec_pre_trigger) {
5308 pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
5309 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
5312 pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
5315 if (rec_post_trigger) {
5316 post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
/* 0 post cycles means record indefinitely after the trigger */
5317 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
5320 post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
5323 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
5324 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
5325 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);
5327 return DBG_STATUS_OK;
/* Adds a trigger state for the given (already enabled) block.
 * Stores the block's shifted enable mask in the state data, programs the
 * state's message-length and trigger-set registers, sets this state's next
 * state to "final" (MAX_TRIGGER_STATES), and patches the previous state (if
 * any) to chain to this one. Increments next_trigger_state on success.
 * NOTE(review): the const_msg_len/count_to_next parameter lines, reg_offset
 * declaration and one validation condition are missing from this listing —
 * confirm against the complete file.
 */
5330 enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5331 struct ecore_ptt *p_ptt,
5332 enum block_id block_id,
5336 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5337 struct dbg_bus_data *bus = &dev_data->bus;
5338 struct dbg_bus_block_data *block_bus;
5341 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5343 block_bus = &bus->blocks[block_id];
5345 if (!bus->trigger_en)
5346 return DBG_STATUS_TRIGGER_NOT_ENABLED;
5347 if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5348 return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5349 if (block_id >= MAX_BLOCK_ID)
5350 return DBG_STATUS_INVALID_ARGS;
5351 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5352 return DBG_STATUS_BLOCK_NOT_ENABLED;
5354 return DBG_STATUS_INVALID_ARGS;
/* Constraints added from here on attach to this trigger state */
5356 bus->next_constraint_id = 0;
5357 bus->adding_filter = false;
5359 /* Store block's shifted enable mask */
5360 SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5362 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5364 /* Set trigger state registers */
5365 reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5366 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5367 if (const_msg_len > 0)
5368 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5370 /* Set trigger set registers */
5371 reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5372 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5374 /* Set next state to final state, and overwrite previous next state
5377 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5378 if (bus->next_trigger_state > 0) {
5379 reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5380 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5383 bus->next_trigger_state++;
5385 return DBG_STATUS_OK;
/* Adds a comparison constraint to either the filter (when adding_filter is
 * set) or the current trigger state. Validates the operation and offsets,
 * derives the HW data mask/range encoding (lsb and width extracted from the
 * mask for EQ/NE ops), writes the constraint via ecore_bus_set_constraint,
 * and on the first constraint fills the remaining slots with always-match
 * dummies. Increments next_constraint_id on success.
 * NOTE(review): several parameter lines (data_val, data_mask, compare_frame,
 * frame_bit, cycle_offset, is_mandatory), local declarations (lsb, width)
 * and some statements are missing from this listing — confirm against the
 * complete file.
 */
5388 enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
5389 struct ecore_ptt *p_ptt,
5390 enum dbg_bus_constraint_ops constraint_op,
5396 u8 dword_offset_in_cycle,
5399 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5400 struct dbg_bus_data *bus = &dev_data->bus;
5401 u16 dword_offset, range = 0;
5403 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);
5405 if (!bus->filter_en && !dev_data->bus.trigger_en)
5406 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5407 if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
5408 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5409 if (bus->next_constraint_id >= MAX_CONSTRAINTS)
5410 return DBG_STATUS_TOO_MANY_CONSTRAINTS;
5411 if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
5412 return DBG_STATUS_INVALID_ARGS;
/* Frame comparison is only meaningful for equality/inequality ops */
5413 if (compare_frame &&
5414 constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
5415 constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
5416 return DBG_STATUS_INVALID_ARGS;
5418 dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;
5420 if (!bus->adding_filter) {
5421 u8 curr_trigger_state_id = bus->next_trigger_state - 1;
5422 struct dbg_bus_trigger_state_data *trigger_state;
5424 trigger_state = &bus->trigger_states[curr_trigger_state_id];
5426 /* Check if the selected dword is enabled in the block */
5427 if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
5428 return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;
5430 /* Add selected dword to trigger state's dword mask */
5431 SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
5434 /* Prepare data mask and range */
5435 if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
5436 constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
/* HW expects an inverted mask for EQ/NE */
5437 data_mask = ~data_mask;
5442 /* Extract lsb and width from mask */
5444 return DBG_STATUS_INVALID_ARGS;
5446 for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
5448 width < 32 - lsb && (data_mask & 1);
5449 width++, data_mask >>= 1) {}
5451 return DBG_STATUS_INVALID_ARGS;
/* Encode lsb (bits 5..9) and width-1 (bits 0..4) into the range field */
5452 range = (lsb << 5) | (width - 1);
5455 /* Add constraint */
5456 ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
5457 dev_data->bus.next_constraint_id,
5458 s_constraint_op_defs[constraint_op].hw_op_val,
5459 data_val, data_mask, frame_bit,
5460 compare_frame ? 0 : 1, dword_offset, range,
5461 s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
5462 is_mandatory ? 1 : 0);
5464 /* If first constraint, fill other 3 constraints with dummy constraints
5465 * that always match (using the same offset).
5467 if (!dev_data->bus.next_constraint_id) {
5470 for (i = 1; i < MAX_CONSTRAINTS; i++)
5471 ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
5472 i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
5473 0, 1, dword_offset, 0, 0, 1);
5476 bus->next_constraint_id++;
5478 return DBG_STATUS_OK;
5481 /* Configure the DBG block client mask */
/* Builds a bitmask of all debug clients that must be enabled — one bit per
 * enabled Storm, per enabled block (except BLOCK_DBG), plus the GRC and
 * timestamp clients when their inputs are enabled — and writes it via
 * ecore_bus_enable_clients.
 */
5482 static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5483 struct ecore_ptt *p_ptt)
5485 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5486 struct dbg_bus_data *bus = &dev_data->bus;
5487 u32 block_id, client_mask = 0;
5490 /* Update client mask for Storm inputs */
5491 if (bus->num_enabled_storms)
5492 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5493 struct storm_defs *storm = &s_storm_defs[storm_id];
5495 if (bus->storms[storm_id].enabled)
5496 client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5499 /* Update client mask for block inputs */
5500 if (bus->num_enabled_blocks) {
5501 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5502 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5503 struct block_defs *block = s_block_defs[block_id];
5505 if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5506 client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5510 /* Update client mask for GRC input */
5511 if (bus->grc_input_en)
5512 client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5514 /* Update client mask for timestamp input */
5515 if (bus->timestamp_input_en)
5516 client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5518 ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5521 /* Configure the DBG block framing mode */
/* Derives the number of HW dwords per cycle (4 for 128-bit lines, 8 for
 * 256-bit lines) from the enabled blocks' line descriptors, rejects mixed
 * line widths and filter/trigger use with 256-bit lines, then programs the
 * corresponding framing mode. Returns DBG_STATUS_OK or an error status.
 */
5522 static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
5523 struct ecore_ptt *p_ptt)
5525 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5526 struct dbg_bus_data *bus = &dev_data->bus;
5527 enum dbg_bus_frame_modes dbg_framing_mode;
5530 if (!bus->hw_dwords && bus->num_enabled_blocks) {
5531 struct dbg_bus_line *line_desc;
5534 /* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
5537 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5538 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5540 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5543 line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
5544 hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;
/* All enabled blocks must use the same line width */
5546 if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
5547 return DBG_STATUS_NON_MATCHING_LINES;
5549 /* The DBG block doesn't support triggers and
5550 * filters on 256b debug lines.
5552 if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
5553 return DBG_STATUS_NO_FILTER_TRIGGER_64B;
5555 bus->hw_dwords = hw_dwords;
5559 switch (bus->hw_dwords) {
5560 case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5561 case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
5562 case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
5563 default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5565 ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);
5567 return DBG_STATUS_OK;
5570 /* Configure the DBG block Storm data */
/* Verifies each enabled Storm's SEMI sync FIFO is empty, programs the Storm
 * HW-ID mapping register, disables the Storm stall for one-shot recording to
 * the internal buffer, and fills the calendar slots round-robin over the
 * enabled Storms. Returns DBG_STATUS_OK or DBG_STATUS_SEMI_FIFO_NOT_EMPTY.
 */
5571 static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
5572 struct ecore_ptt *p_ptt)
5574 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5575 struct dbg_bus_data *bus = &dev_data->bus;
5576 u8 storm_id, i, next_storm_id = 0;
5577 u32 storm_id_mask = 0;
5579 /* Check if SEMI sync FIFO is empty */
5580 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5581 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5582 struct storm_defs *storm = &s_storm_defs[storm_id];
5584 if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
5585 return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
/* Pack each enabled Storm's HW ID into the storm-ID mapping register */
5588 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5589 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5591 if (storm_bus->enabled)
5592 storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
5595 ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);
5597 /* Disable storm stall if recording to internal buffer in one-shot */
5598 ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);
5600 /* Configure calendar */
5601 for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {
5603 /* Find next enabled Storm */
5604 for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);
5606 /* Configure calendar slot */
5607 ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
5610 return DBG_STATUS_OK;
5613 /* Assign HW ID to each dword/qword:
5614 * if the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
5615 * Otherwise, we would like to assign a different HW ID to each dword, to avoid
5616 * data synchronization issues. however, we need to check if there is a trigger
5617 * state for which more than one dword has a constraint. if there is, we cannot
5618 * assign a different HW ID to each dword (since a trigger state has a single
5619 * HW ID), so we assign a different HW ID to each block.
5621 static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
5622 u8 hw_ids[VALUES_PER_CYCLE])
5624 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5625 struct dbg_bus_data *bus = &dev_data->bus;
5626 bool hw_id_per_dword = true;
5627 u8 val_id, state_id;
/* Default: all dwords share HW ID 0 (the unified-inputs case) */
5630 OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);
5632 if (bus->unify_inputs)
/* Check whether any trigger state constrains more than one dword */
5635 if (bus->trigger_en) {
5636 for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
5639 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5640 if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
5644 hw_id_per_dword = false;
5648 if (hw_id_per_dword) {
5650 /* Assign a different HW ID for each dword */
5651 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5652 hw_ids[val_id] = val_id;
5655 u8 shifted_enable_mask, next_hw_id = 0;
5657 /* Assign HW IDs according to blocks enable / */
5658 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5659 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5661 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5664 block_bus->hw_id = next_hw_id++;
5665 if (!block_bus->hw_id)
5668 shifted_enable_mask =
5669 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5671 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
/* Tag every dword claimed by this block with the block's HW ID */
5673 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5674 if (shifted_enable_mask & (1 << val_id))
5675 hw_ids[val_id] = block_bus->hw_id;
5680 /* Configure the DBG block HW blocks data */
/* Assigns HW IDs to dwords via ecore_assign_hw_ids, maps each trigger state
 * to the HW ID of a dword it constrains, packs the per-dword HW IDs into the
 * HW-ID mask register, and applies K2-specific PCIE workarounds when a PCIE
 * block is enabled on that chip.
 */
5681 static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
5682 struct ecore_ptt *p_ptt)
5684 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5685 struct dbg_bus_data *bus = &dev_data->bus;
5686 u8 hw_ids[VALUES_PER_CYCLE];
5687 u8 val_id, state_id;
5689 ecore_assign_hw_ids(p_hwfn, hw_ids);
5691 /* Assign a HW ID to each trigger state */
5692 if (dev_data->bus.trigger_en) {
5693 for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
5694 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
5695 u8 state_data = bus->trigger_states[state_id].data;
5697 if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
5698 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
5705 /* Configure HW ID mask */
5706 dev_data->bus.hw_id_mask = 0;
5707 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5708 bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
5709 ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);
5711 /* Configure additional K2 PCIE registers */
5712 if (dev_data->chip_id == CHIP_K2 &&
5713 (GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
5714 GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
5715 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
5716 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
/* Starts debug bus recording: validates that exactly one input type (Storms
 * or blocks, or cross-engine receive) is configured, programs framing mode,
 * Storm/block inputs, filter type, timestamp reset and the client mask, then
 * enables the DBG block and the enabled Storms, and moves the bus state to
 * RECORDING. Returns DBG_STATUS_OK or a specific error status.
 * NOTE(review): several lines (goto/return on config failure, else branches,
 * some local declarations) are missing from this listing — confirm against
 * the complete file.
 */
5720 enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
5721 struct ecore_ptt *p_ptt)
5723 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5724 struct dbg_bus_data *bus = &dev_data->bus;
5725 enum dbg_bus_filter_types filter_type;
5726 enum dbg_status status;
5730 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");
5732 if (bus->state != DBG_BUS_STATE_READY)
5733 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5735 /* Check if any input was enabled */
5736 if (!bus->num_enabled_storms &&
5737 !bus->num_enabled_blocks &&
5738 !bus->rcv_from_other_engine)
5739 return DBG_STATUS_NO_INPUT_ENABLED;
5741 /* Check if too many input types were enabled (storm+dbgmux) */
5742 if (bus->num_enabled_storms && bus->num_enabled_blocks)
5743 return DBG_STATUS_TOO_MANY_INPUTS;
5745 /* Configure framing mode */
5746 if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5749 /* Configure DBG block for Storm inputs */
5750 if (bus->num_enabled_storms)
5751 if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5754 /* Configure DBG block for block inputs */
5755 if (bus->num_enabled_blocks)
5756 ecore_config_block_inputs(p_hwfn, p_ptt);
5758 /* Configure filter type */
5759 if (bus->filter_en) {
/* With a trigger, the filter window depends on the pre/post flags */
5760 if (bus->trigger_en) {
5761 if (bus->filter_pre_trigger)
5762 filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
5764 filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
5767 filter_type = DBG_BUS_FILTER_TYPE_ON;
5771 filter_type = DBG_BUS_FILTER_TYPE_OFF;
5773 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);
5775 /* Restart timestamp */
5776 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);
5778 /* Enable debug block */
5779 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);
5781 /* Configure enabled blocks - must be done before the DBG block is
5784 if (dev_data->bus.num_enabled_blocks) {
5785 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5786 if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
5789 ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
5790 dev_data->bus.blocks[block_id].line_num,
5791 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5792 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
5793 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
5794 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
5798 /* Configure client mask */
5799 ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);
5801 /* Configure enabled Storms - must be done after the DBG block is
5804 if (dev_data->bus.num_enabled_storms)
5805 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
5806 if (dev_data->bus.storms[storm_id].enabled)
5807 ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id, filter_type);
5809 dev_data->bus.state = DBG_BUS_STATE_RECORDING;
5811 return DBG_STATUS_OK;
/* Stops debug bus recording: disables inputs, forces a CPU timeout flush,
 * waits for the flush, disables the DBG block, and — when a trigger was
 * configured — verifies the trigger actually fired (current state must have
 * reached MAX_TRIGGER_STATES). Moves the bus state to STOPPED on success.
 */
5814 enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
5815 struct ecore_ptt *p_ptt)
5817 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5818 struct dbg_bus_data *bus = &dev_data->bus;
5819 enum dbg_status status = DBG_STATUS_OK;
5821 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");
5823 if (bus->state != DBG_BUS_STATE_RECORDING)
5824 return DBG_STATUS_RECORDING_NOT_STARTED;
5826 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
5827 if (status != DBG_STATUS_OK)
/* Force a flush of any pending recorded data */
5830 ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);
5832 OSAL_MSLEEP(FLUSH_DELAY_MS);
5834 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
5836 /* Check if trigger worked */
5837 if (bus->trigger_en) {
5838 u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);
5840 if (trigger_state != MAX_TRIGGER_STATES)
5841 return DBG_STATUS_DATA_DIDNT_TRIGGER;
5844 bus->state = DBG_BUS_STATE_STOPPED;
/* Computes the buffer size (in dwords) needed for a debug bus dump:
 * dump header + recorded data (internal buffer or PCI buffer, depending on
 * the configured target) + the last section. Result is returned via the
 * buf_size output parameter.
 * NOTE(review): the buf_size parameter line and the switch's other cases
 * are missing from this listing — confirm against the complete file.
 */
5849 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5850 struct ecore_ptt *p_ptt,
5853 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5854 struct dbg_bus_data *bus = &dev_data->bus;
5855 enum dbg_status status;
5857 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5861 if (status != DBG_STATUS_OK)
5864 /* Add dump header */
5865 *buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5867 switch (bus->target) {
5868 case DBG_BUS_TARGET_ID_INT_BUF:
5869 *buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5870 case DBG_BUS_TARGET_ID_PCI:
5871 *buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5876 /* Dump last section */
5877 *buf_size += ecore_dump_last_section(p_hwfn, OSAL_NULL, 0, false);
5879 return DBG_STATUS_OK;
/* Dumps the recorded debug bus data into dump_buf: stops recording if still
 * active, validates buffer size and PCI buffer allocation, writes the dump
 * header, the recorded data (chunk-aligned) and the last section, frees the
 * PCI buffer, and resets all bus bookkeeping (state, counters, per-block
 * enable masks, per-Storm enable/filter flags) back to IDLE.
 * Returns DBG_STATUS_OK and the dumped size via num_dumped_dwords, or an
 * error status.
 * NOTE(review): some lines (goto/return on stop failure, dump_buf parameter
 * line, storm_id declaration, closing braces) are missing from this listing
 * — confirm against the complete file.
 */
5882 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5883 struct ecore_ptt *p_ptt,
5885 u32 buf_size_in_dwords,
5886 u32 *num_dumped_dwords)
5888 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5889 u32 min_buf_size_in_dwords, block_id, offset = 0;
5890 struct dbg_bus_data *bus = &dev_data->bus;
5891 enum dbg_status status;
5894 *num_dumped_dwords = 0;
5896 status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5897 if (status != DBG_STATUS_OK)
5900 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5902 if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5903 return DBG_STATUS_RECORDING_NOT_STARTED;
5905 if (bus->state == DBG_BUS_STATE_RECORDING) {
5906 enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5907 if (stop_state != DBG_STATUS_OK)
5911 if (buf_size_in_dwords < min_buf_size_in_dwords)
5912 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5914 if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5915 return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5918 offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5920 /* Dump recorded data */
5921 if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5922 u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5924 if (!recorded_dwords)
5925 return DBG_STATUS_NO_DATA_RECORDED;
5926 if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5927 return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
5928 offset += recorded_dwords;
5931 /* Dump last section */
5932 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, true);
5934 /* If recorded to PCI buffer - free the buffer */
5935 ecore_bus_free_pci_buf(p_hwfn);
5937 /* Clear debug bus parameters */
5938 bus->state = DBG_BUS_STATE_IDLE;
5939 bus->num_enabled_blocks = 0;
5940 bus->num_enabled_storms = 0;
5941 bus->filter_en = bus->trigger_en = 0;
/* Fix: clear the enable mask of EVERY block. Previously the loop index was
 * unused and bus->blocks[BLOCK_PCIE] was cleared on every iteration, leaving
 * all other blocks' enable masks stale after the dump.
 */
5943 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
5944 SET_FIELD(bus->blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
5946 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5947 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5949 storm_bus->enabled = false;
5950 storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
5953 *num_dumped_dwords = offset;
5955 return DBG_STATUS_OK;
/* Sets a single GRC dump parameter. For preset parameters (EXCLUDE_ALL /
 * CRASH), applies the corresponding preset value to every parameter;
 * disabling a preset is rejected (use ecore_dbg_grc_set_params_default).
 * For regular parameters, validates the value against the parameter's
 * min/max and stores it. Initializes default GRC params on first use.
 * NOTE(review): the val parameter line, loop-local declarations and some
 * condition lines are missing from this listing — confirm against the
 * complete file.
 */
5958 enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
5959 enum dbg_grc_params grc_param,
5964 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5966 /* Initializes the GRC parameters (if not initialized). Needed in order
5967 * to set the default parameter values for the first time.
5969 ecore_dbg_grc_init_params(p_hwfn);
5971 if (grc_param >= MAX_DBG_GRC_PARAMS)
5972 return DBG_STATUS_INVALID_ARGS;
5973 if (val < s_grc_param_defs[grc_param].min ||
5974 val > s_grc_param_defs[grc_param].max)
5975 return DBG_STATUS_INVALID_ARGS;
5977 if (s_grc_param_defs[grc_param].is_preset) {
5981 /* Disabling a preset is not allowed. Call
5982 * dbg_grc_set_params_default instead.
5985 return DBG_STATUS_INVALID_ARGS;
5987 /* Update all params with the preset values */
5988 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5991 if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5992 preset_val = s_grc_param_defs[i].exclude_all_preset_val;
5993 else if (grc_param == DBG_GRC_PARAM_CRASH)
5994 preset_val = s_grc_param_defs[i].crash_preset_val;
5996 return DBG_STATUS_INVALID_ARGS;
5998 ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
6003 /* Regular param - set its value */
6004 ecore_grc_set_param(p_hwfn, grc_param, val);
6007 return DBG_STATUS_OK;
6010 /* Assign default GRC param values */
/* Resets every GRC dump parameter to its per-chip default value. */
6011 void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6013 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6016 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6017 dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
/* Computes the buffer size needed for a GRC dump (via a dry-run of
 * ecore_grc_dump). Fails if the debug arrays required for GRC dumping were
 * not loaded. Result is returned via the buf_size output parameter.
 * NOTE(review): the buf_size parameter line is missing from this listing —
 * confirm against the complete file.
 */
6020 enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6021 struct ecore_ptt *p_ptt,
6024 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6028 if (status != DBG_STATUS_OK)
6031 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6032 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6033 return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Dry run (dump=false) only computes the size */
6035 return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6038 enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6039 struct ecore_ptt *p_ptt,
6041 u32 buf_size_in_dwords,
6042 u32 *num_dumped_dwords)
6044 u32 needed_buf_size_in_dwords;
6045 enum dbg_status status;
6047 *num_dumped_dwords = 0;
6049 status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6050 if (status != DBG_STATUS_OK)
6053 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6054 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6056 /* Doesn't do anything, needed for compile time asserts */
6057 ecore_static_asserts();
6060 status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6062 /* Reveret GRC params to their default */
6063 ecore_dbg_grc_set_params_default(p_hwfn);
6068 enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6069 struct ecore_ptt *p_ptt,
6072 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6073 struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6074 enum dbg_status status;
6078 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6079 if (status != DBG_STATUS_OK)
6082 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6083 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6084 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6086 if (!idle_chk->buf_size_set) {
6087 idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6088 idle_chk->buf_size_set = true;
6091 *buf_size = idle_chk->buf_size;
6093 return DBG_STATUS_OK;
6096 enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6097 struct ecore_ptt *p_ptt,
6099 u32 buf_size_in_dwords,
6100 u32 *num_dumped_dwords)
6102 u32 needed_buf_size_in_dwords;
6103 enum dbg_status status;
6105 *num_dumped_dwords = 0;
6107 status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6108 if (status != DBG_STATUS_OK)
6111 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6112 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6114 /* Update reset state */
6115 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6117 /* Idle Check Dump */
6118 *num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6120 /* Reveret GRC params to their default */
6121 ecore_dbg_grc_set_params_default(p_hwfn);
6123 return DBG_STATUS_OK;
6126 enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6127 struct ecore_ptt *p_ptt,
6130 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6134 if (status != DBG_STATUS_OK)
6137 return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6140 enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6141 struct ecore_ptt *p_ptt,
6143 u32 buf_size_in_dwords,
6144 u32 *num_dumped_dwords)
6146 u32 needed_buf_size_in_dwords;
6147 enum dbg_status status;
6149 status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6150 if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6153 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6154 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6156 /* Update reset state */
6157 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6160 status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6162 /* Reveret GRC params to their default */
6163 ecore_dbg_grc_set_params_default(p_hwfn);
6168 enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6169 struct ecore_ptt *p_ptt,
6172 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6176 if (status != DBG_STATUS_OK)
6179 return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6182 enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6183 struct ecore_ptt *p_ptt,
6185 u32 buf_size_in_dwords,
6186 u32 *num_dumped_dwords)
6188 u32 needed_buf_size_in_dwords;
6189 enum dbg_status status;
6191 *num_dumped_dwords = 0;
6193 status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6194 if (status != DBG_STATUS_OK)
6197 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6198 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6200 /* Update reset state */
6201 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6203 status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6205 /* Reveret GRC params to their default */
6206 ecore_dbg_grc_set_params_default(p_hwfn);
6211 enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6212 struct ecore_ptt *p_ptt,
6215 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6219 if (status != DBG_STATUS_OK)
6222 return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6225 enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6226 struct ecore_ptt *p_ptt,
6228 u32 buf_size_in_dwords,
6229 u32 *num_dumped_dwords)
6231 u32 needed_buf_size_in_dwords;
6232 enum dbg_status status;
6234 *num_dumped_dwords = 0;
6236 status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6237 if (status != DBG_STATUS_OK)
6240 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6241 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6243 /* Update reset state */
6244 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6246 status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6248 /* Reveret GRC params to their default */
6249 ecore_dbg_grc_set_params_default(p_hwfn);
6254 enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6255 struct ecore_ptt *p_ptt,
6258 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6262 if (status != DBG_STATUS_OK)
6265 return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6268 enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6269 struct ecore_ptt *p_ptt,
6271 u32 buf_size_in_dwords,
6272 u32 *num_dumped_dwords)
6274 u32 needed_buf_size_in_dwords;
6275 enum dbg_status status;
6277 *num_dumped_dwords = 0;
6279 status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6280 if (status != DBG_STATUS_OK)
6283 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6284 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6286 /* Update reset state */
6287 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6289 status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6291 /* Reveret GRC params to their default */
6292 ecore_dbg_grc_set_params_default(p_hwfn);
6297 enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6298 struct ecore_ptt *p_ptt,
6301 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6305 if (status != DBG_STATUS_OK)
6308 /* Update reset state */
6309 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6311 *buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6313 return DBG_STATUS_OK;
6316 enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6317 struct ecore_ptt *p_ptt,
6319 u32 buf_size_in_dwords,
6320 u32 *num_dumped_dwords)
6322 u32 needed_buf_size_in_dwords;
6323 enum dbg_status status;
6325 *num_dumped_dwords = 0;
6327 status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6328 if (status != DBG_STATUS_OK)
6331 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6332 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6334 *num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6336 /* Reveret GRC params to their default */
6337 ecore_dbg_grc_set_params_default(p_hwfn);
6339 return DBG_STATUS_OK;
6342 enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
6343 struct ecore_ptt *p_ptt,
6344 enum block_id block_id,
6345 enum dbg_attn_type attn_type,
6347 struct dbg_attn_block_result *results)
6349 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6350 u8 reg_idx, num_attn_regs, num_result_regs = 0;
6351 const struct dbg_attn_reg *attn_reg_arr;
6353 if (status != DBG_STATUS_OK)
6356 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6357 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6359 attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);
6361 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
6362 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
6363 struct dbg_attn_reg_result *reg_result;
6364 u32 sts_addr, sts_val;
6365 u16 modes_buf_offset;
6369 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
6370 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
6371 if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
6374 /* Mode match - read attention status register */
6375 sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
6376 sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
6380 /* Non-zero attention status - add to results */
6381 reg_result = &results->reg_results[num_result_regs];
6382 SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
6383 SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
6384 reg_result->block_attn_offset = reg_data->block_attn_offset;
6385 reg_result->sts_val = sts_val;
6386 reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
6390 results->block_id = (u8)block_id;
6391 results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
6392 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
6393 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
6395 return DBG_STATUS_OK;
6398 enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6399 struct dbg_attn_block_result *results)
6401 enum dbg_attn_type attn_type;
6404 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6405 attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6407 for (i = 0; i < num_regs; i++) {
6408 struct dbg_attn_reg_result *reg_result;
6409 const char *attn_type_str;
6412 reg_result = &results->reg_results[i];
6413 attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6414 sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6415 DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6418 return DBG_STATUS_OK;
6421 bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6422 struct ecore_ptt *p_ptt,
6423 enum block_id block_id)
6425 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6426 struct block_defs *block = s_block_defs[block_id];
6429 if (!block->has_reset_bit)
6432 reset_reg = block->reset_reg;
6434 return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
6435 !(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) : true;