2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
29 * File : ecore_dbg_fw_funcs.c
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
37 #include "ecore_mcp.h"
38 #include "spad_layout.h"
41 #include "ecore_hsi_common.h"
42 #include "ecore_hsi_debug_tools.h"
43 #include "mcp_public.h"
45 #ifndef USE_DBG_BIN_FILE
46 #include "ecore_dbg_values.h"
48 #include "ecore_dbg_fw_funcs.h"
50 /* Memory groups enum */
62 MEM_GROUP_CONN_CFC_MEM,
63 MEM_GROUP_TASK_CFC_MEM,
84 /* Memory groups names */
85 static const char* s_mem_group_names[] = {
117 /* Idle check conditions */
119 #ifndef __PREVENT_COND_ARR__
121 static u32 cond5(const u32 *r, const u32 *imm) {
122 return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
125 static u32 cond7(const u32 *r, const u32 *imm) {
126 return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
129 static u32 cond6(const u32 *r, const u32 *imm) {
130 return ((r[0] & imm[0]) != imm[1]);
133 static u32 cond9(const u32 *r, const u32 *imm) {
134 return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
137 static u32 cond10(const u32 *r, const u32 *imm) {
138 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
141 static u32 cond4(const u32 *r, const u32 *imm) {
142 return ((r[0] & ~imm[0]) != imm[1]);
145 static u32 cond0(const u32 *r, const u32 *imm) {
146 return ((r[0] & ~r[1]) != imm[0]);
149 static u32 cond1(const u32 *r, const u32 *imm) {
150 return (r[0] != imm[0]);
153 static u32 cond11(const u32 *r, const u32 *imm) {
154 return (r[0] != r[1] && r[2] == imm[0]);
157 static u32 cond12(const u32 *r, const u32 *imm) {
158 return (r[0] != r[1] && r[2] > imm[0]);
161 static u32 cond3(const u32 *r, const u32 OSAL_UNUSED *imm) {
162 return (r[0] != r[1]);
165 static u32 cond13(const u32 *r, const u32 *imm) {
166 return (r[0] & imm[0]);
169 static u32 cond8(const u32 *r, const u32 *imm) {
170 return (r[0] < (r[1] - imm[0]));
173 static u32 cond2(const u32 *r, const u32 *imm) {
174 return (r[0] > imm[0]);
177 /* Array of Idle Check conditions */
178 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
195 #endif /* __PREVENT_COND_ARR__ */
198 /******************************* Data Types **********************************/
203 PLATFORM_EMUL_REDUCED,
208 struct chip_platform_defs {
214 /* Chip constant definitions */
217 struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
220 /* Platform constant definitions */
221 struct platform_defs {
228 /* Storm constant definitions.
229 * Addresses are in bytes, sizes are in quad-regs.
233 enum block_id block_id;
234 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
236 u32 sem_fast_mem_addr;
237 u32 sem_frame_mode_addr;
238 u32 sem_slow_enable_addr;
239 u32 sem_slow_mode_addr;
240 u32 sem_slow_mode1_conf_addr;
241 u32 sem_sync_dbg_empty_addr;
242 u32 sem_slow_dbg_empty_addr;
244 u32 cm_conn_ag_ctx_lid_size;
245 u32 cm_conn_ag_ctx_rd_addr;
246 u32 cm_conn_st_ctx_lid_size;
247 u32 cm_conn_st_ctx_rd_addr;
248 u32 cm_task_ag_ctx_lid_size;
249 u32 cm_task_ag_ctx_rd_addr;
250 u32 cm_task_st_ctx_lid_size;
251 u32 cm_task_st_ctx_rd_addr;
254 /* Block constant definitions */
257 bool exists[MAX_CHIP_IDS];
258 bool associated_to_storm;
260 /* Valid only if associated_to_storm is true */
262 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
266 u32 dbg_force_valid_addr;
267 u32 dbg_force_frame_addr;
270 /* If true, block is taken out of reset before dump */
272 enum dbg_reset_regs reset_reg;
274 /* Bit offset in reset register */
278 /* Reset register definitions */
279 struct reset_reg_defs {
281 bool exists[MAX_CHIP_IDS];
282 u32 unreset_val[MAX_CHIP_IDS];
285 /* Debug Bus Constraint operation constant definitions */
286 struct dbg_bus_constraint_op_defs {
291 /* Storm Mode definitions */
292 struct storm_mode_defs {
298 struct grc_param_defs {
299 u32 default_val[MAX_CHIP_IDS];
303 u32 exclude_all_preset_val;
304 u32 crash_preset_val;
307 /* address is in 128b units. Width is in bits. */
308 struct rss_mem_defs {
309 const char *mem_name;
310 const char *type_name;
313 u32 num_entries[MAX_CHIP_IDS];
316 struct vfc_ram_defs {
317 const char *mem_name;
318 const char *type_name;
323 struct big_ram_defs {
324 const char *instance_name;
325 enum mem_groups mem_group_id;
326 enum mem_groups ram_mem_group_id;
327 enum dbg_grc_params grc_param;
330 u32 is_256b_reg_addr;
331 u32 is_256b_bit_offset[MAX_CHIP_IDS];
332 u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
336 const char *phy_name;
338 /* PHY base GRC address */
341 /* Relative address of indirect TBUS address register (bits 0..7) */
342 u32 tbus_addr_lo_addr;
344 /* Relative address of indirect TBUS address register (bits 8..10) */
345 u32 tbus_addr_hi_addr;
347 /* Relative address of indirect TBUS data register (bits 0..7) */
348 u32 tbus_data_lo_addr;
350 /* Relative address of indirect TBUS data register (bits 8..11) */
351 u32 tbus_data_hi_addr;
354 /******************************** Constants **********************************/
356 #define MAX_LCIDS 320
357 #define MAX_LTIDS 320
359 #define NUM_IOR_SETS 2
360 #define IORS_PER_SET 176
361 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
363 #define BYTES_IN_DWORD sizeof(u32)
/* Circular right-shift of 'val' (a 'val_width'-bit value) by 'amount' bits,
 * masked back to 'val_width' bits. The whole expansion is parenthesized so
 * the macro composes safely inside larger expressions (the unparenthesized
 * form mis-binds through the trailing '&' when used with ==, +, etc.).
 * Assumes val_width < 32 so the shifts are defined - TODO confirm callers.
 */
#define SHR(val, val_width, amount) \
	((((val) | ((val) << (val_width))) >> (amount)) & ((1 << (val_width)) - 1))
368 /* In the macros below, size and offset are specified in bits */
369 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
/* Token-paste helpers: expand to the <type>_<field>_OFFSET / _SIZE constants
 * (bit offsets/sizes, generated elsewhere for each HSI struct field).
 */
#define FIELD_BIT_OFFSET(type, field) type##_##field##_##OFFSET
#define FIELD_BIT_SIZE(type, field) type##_##field##_##SIZE

/* Dword index and in-dword bit shift of a field within a dword array */
#define FIELD_DWORD_OFFSET(type, field) (int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)

/* In-dword mask covering the field's bits */
#define FIELD_BIT_MASK(type, field) (((1 << FIELD_BIT_SIZE(type, field)) - 1) << FIELD_DWORD_SHIFT(type, field))

/* Read-modify-write of a field inside dword array 'var'. Wrapped in
 * do/while(0) so both statements stay a single unit when the macro is used
 * as the body of an if/else or a loop (the bare two-statement form would
 * leave the '|=' outside the controlled scope).
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= (~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= (val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)
378 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) ecore_wr(dev, ptt, addr, (arr)[i])
380 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) (arr)[i] = ecore_rd(dev, ptt, addr)
382 #define CHECK_ARR_SIZE(arr, size) OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))
384 #ifndef DWORDS_TO_BYTES
385 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
387 #ifndef BYTES_TO_DWORDS
388 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
391 /* extra lines include a signature line + optional latency events line */
392 #ifndef NUM_DBG_LINES
393 #define NUM_EXTRA_DBG_LINES(block_desc) (1 + (block_desc->has_latency_events ? 1 : 0))
394 #define NUM_DBG_LINES(block_desc) (block_desc->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
397 #define USE_DMAE true
398 #define PROTECT_WIDE_BUS true
400 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
401 #define RAM_LINES_TO_BYTES(lines) DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
403 #define REG_DUMP_LEN_SHIFT 24
404 #define MEM_DUMP_ENTRY_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
406 #define IDLE_CHK_RULE_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
408 #define IDLE_CHK_RESULT_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
410 #define IDLE_CHK_RESULT_REG_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
412 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
414 /* The sizes and offsets below are specified in bits */
415 #define VFC_CAM_CMD_STRUCT_SIZE 64
416 #define VFC_CAM_CMD_ROW_OFFSET 48
417 #define VFC_CAM_CMD_ROW_SIZE 9
418 #define VFC_CAM_ADDR_STRUCT_SIZE 16
419 #define VFC_CAM_ADDR_OP_OFFSET 0
420 #define VFC_CAM_ADDR_OP_SIZE 4
421 #define VFC_CAM_RESP_STRUCT_SIZE 256
422 #define VFC_RAM_ADDR_STRUCT_SIZE 16
423 #define VFC_RAM_ADDR_OP_OFFSET 0
424 #define VFC_RAM_ADDR_OP_SIZE 2
425 #define VFC_RAM_ADDR_ROW_OFFSET 2
426 #define VFC_RAM_ADDR_ROW_SIZE 10
427 #define VFC_RAM_RESP_STRUCT_SIZE 256
429 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
430 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
431 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
432 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
433 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
434 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
436 #define NUM_VFC_RAM_TYPES 4
438 #define VFC_CAM_NUM_ROWS 512
440 #define VFC_OPCODE_CAM_RD 14
441 #define VFC_OPCODE_RAM_RD 0
443 #define NUM_RSS_MEM_TYPES 5
445 #define NUM_BIG_RAM_TYPES 3
447 #define NUM_PHY_TBUS_ADDRESSES 2048
448 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
450 #define SEM_FAST_MODE23_SRC_ENABLE_VAL 0x0
451 #define SEM_FAST_MODE23_SRC_DISABLE_VAL 0x7
452 #define SEM_FAST_MODE4_SRC_ENABLE_VAL 0x0
453 #define SEM_FAST_MODE4_SRC_DISABLE_VAL 0x3
454 #define SEM_FAST_MODE6_SRC_ENABLE_VAL 0x10
455 #define SEM_FAST_MODE6_SRC_DISABLE_VAL 0x3f
457 #define SEM_SLOW_MODE1_DATA_ENABLE 0x1
459 #define VALUES_PER_CYCLE 4
460 #define MAX_CYCLE_VALUES_MASK ((1 << VALUES_PER_CYCLE) - 1)
462 #define MAX_DWORDS_PER_CYCLE 8
466 #define NUM_CALENDAR_SLOTS 16
468 #define MAX_TRIGGER_STATES 3
469 #define TRIGGER_SETS_PER_STATE 2
470 #define MAX_CONSTRAINTS 4
472 #define SEM_FILTER_CID_EN_MASK 0x00b
473 #define SEM_FILTER_EID_MASK_EN_MASK 0x013
474 #define SEM_FILTER_EID_RANGE_EN_MASK 0x113
476 #define CHUNK_SIZE_IN_DWORDS 64
477 #define CHUNK_SIZE_IN_BYTES DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)
479 #define INT_BUF_NUM_OF_LINES 192
480 #define INT_BUF_LINE_SIZE_IN_DWORDS 16
481 #define INT_BUF_SIZE_IN_DWORDS (INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
482 #define INT_BUF_SIZE_IN_CHUNKS (INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)
484 #define PCI_BUF_LINE_SIZE_IN_DWORDS 8
485 #define PCI_BUF_LINE_SIZE_IN_BYTES DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)
487 #define TARGET_EN_MASK_PCI 0x3
488 #define TARGET_EN_MASK_NIG 0x4
490 #define PCI_REQ_CREDIT 1
491 #define PCI_PHYS_ADDR_TYPE 0
/* Builds an opaque FID: PCI function id placed in bits 4..., 0xff00 set in
 * the high byte. Argument parenthesized so expression arguments (e.g.
 * OPAQUE_FID(a & b)) expand with the intended precedence.
 */
#define OPAQUE_FID(pci_func) (((pci_func) << 4) | 0xff00)
495 #define RESET_REG_UNRESET_OFFSET 4
497 #define PCI_PKT_SIZE_IN_CHUNKS 1
498 #define PCI_PKT_SIZE_IN_BYTES (PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)
500 #define NIG_PKT_SIZE_IN_CHUNKS 4
502 #define FLUSH_DELAY_MS 500
503 #define STALL_DELAY_MS 500
505 #define SRC_MAC_ADDR_LO16 0x0a0b
506 #define SRC_MAC_ADDR_HI32 0x0c0d0e0f
507 #define ETH_TYPE 0x1000
509 #define STATIC_DEBUG_LINE_DWORDS 9
511 #define NUM_COMMON_GLOBAL_PARAMS 8
513 #define FW_IMG_KUKU 0
514 #define FW_IMG_MAIN 1
517 #ifndef REG_FIFO_ELEMENT_DWORDS
518 #define REG_FIFO_ELEMENT_DWORDS 2
520 #define REG_FIFO_DEPTH_ELEMENTS 32
521 #define REG_FIFO_DEPTH_DWORDS (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
523 #ifndef IGU_FIFO_ELEMENT_DWORDS
524 #define IGU_FIFO_ELEMENT_DWORDS 4
526 #define IGU_FIFO_DEPTH_ELEMENTS 64
527 #define IGU_FIFO_DEPTH_DWORDS (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
529 #define SEMI_SYNC_FIFO_POLLING_DELAY_MS 5
530 #define SEMI_SYNC_FIFO_POLLING_COUNT 20
532 #ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
533 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
535 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
536 #define PROTECTION_OVERRIDE_DEPTH_DWORDS (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * PROTECTION_OVERRIDE_ELEMENT_DWORDS)
538 #define MCP_SPAD_TRACE_OFFSIZE_ADDR (MCP_REG_SCRATCH + OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))
540 #define EMPTY_FW_VERSION_STR "???_???_???_???"
541 #define EMPTY_FW_IMAGE_STR "???????????????"
544 /***************************** Constant Arrays *******************************/
552 #ifdef USE_DBG_BIN_FILE
553 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
555 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {
557 /* BIN_BUF_DBG_MODE_TREE */
558 { (const u32 *)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},
560 /* BIN_BUF_DBG_DUMP_REG */
561 { dump_reg, OSAL_ARRAY_SIZE(dump_reg) },
563 /* BIN_BUF_DBG_DUMP_MEM */
564 { dump_mem, OSAL_ARRAY_SIZE(dump_mem) },
566 /* BIN_BUF_DBG_IDLE_CHK_REGS */
567 { idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },
569 /* BIN_BUF_DBG_IDLE_CHK_IMMS */
570 { idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },
572 /* BIN_BUF_DBG_IDLE_CHK_RULES */
573 { idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },
575 /* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
578 /* BIN_BUF_DBG_ATTN_BLOCKS */
579 { attn_block, OSAL_ARRAY_SIZE(attn_block) },
581 /* BIN_BUF_DBG_ATTN_REGSS */
582 { attn_reg, OSAL_ARRAY_SIZE(attn_reg) },
584 /* BIN_BUF_DBG_ATTN_INDEXES */
587 /* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
590 /* BIN_BUF_DBG_BUS_BLOCKS */
591 { dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },
593 /* BIN_BUF_DBG_BUS_LINES */
594 { dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },
596 /* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
599 /* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
602 /* BIN_BUF_DBG_PARSING_STRINGS */
607 /* Chip constant definitions array */
608 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
612 { { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
615 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
618 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },
621 { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },
626 { { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
629 { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
632 { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },
635 { MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } },
640 { { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
643 { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
646 { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },
649 { MAX_NUM_PORTS_E5, 8, MAX_NUM_VFS_E5 } } }
652 /* Storm constant definitions array */
653 static struct storm_defs s_storm_defs[] = {
657 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
658 TSEM_REG_FAST_MEMORY,
659 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
660 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
661 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
662 TCM_REG_CTX_RBC_ACCS,
663 4, TCM_REG_AGG_CON_CTX,
664 16, TCM_REG_SM_CON_CTX,
665 2, TCM_REG_AGG_TASK_CTX,
666 4, TCM_REG_SM_TASK_CTX },
670 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM }, false,
671 MSEM_REG_FAST_MEMORY,
672 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
673 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
674 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
675 MCM_REG_CTX_RBC_ACCS,
676 1, MCM_REG_AGG_CON_CTX,
677 10, MCM_REG_SM_CON_CTX,
678 2, MCM_REG_AGG_TASK_CTX,
679 7, MCM_REG_SM_TASK_CTX },
683 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
684 USEM_REG_FAST_MEMORY,
685 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
686 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
687 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
688 UCM_REG_CTX_RBC_ACCS,
689 2, UCM_REG_AGG_CON_CTX,
690 13, UCM_REG_SM_CON_CTX,
691 3, UCM_REG_AGG_TASK_CTX,
692 3, UCM_REG_SM_TASK_CTX },
696 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
697 XSEM_REG_FAST_MEMORY,
698 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
699 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
700 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
701 XCM_REG_CTX_RBC_ACCS,
702 9, XCM_REG_AGG_CON_CTX,
703 15, XCM_REG_SM_CON_CTX,
709 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY }, false,
710 YSEM_REG_FAST_MEMORY,
711 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
712 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
713 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
714 YCM_REG_CTX_RBC_ACCS,
715 2, YCM_REG_AGG_CON_CTX,
716 3, YCM_REG_SM_CON_CTX,
717 2, YCM_REG_AGG_TASK_CTX,
718 12, YCM_REG_SM_TASK_CTX },
722 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
723 PSEM_REG_FAST_MEMORY,
724 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
725 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
726 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
727 PCM_REG_CTX_RBC_ACCS,
729 10, PCM_REG_SM_CON_CTX,
734 /* Block definitions array */
736 static struct block_defs block_grc_defs = {
737 "grc", { true, true, true }, false, 0,
738 { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
739 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
740 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
741 GRC_REG_DBG_FORCE_FRAME,
742 true, false, DBG_RESET_REG_MISC_PL_UA, 1 };
744 static struct block_defs block_miscs_defs = {
745 "miscs", { true, true, true }, false, 0,
746 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
748 false, false, MAX_DBG_RESET_REGS, 0 };
750 static struct block_defs block_misc_defs = {
751 "misc", { true, true, true }, false, 0,
752 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
754 false, false, MAX_DBG_RESET_REGS, 0 };
756 static struct block_defs block_dbu_defs = {
757 "dbu", { true, true, true }, false, 0,
758 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
760 false, false, MAX_DBG_RESET_REGS, 0 };
762 static struct block_defs block_pglue_b_defs = {
763 "pglue_b", { true, true, true }, false, 0,
764 { DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
765 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
766 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
767 PGLUE_B_REG_DBG_FORCE_FRAME,
768 true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };
770 static struct block_defs block_cnig_defs = {
771 "cnig", { true, true, true }, false, 0,
772 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
773 CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
774 CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
775 CNIG_REG_DBG_FORCE_FRAME_K2_E5,
776 true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };
778 static struct block_defs block_cpmu_defs = {
779 "cpmu", { true, true, true }, false, 0,
780 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
782 true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };
784 static struct block_defs block_ncsi_defs = {
785 "ncsi", { true, true, true }, false, 0,
786 { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
787 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
788 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
789 NCSI_REG_DBG_FORCE_FRAME,
790 true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };
792 static struct block_defs block_opte_defs = {
793 "opte", { true, true, false }, false, 0,
794 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
796 true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };
798 static struct block_defs block_bmb_defs = {
799 "bmb", { true, true, true }, false, 0,
800 { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
801 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
802 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
803 BMB_REG_DBG_FORCE_FRAME,
804 true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };
806 static struct block_defs block_pcie_defs = {
807 "pcie", { true, true, true }, false, 0,
808 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
809 PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
810 PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
811 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
812 false, false, MAX_DBG_RESET_REGS, 0 };
814 static struct block_defs block_mcp_defs = {
815 "mcp", { true, true, true }, false, 0,
816 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
818 false, false, MAX_DBG_RESET_REGS, 0 };
820 static struct block_defs block_mcp2_defs = {
821 "mcp2", { true, true, true }, false, 0,
822 { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
823 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
824 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
825 MCP2_REG_DBG_FORCE_FRAME,
826 false, false, MAX_DBG_RESET_REGS, 0 };
828 static struct block_defs block_pswhst_defs = {
829 "pswhst", { true, true, true }, false, 0,
830 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
831 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
832 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
833 PSWHST_REG_DBG_FORCE_FRAME,
834 true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
836 static struct block_defs block_pswhst2_defs = {
837 "pswhst2", { true, true, true }, false, 0,
838 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
839 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
840 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
841 PSWHST2_REG_DBG_FORCE_FRAME,
842 true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
844 static struct block_defs block_pswrd_defs = {
845 "pswrd", { true, true, true }, false, 0,
846 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
847 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
848 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
849 PSWRD_REG_DBG_FORCE_FRAME,
850 true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
852 static struct block_defs block_pswrd2_defs = {
853 "pswrd2", { true, true, true }, false, 0,
854 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
855 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
856 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
857 PSWRD2_REG_DBG_FORCE_FRAME,
858 true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
860 static struct block_defs block_pswwr_defs = {
861 "pswwr", { true, true, true }, false, 0,
862 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
863 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
864 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
865 PSWWR_REG_DBG_FORCE_FRAME,
866 true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
868 static struct block_defs block_pswwr2_defs = {
869 "pswwr2", { true, true, true }, false, 0,
870 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
872 true, false, DBG_RESET_REG_MISC_PL_HV, 3 };
874 static struct block_defs block_pswrq_defs = {
875 "pswrq", { true, true, true }, false, 0,
876 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
877 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
878 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
879 PSWRQ_REG_DBG_FORCE_FRAME,
880 true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
882 static struct block_defs block_pswrq2_defs = {
883 "pswrq2", { true, true, true }, false, 0,
884 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
885 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
886 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
887 PSWRQ2_REG_DBG_FORCE_FRAME,
888 true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
890 static struct block_defs block_pglcs_defs = {
891 "pglcs", { true, true, true }, false, 0,
892 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
893 PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
894 PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
895 PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
896 true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };
898 static struct block_defs block_ptu_defs ={
899 "ptu", { true, true, true }, false, 0,
900 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
901 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
902 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
903 PTU_REG_DBG_FORCE_FRAME,
904 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };
906 static struct block_defs block_dmae_defs = {
907 "dmae", { true, true, true }, false, 0,
908 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
909 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
910 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
911 DMAE_REG_DBG_FORCE_FRAME,
912 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };
914 static struct block_defs block_tcm_defs = {
915 "tcm", { true, true, true }, true, DBG_TSTORM_ID,
916 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
917 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
918 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
919 TCM_REG_DBG_FORCE_FRAME,
920 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };
922 static struct block_defs block_mcm_defs = {
923 "mcm", { true, true, true }, true, DBG_MSTORM_ID,
924 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
925 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
926 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
927 MCM_REG_DBG_FORCE_FRAME,
928 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };
930 static struct block_defs block_ucm_defs = {
931 "ucm", { true, true, true }, true, DBG_USTORM_ID,
932 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
933 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
934 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
935 UCM_REG_DBG_FORCE_FRAME,
936 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };
938 static struct block_defs block_xcm_defs = {
939 "xcm", { true, true, true }, true, DBG_XSTORM_ID,
940 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
941 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
942 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
943 XCM_REG_DBG_FORCE_FRAME,
944 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };
946 static struct block_defs block_ycm_defs = {
947 "ycm", { true, true, true }, true, DBG_YSTORM_ID,
948 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
949 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
950 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
951 YCM_REG_DBG_FORCE_FRAME,
952 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };
954 static struct block_defs block_pcm_defs = {
955 "pcm", { true, true, true }, true, DBG_PSTORM_ID,
956 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
957 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
958 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
959 PCM_REG_DBG_FORCE_FRAME,
960 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };
962 static struct block_defs block_qm_defs = {
963 "qm", { true, true, true }, false, 0,
964 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ },
965 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
966 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
967 QM_REG_DBG_FORCE_FRAME,
968 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };
970 static struct block_defs block_tm_defs = {
971 "tm", { true, true, true }, false, 0,
972 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
973 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
974 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
975 TM_REG_DBG_FORCE_FRAME,
976 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };
978 static struct block_defs block_dorq_defs = {
979 "dorq", { true, true, true }, false, 0,
980 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
981 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
982 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
983 DORQ_REG_DBG_FORCE_FRAME,
984 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };
986 static struct block_defs block_brb_defs = {
987 "brb", { true, true, true }, false, 0,
988 { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
989 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
990 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
991 BRB_REG_DBG_FORCE_FRAME,
992 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };
994 static struct block_defs block_src_defs = {
995 "src", { true, true, true }, false, 0,
996 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
997 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
998 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
999 SRC_REG_DBG_FORCE_FRAME,
1000 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };
1002 static struct block_defs block_prs_defs = {
1003 "prs", { true, true, true }, false, 0,
1004 { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
1005 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
1006 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
1007 PRS_REG_DBG_FORCE_FRAME,
1008 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };
1010 static struct block_defs block_tsdm_defs = {
1011 "tsdm", { true, true, true }, true, DBG_TSTORM_ID,
1012 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1013 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
1014 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
1015 TSDM_REG_DBG_FORCE_FRAME,
1016 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };
1018 static struct block_defs block_msdm_defs = {
1019 "msdm", { true, true, true }, true, DBG_MSTORM_ID,
1020 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1021 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
1022 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
1023 MSDM_REG_DBG_FORCE_FRAME,
1024 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };
1026 static struct block_defs block_usdm_defs = {
1027 "usdm", { true, true, true }, true, DBG_USTORM_ID,
1028 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1029 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
1030 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
1031 USDM_REG_DBG_FORCE_FRAME,
1032 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
1034 static struct block_defs block_xsdm_defs = {
1035 "xsdm", { true, true, true }, true, DBG_XSTORM_ID,
1036 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1037 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
1038 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
1039 XSDM_REG_DBG_FORCE_FRAME,
1040 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };
1042 static struct block_defs block_ysdm_defs = {
1043 "ysdm", { true, true, true }, true, DBG_YSTORM_ID,
1044 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
1045 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
1046 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
1047 YSDM_REG_DBG_FORCE_FRAME,
1048 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };
1050 static struct block_defs block_psdm_defs = {
1051 "psdm", { true, true, true }, true, DBG_PSTORM_ID,
1052 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1053 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
1054 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
1055 PSDM_REG_DBG_FORCE_FRAME,
1056 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };
1058 static struct block_defs block_tsem_defs = {
1059 "tsem", { true, true, true }, true, DBG_TSTORM_ID,
1060 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1061 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
1062 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
1063 TSEM_REG_DBG_FORCE_FRAME,
1064 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };
1066 static struct block_defs block_msem_defs = {
1067 "msem", { true, true, true }, true, DBG_MSTORM_ID,
1068 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1069 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
1070 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
1071 MSEM_REG_DBG_FORCE_FRAME,
1072 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };
1074 static struct block_defs block_usem_defs = {
1075 "usem", { true, true, true }, true, DBG_USTORM_ID,
1076 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1077 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1078 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1079 USEM_REG_DBG_FORCE_FRAME,
1080 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };
1082 static struct block_defs block_xsem_defs = {
1083 "xsem", { true, true, true }, true, DBG_XSTORM_ID,
1084 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1085 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1086 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1087 XSEM_REG_DBG_FORCE_FRAME,
1088 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };
1090 static struct block_defs block_ysem_defs = {
1091 "ysem", { true, true, true }, true, DBG_YSTORM_ID,
1092 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
1093 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1094 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1095 YSEM_REG_DBG_FORCE_FRAME,
1096 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };
1098 static struct block_defs block_psem_defs = {
1099 "psem", { true, true, true }, true, DBG_PSTORM_ID,
1100 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1101 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1102 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1103 PSEM_REG_DBG_FORCE_FRAME,
1104 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };
1106 static struct block_defs block_rss_defs = {
1107 "rss", { true, true, true }, false, 0,
1108 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
1109 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1110 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1111 RSS_REG_DBG_FORCE_FRAME,
1112 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };
1114 static struct block_defs block_tmld_defs = {
1115 "tmld", { true, true, true }, false, 0,
1116 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1117 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1118 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1119 TMLD_REG_DBG_FORCE_FRAME,
1120 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };
1122 static struct block_defs block_muld_defs = {
1123 "muld", { true, true, true }, false, 0,
1124 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1125 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1126 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1127 MULD_REG_DBG_FORCE_FRAME,
1128 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };
1130 static struct block_defs block_yuld_defs = {
1131 "yuld", { true, true, false }, false, 0,
1132 { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, MAX_DBG_BUS_CLIENTS },
1133 YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1134 YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1135 YULD_REG_DBG_FORCE_FRAME_BB_K2,
1136 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };
1138 static struct block_defs block_xyld_defs = {
1139 "xyld", { true, true, true }, false, 0,
1140 { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
1141 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1142 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1143 XYLD_REG_DBG_FORCE_FRAME,
1144 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };
1146 static struct block_defs block_ptld_defs = {
1147 "ptld", { false, false, true }, false, 0,
1148 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT },
1149 PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1150 PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1151 PTLD_REG_DBG_FORCE_FRAME_E5,
1152 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 28 };
1154 static struct block_defs block_ypld_defs = {
1155 "ypld", { false, false, true }, false, 0,
1156 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS },
1157 YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1158 YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1159 YPLD_REG_DBG_FORCE_FRAME_E5,
1160 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 27 };
1162 static struct block_defs block_prm_defs = {
1163 "prm", { true, true, true }, false, 0,
1164 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1165 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1166 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1167 PRM_REG_DBG_FORCE_FRAME,
1168 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };
1170 static struct block_defs block_pbf_pb1_defs = {
1171 "pbf_pb1", { true, true, true }, false, 0,
1172 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1173 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1174 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1175 PBF_PB1_REG_DBG_FORCE_FRAME,
1176 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };
1178 static struct block_defs block_pbf_pb2_defs = {
1179 "pbf_pb2", { true, true, true }, false, 0,
1180 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1181 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1182 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1183 PBF_PB2_REG_DBG_FORCE_FRAME,
1184 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };
1186 static struct block_defs block_rpb_defs = {
1187 "rpb", { true, true, true }, false, 0,
1188 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1189 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1190 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1191 RPB_REG_DBG_FORCE_FRAME,
1192 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };
1194 static struct block_defs block_btb_defs = {
1195 "btb", { true, true, true }, false, 0,
1196 { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1197 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1198 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1199 BTB_REG_DBG_FORCE_FRAME,
1200 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };
1202 static struct block_defs block_pbf_defs = {
1203 "pbf", { true, true, true }, false, 0,
1204 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
1205 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1206 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1207 PBF_REG_DBG_FORCE_FRAME,
1208 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };
1210 static struct block_defs block_rdif_defs = {
1211 "rdif", { true, true, true }, false, 0,
1212 { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
1213 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1214 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1215 RDIF_REG_DBG_FORCE_FRAME,
1216 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };
1218 static struct block_defs block_tdif_defs = {
1219 "tdif", { true, true, true }, false, 0,
1220 { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
1221 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1222 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1223 TDIF_REG_DBG_FORCE_FRAME,
1224 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };
1226 static struct block_defs block_cdu_defs = {
1227 "cdu", { true, true, true }, false, 0,
1228 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1229 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1230 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1231 CDU_REG_DBG_FORCE_FRAME,
1232 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };
1234 static struct block_defs block_ccfc_defs = {
1235 "ccfc", { true, true, true }, false, 0,
1236 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1237 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1238 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1239 CCFC_REG_DBG_FORCE_FRAME,
1240 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };
1242 static struct block_defs block_tcfc_defs = {
1243 "tcfc", { true, true, true }, false, 0,
1244 { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
1245 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1246 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1247 TCFC_REG_DBG_FORCE_FRAME,
1248 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };
1250 static struct block_defs block_igu_defs = {
1251 "igu", { true, true, true }, false, 0,
1252 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1253 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1254 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1255 IGU_REG_DBG_FORCE_FRAME,
1256 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };
1258 static struct block_defs block_cau_defs = {
1259 "cau", { true, true, true }, false, 0,
1260 { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
1261 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1262 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1263 CAU_REG_DBG_FORCE_FRAME,
1264 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };
1266 /* TODO: add debug bus parameters when E5 RGFS RF is added */
1267 static struct block_defs block_rgfs_defs = {
1268 "rgfs", { false, false, true }, false, 0,
1269 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1271 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 };
1273 static struct block_defs block_rgsrc_defs = {
1274 "rgsrc", { false, false, true }, false, 0,
1275 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
1276 RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1277 RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1278 RGSRC_REG_DBG_FORCE_FRAME_E5,
1279 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 30 };
1281 /* TODO: add debug bus parameters when E5 TGFS RF is added */
1282 static struct block_defs block_tgfs_defs = {
1283 "tgfs", { false, false, true }, false, 0,
1284 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1286 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 };
1288 static struct block_defs block_tgsrc_defs = {
1289 "tgsrc", { false, false, true }, false, 0,
1290 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV },
1291 TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1292 TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1293 TGSRC_REG_DBG_FORCE_FRAME_E5,
1294 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 31 };
1296 static struct block_defs block_umac_defs = {
1297 "umac", { true, true, true }, false, 0,
1298 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1299 UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1300 UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1301 UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1302 true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };
1304 static struct block_defs block_xmac_defs = {
1305 "xmac", { true, false, false }, false, 0,
1306 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1308 false, false, MAX_DBG_RESET_REGS, 0 };
1310 static struct block_defs block_dbg_defs = {
1311 "dbg", { true, true, true }, false, 0,
1312 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1314 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };
1316 static struct block_defs block_nig_defs = {
1317 "nig", { true, true, true }, false, 0,
1318 { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
1319 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1320 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1321 NIG_REG_DBG_FORCE_FRAME,
1322 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };
1324 static struct block_defs block_wol_defs = {
1325 "wol", { false, true, true }, false, 0,
1326 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1327 WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1328 WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1329 WOL_REG_DBG_FORCE_FRAME_K2_E5,
1330 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };
1332 static struct block_defs block_bmbn_defs = {
1333 "bmbn", { false, true, true }, false, 0,
1334 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
1335 BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1336 BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1337 BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1338 false, false, MAX_DBG_RESET_REGS, 0 };
1340 static struct block_defs block_ipc_defs = {
1341 "ipc", { true, true, true }, false, 0,
1342 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1344 true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };
1346 static struct block_defs block_nwm_defs = {
1347 "nwm", { false, true, true }, false, 0,
1348 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
1349 NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1350 NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1351 NWM_REG_DBG_FORCE_FRAME_K2_E5,
1352 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };
1354 static struct block_defs block_nws_defs = {
1355 "nws", { false, true, true }, false, 0,
1356 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
1357 NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1358 NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1359 NWS_REG_DBG_FORCE_FRAME_K2_E5,
1360 true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };
1362 static struct block_defs block_ms_defs = {
1363 "ms", { false, true, true }, false, 0,
1364 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
1365 MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1366 MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1367 MS_REG_DBG_FORCE_FRAME_K2_E5,
1368 true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };
1370 static struct block_defs block_phy_pcie_defs = {
1371 "phy_pcie", { false, true, true }, false, 0,
1372 { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
1373 PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1374 PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1375 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1376 false, false, MAX_DBG_RESET_REGS, 0 };
1378 static struct block_defs block_led_defs = {
1379 "led", { false, true, true }, false, 0,
1380 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1382 true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };
1384 static struct block_defs block_avs_wrap_defs = {
1385 "avs_wrap", { false, true, false }, false, 0,
1386 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1388 true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };
1390 static struct block_defs block_pxpreqbus_defs = {
1391 "pxpreqbus", { false, false, false }, false, 0,
1392 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1394 false, false, MAX_DBG_RESET_REGS, 0 };
1396 static struct block_defs block_misc_aeu_defs = {
1397 "misc_aeu", { true, true, true }, false, 0,
1398 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1400 false, false, MAX_DBG_RESET_REGS, 0 };
1402 static struct block_defs block_bar0_map_defs = {
1403 "bar0_map", { true, true, true }, false, 0,
1404 { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1406 false, false, MAX_DBG_RESET_REGS, 0 };
/* Pointer table of per-block descriptors, indexed by enum block_id.
 * NOTE(review): this listing is heavily elided -- most of the entries
 * between the ones shown (original lines 1410-1497) are missing from
 * this chunk.  Do not treat the visible subset as the complete table;
 * restore the full MAX_BLOCK_ID-entry list from the pristine source.
 */
1409 static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
1414 &block_pglue_b_defs,
1424 &block_pswhst2_defs,
1466 &block_pbf_pb1_defs,
1467 &block_pbf_pb2_defs,
1492 &block_phy_pcie_defs,
1494 &block_avs_wrap_defs,
1495 &block_pxpreqbus_defs,
1496 &block_misc_aeu_defs,
1497 &block_bar0_map_defs,
1502 /* Constraint operation types */
/* Indexed by enum dbg_bus_constraint_ops (checked by ecore_static_asserts).
 * NOTE(review): the initializer values themselves were elided from this
 * listing -- only the per-entry index comments remain.  Each entry
 * presumably maps a constraint operation to its HW encoding; restore
 * from the pristine source.
 */
1503 static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
1505 /* DBG_BUS_CONSTRAINT_OP_EQ */
1508 /* DBG_BUS_CONSTRAINT_OP_NE */
1511 /* DBG_BUS_CONSTRAINT_OP_LT */
1514 /* DBG_BUS_CONSTRAINT_OP_LTC */
1517 /* DBG_BUS_CONSTRAINT_OP_LE */
1520 /* DBG_BUS_CONSTRAINT_OP_LEC */
1523 /* DBG_BUS_CONSTRAINT_OP_GT */
1526 /* DBG_BUS_CONSTRAINT_OP_GTC */
1529 /* DBG_BUS_CONSTRAINT_OP_GE */
1532 /* DBG_BUS_CONSTRAINT_OP_GEC */
/* Human-readable names of the debug bus targets, indexed by
 * enum dbg_bus_targets (checked by ecore_static_asserts).
 * NOTE(review): the string values themselves were elided from this
 * listing; restore from the pristine source.
 */
1536 static const char* s_dbg_target_names[] = {
1538 /* DBG_BUS_TARGET_ID_INT_BUF */
1541 /* DBG_BUS_TARGET_ID_NIG */
1544 /* DBG_BUS_TARGET_ID_PCI */
/* Storm debug-bus modes, indexed by enum dbg_bus_storm_modes (checked by
 * ecore_static_asserts).  Each entry: mode name, fast-debug flag, HW bus
 * mode id.
 * NOTE(review): the DBG_BUS_STORM_MODE_RH entry's initializer was elided
 * from this listing; restore it from the pristine source.
 */
1548 static struct storm_mode_defs s_storm_mode_defs[] = {
1550 /* DBG_BUS_STORM_MODE_PRINTF */
1551 { "printf", true, 0 },
1553 /* DBG_BUS_STORM_MODE_PRAM_ADDR */
1554 { "pram_addr", true, 1 },
1556 /* DBG_BUS_STORM_MODE_DRA_RW */
1557 { "dra_rw", true, 2 },
1559 /* DBG_BUS_STORM_MODE_DRA_W */
1560 { "dra_w", true, 3 },
1562 /* DBG_BUS_STORM_MODE_LD_ST_ADDR */
1563 { "ld_st_addr", true, 4 },
1565 /* DBG_BUS_STORM_MODE_DRA_FSM */
1566 { "dra_fsm", true, 5 },
1568 /* DBG_BUS_STORM_MODE_RH */
1571 /* DBG_BUS_STORM_MODE_FOC */
1572 { "foc", false, 1 },
1574 /* DBG_BUS_STORM_MODE_EXT_STORE */
1575 { "ext_store", false, 3 }
1578 static struct platform_defs s_platform_defs[] = {
1581 { "asic", 1, 256, 32768 },
1583 /* PLATFORM_EMUL_FULL */
1584 { "emul_full", 2000, 8, 4096 },
1586 /* PLATFORM_EMUL_REDUCED */
1587 { "emul_reduced", 2000, 8, 4096 },
1590 { "fpga", 200, 32, 8192 }
1593 static struct grc_param_defs s_grc_param_defs[] = {
1595 /* DBG_GRC_PARAM_DUMP_TSTORM */
1596 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1598 /* DBG_GRC_PARAM_DUMP_MSTORM */
1599 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1601 /* DBG_GRC_PARAM_DUMP_USTORM */
1602 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1604 /* DBG_GRC_PARAM_DUMP_XSTORM */
1605 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1607 /* DBG_GRC_PARAM_DUMP_YSTORM */
1608 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1610 /* DBG_GRC_PARAM_DUMP_PSTORM */
1611 { { 1, 1, 1 }, 0, 1, false, 1, 1 },
1613 /* DBG_GRC_PARAM_DUMP_REGS */
1614 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1616 /* DBG_GRC_PARAM_DUMP_RAM */
1617 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1619 /* DBG_GRC_PARAM_DUMP_PBUF */
1620 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1622 /* DBG_GRC_PARAM_DUMP_IOR */
1623 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1625 /* DBG_GRC_PARAM_DUMP_VFC */
1626 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1628 /* DBG_GRC_PARAM_DUMP_CM_CTX */
1629 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1631 /* DBG_GRC_PARAM_DUMP_ILT */
1632 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1634 /* DBG_GRC_PARAM_DUMP_RSS */
1635 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1637 /* DBG_GRC_PARAM_DUMP_CAU */
1638 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1640 /* DBG_GRC_PARAM_DUMP_QM */
1641 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1643 /* DBG_GRC_PARAM_DUMP_MCP */
1644 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1646 /* DBG_GRC_PARAM_RESERVED */
1647 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1649 /* DBG_GRC_PARAM_DUMP_CFC */
1650 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1652 /* DBG_GRC_PARAM_DUMP_IGU */
1653 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1655 /* DBG_GRC_PARAM_DUMP_BRB */
1656 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1658 /* DBG_GRC_PARAM_DUMP_BTB */
1659 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1661 /* DBG_GRC_PARAM_DUMP_BMB */
1662 { { 0, 0, 0 }, 0, 1, false, 0, 1 },
1664 /* DBG_GRC_PARAM_DUMP_NIG */
1665 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1667 /* DBG_GRC_PARAM_DUMP_MULD */
1668 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1670 /* DBG_GRC_PARAM_DUMP_PRS */
1671 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1673 /* DBG_GRC_PARAM_DUMP_DMAE */
1674 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1676 /* DBG_GRC_PARAM_DUMP_TM */
1677 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1679 /* DBG_GRC_PARAM_DUMP_SDM */
1680 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1682 /* DBG_GRC_PARAM_DUMP_DIF */
1683 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1685 /* DBG_GRC_PARAM_DUMP_STATIC */
1686 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1688 /* DBG_GRC_PARAM_UNSTALL */
1689 { { 0, 0, 0 }, 0, 1, false, 0, 0 },
1691 /* DBG_GRC_PARAM_NUM_LCIDS */
1692 { { MAX_LCIDS, MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },
1694 /* DBG_GRC_PARAM_NUM_LTIDS */
1695 { { MAX_LTIDS, MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },
1697 /* DBG_GRC_PARAM_EXCLUDE_ALL */
1698 { { 0, 0, 0 }, 0, 1, true, 0, 0 },
1700 /* DBG_GRC_PARAM_CRASH */
1701 { { 0, 0, 0 }, 0, 1, true, 0, 0 },
1703 /* DBG_GRC_PARAM_PARITY_SAFE */
1704 { { 0, 0, 0 }, 0, 1, false, 1, 0 },
1706 /* DBG_GRC_PARAM_DUMP_CM */
1707 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1709 /* DBG_GRC_PARAM_DUMP_PHY */
1710 { { 1, 1, 1 }, 0, 1, false, 0, 1 },
1712 /* DBG_GRC_PARAM_NO_MCP */
1713 { { 0, 0, 0 }, 0, 1, false, 0, 0 },
1715 /* DBG_GRC_PARAM_NO_FW_VER */
1716 { { 0, 0, 0 }, 0, 1, false, 0, 0 }
1719 static struct rss_mem_defs s_rss_mem_defs[] = {
1720 { "rss_mem_cid", "rss_cid", 0, 32,
1721 { 256, 320, 512 } },
1723 { "rss_mem_key_msb", "rss_key", 1024, 256,
1724 { 128, 208, 257 } },
1726 { "rss_mem_key_lsb", "rss_key", 2048, 64,
1727 { 128, 208, 257 } },
1729 { "rss_mem_info", "rss_info", 3072, 16,
1730 { 128, 208, 256 } },
1732 { "rss_mem_ind", "rss_ind", 4096, 16,
1733 { 16384, 26624, 32768 } }
1736 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1737 { "vfc_ram_tt1", "vfc_ram", 0, 512 },
1738 { "vfc_ram_mtt2", "vfc_ram", 512, 128 },
1739 { "vfc_ram_stt2", "vfc_ram", 640, 32 },
1740 { "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
1743 static struct big_ram_defs s_big_ram_defs[] = {
1744 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1745 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 0, 0 },
1746 { 153600, 180224, 282624 } },
1748 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1749 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 1, 1 },
1750 { 92160, 117760, 168960 } },
1752 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1753 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, MISCS_REG_BLOCK_256B_EN, { 0, 0, 0 },
1754 { 36864, 36864, 36864 } }
1757 static struct reset_reg_defs s_reset_regs_defs[] = {
1759 /* DBG_RESET_REG_MISCS_PL_UA */
1760 { MISCS_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1762 /* DBG_RESET_REG_MISCS_PL_HV */
1763 { MISCS_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x400, 0x600 } },
1765 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1766 { MISCS_REG_RESET_PL_HV_2_K2_E5, { false, true, true }, { 0x0, 0x0, 0x0 } },
1768 /* DBG_RESET_REG_MISC_PL_UA */
1769 { MISC_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1771 /* DBG_RESET_REG_MISC_PL_HV */
1772 { MISC_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x0, 0x0 } },
1774 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1775 { MISC_REG_RESET_PL_PDA_VMAIN_1, { true, true, true }, { 0x4404040, 0x4404040, 0x404040 } },
1777 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1778 { MISC_REG_RESET_PL_PDA_VMAIN_2, { true, true, true }, { 0x7, 0x7c00007, 0x5c08007 } },
1780 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1781 { MISC_REG_RESET_PL_PDA_VAUX, { true, true, true }, { 0x2, 0x2, 0x2 } },
1784 static struct phy_defs s_phy_defs[] = {
1785 { "nw_phy", NWS_REG_NWS_CMU_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
1786 { "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1787 { "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1788 { "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1791 /* The order of indexes that should be applied to a PCI buffer line */
1792 static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
1794 /******************************** Variables **********************************/
1796 /* The version of the calling app */
1797 static u32 s_app_ver;
1799 /**************************** Private Functions ******************************/
1801 static void ecore_static_asserts(void)
1803 CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
1804 CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
1805 CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
1806 CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
1807 CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
1808 CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
1809 CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
1810 CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
1811 CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
1812 CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
1813 CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
1814 CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
1817 /* Reads and returns a single dword from the specified unaligned buffer. */
1818 static u32 ecore_read_unaligned_dword(u8 *buf)
1822 OSAL_MEMCPY((u8 *)&dword, buf, sizeof(dword));
1826 /* Returns the difference in bytes between the specified physical addresses.
1827 * Assumes that the first address is bigger then the second, and that the
1828 * difference is a 32-bit value.
1830 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1831 struct dbg_bus_mem_addr *b)
1833 return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1836 /* Sets the value of the specified GRC param */
1837 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1838 enum dbg_grc_params grc_param,
1841 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1843 dev_data->grc.param_val[grc_param] = val;
1846 /* Returns the value of the specified GRC param */
1847 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1848 enum dbg_grc_params grc_param)
1850 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1852 return dev_data->grc.param_val[grc_param];
1855 /* Initializes the GRC parameters */
1856 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1858 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1860 if (!dev_data->grc.params_initialized) {
1861 ecore_dbg_grc_set_params_default(p_hwfn);
1862 dev_data->grc.params_initialized = 1;
1866 /* Initializes debug data for the specified device */
/* Detects the chip (E5/K2/BB) and platform (ASIC/emulation/FPGA),
 * records them in dev_data together with the matching mode_enable flags,
 * then initializes the GRC parameters.  Returns DBG_STATUS_OK, or an
 * error status for an unset app version / unknown chip.
 * NOTE(review): this listing is elided -- the guard before the
 * APP_VERSION_NOT_SET return, the closing braces of the chip branches,
 * and the (apparent) ASIC_ONLY preprocessor conditionals around the
 * platform detection are missing; restore from the pristine source.
 */
1867 static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
1868 struct ecore_ptt *p_ptt)
1870 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Already initialized for this hwfn -- nothing to do */
1872 if (dev_data->initialized)
1873 return DBG_STATUS_OK;
1876 return DBG_STATUS_APP_VERSION_NOT_SET;
/* Chip detection: newest first */
1878 if (ECORE_IS_E5(p_hwfn->p_dev)) {
1879 dev_data->chip_id = CHIP_E5;
1880 dev_data->mode_enable[MODE_E5] = 1;
1882 else if (ECORE_IS_K2(p_hwfn->p_dev)) {
1883 dev_data->chip_id = CHIP_K2;
1884 dev_data->mode_enable[MODE_K2] = 1;
1886 else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
1887 dev_data->chip_id = CHIP_BB;
1888 dev_data->mode_enable[MODE_BB] = 1;
1891 return DBG_STATUS_UNKNOWN_CHIP;
/* Platform detection (ASIC-only builds hardwire PLATFORM_ASIC) */
1895 dev_data->platform_id = PLATFORM_ASIC;
1896 dev_data->mode_enable[MODE_ASIC] = 1;
1898 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
1899 dev_data->platform_id = PLATFORM_ASIC;
1900 dev_data->mode_enable[MODE_ASIC] = 1;
1902 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
/* An ECO reserved bit distinguishes full from reduced emulation */
1903 if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
1904 dev_data->platform_id = PLATFORM_EMUL_FULL;
1905 dev_data->mode_enable[MODE_EMUL_FULL] = 1;
1908 dev_data->platform_id = PLATFORM_EMUL_REDUCED;
1909 dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
1912 else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1913 dev_data->platform_id = PLATFORM_FPGA;
1914 dev_data->mode_enable[MODE_FPGA] = 1;
1917 return DBG_STATUS_UNKNOWN_CHIP;
1921 /* Initializes the GRC parameters */
1922 ecore_dbg_grc_init_params(p_hwfn);
1924 dev_data->use_dmae = USE_DMAE;
1925 dev_data->num_regs_read = 0;
1926 dev_data->initialized = 1;
1928 return DBG_STATUS_OK;
1931 static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1932 enum block_id block_id)
1934 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1936 return (struct dbg_bus_block *)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1939 /* Returns OSAL_NULL for signature line, latency line and non-existing lines */
1940 static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
1941 enum block_id block_id)
1943 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1944 struct dbg_bus_block_data *block_bus;
1945 struct dbg_bus_block *block_desc;
1947 block_bus = &dev_data->bus.blocks[block_id];
1948 block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
1950 if (!block_bus->line_num ||
1951 (block_bus->line_num == 1 && block_desc->has_latency_events) ||
1952 block_bus->line_num >= NUM_DBG_LINES(block_desc))
1955 return (struct dbg_bus_line *)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
1958 /* Reads the FW info structure for the specified Storm from the chip,
1959 * and writes it to the specified fw_info pointer.
1961 static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
1962 struct ecore_ptt *p_ptt,
1964 struct fw_info *fw_info)
1966 struct storm_defs *storm = &s_storm_defs[storm_id];
1967 struct fw_info_location fw_info_location;
1970 OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
1971 OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));
1973 /* Read first the address that points to fw_info location.
1974 * The address is located in the last line of the Storm RAM.
1976 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1977 (ECORE_IS_E5(p_hwfn->p_dev) ?
1978 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_E5) :
1979 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2))
1980 - sizeof(fw_info_location);
1982 dest = (u32 *)&fw_info_location;
1984 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
1985 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1987 /* Read FW version info from Storm RAM */
1988 if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
1989 addr = fw_info_location.grc_addr;
1990 dest = (u32 *)fw_info;
1991 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
1992 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1996 /* Dumps the specified string to the specified buffer.
1997 * Returns the dumped size in bytes.
1999 static u32 ecore_dump_str(char *dump_buf,
2004 OSAL_STRCPY(dump_buf, str);
2006 return (u32)OSAL_STRLEN(str) + 1;
2009 /* Dumps zeros to align the specified buffer to dwords.
2010 * Returns the dumped size in bytes.
2012 static u32 ecore_dump_align(char *dump_buf,
2016 u8 offset_in_dword, align_size;
2018 offset_in_dword = (u8)(byte_offset & 0x3);
2019 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
2021 if (dump && align_size)
2022 OSAL_MEMSET(dump_buf, 0, align_size);
2027 /* Writes the specified string param to the specified buffer.
2028 * Returns the dumped size in dwords.
2030 static u32 ecore_dump_str_param(u32 *dump_buf,
2032 const char *param_name,
2033 const char *param_val)
2035 char *char_buf = (char *)dump_buf;
2038 /* Dump param name */
2039 offset += ecore_dump_str(char_buf + offset, dump, param_name);
2041 /* Indicate a string param value */
2043 *(char_buf + offset) = 1;
2046 /* Dump param value */
2047 offset += ecore_dump_str(char_buf + offset, dump, param_val);
2049 /* Align buffer to next dword */
2050 offset += ecore_dump_align(char_buf + offset, dump, offset);
2052 return BYTES_TO_DWORDS(offset);
2055 /* Writes the specified numeric param to the specified buffer.
2056 * Returns the dumped size in dwords.
2058 static u32 ecore_dump_num_param(u32 *dump_buf,
2060 const char *param_name,
2063 char *char_buf = (char *)dump_buf;
2066 /* Dump param name */
2067 offset += ecore_dump_str(char_buf + offset, dump, param_name);
2069 /* Indicate a numeric param value */
2071 *(char_buf + offset) = 0;
2074 /* Align buffer to next dword */
2075 offset += ecore_dump_align(char_buf + offset, dump, offset);
2077 /* Dump param value (and change offset from bytes to dwords) */
2078 offset = BYTES_TO_DWORDS(offset);
2080 *(dump_buf + offset) = param_val;
2086 /* Reads the FW version and writes it as a param to the specified buffer.
2087 * Returns the dumped size in dwords.
/* Walks the Storms until one that is not in reset is found, reads its FW
 * info, formats version/image strings, and emits three dump params:
 * "fw-version", "fw-image" and "fw-timestamp".  When dump is clear or
 * DBG_GRC_PARAM_NO_FW_VER is set, the empty placeholder strings are
 * emitted instead.
 * NOTE(review): this listing is elided -- the local declarations
 * (offset, storm loop variables, the "found" flag), the loop's continue /
 * found-update statements and several closing braces are missing; restore
 * from the pristine source.
 */
2089 static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
2090 struct ecore_ptt *p_ptt,
2094 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2095 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
2096 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
2097 struct fw_info fw_info = { { 0 }, { 0 } };
2100 if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2101 /* Read FW image/version from PRAM in a non-reset SEMI */
2105 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
2106 struct storm_defs *storm = &s_storm_defs[storm_id];
2108 /* Read FW version/image */
/* Skip Storms whose block is held in reset -- their RAM is unreadable */
2109 if (dev_data->block_in_reset[storm->block_id])
2112 /* Read FW info for the current Storm */
2113 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
2115 /* Create FW version/image strings */
2116 if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
2117 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
2118 switch (fw_info.ver.image_id) {
2119 case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
2120 case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
2121 case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
2122 default: OSAL_STRCPY(fw_img_str, "unknown"); break;
2129 /* Dump FW version, image and timestamp */
2130 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
2131 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
2132 offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);
2137 /* Reads the MFW version and writes it as a param to the specified buffer.
2138 * Returns the dumped size in dwords.
2140 static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
2141 struct ecore_ptt *p_ptt,
2145 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2146 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
/* MFW version is only readable on real ASIC (not emulation/FPGA) and when
 * not suppressed by the no-fw-ver GRC param.
 */
2148 if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2149 u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;
2151 /* Find MCP public data GRC address. Needs to be ORed with
2152 * MCP_REG_SCRATCH due to a HW bug.
2154 public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;
2156 /* Find MCP public global section offset */
2157 global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
2158 global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
/* offsize encodes the section offset in dwords; convert to a byte address */
2159 global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2161 /* Read MFW version from MCP public global section */
2162 mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));
2164 /* Dump MFW version param (version packed big-endian-style, one byte per
2165 * component: major_minor_rev_eng) */
2165 if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2166 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
2169 return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2172 /* Writes a section header to the specified buffer.
2173 * Returns the dumped size in dwords.
2175 static u32 ecore_dump_section_hdr(u32 *dump_buf,
/* A section header is simply a numeric param: name -> number of params */
2180 return ecore_dump_num_param(dump_buf, dump, name, num_params);
2183 /* Writes the common global params to the specified buffer.
2184 * Returns the dumped size in dwords.
2186 static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
2187 struct ecore_ptt *p_ptt,
/* num_specific_global_params: number of extra params the caller will dump
 * after this call; included in the section's param count.
 */
2190 u8 num_specific_global_params)
2192 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2196 /* Dump global params section header */
2197 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2198 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);
/* Dump the common params: FW/MFW versions, tools version, chip, platform
 * and PF id.
 */
2201 offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2202 offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2203 offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
2204 offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
2205 offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
2206 offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);
2211 /* Writes the "last" section (including CRC) to the specified buffer at the
2212 * given offset. Returns the dumped size in dwords.
2214 static u32 ecore_dump_last_section(u32 *dump_buf,
2218 u32 start_offset = offset;
2220 /* Dump CRC section header */
2221 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2223 /* Calculate CRC32 and add it to the dword after the "last" section.
2223 * CRC covers the whole dump so far (everything before this dword),
2223 * seeded with 0xffffffff and bit-inverted (standard CRC-32 finalization). */
2225 *(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8 *)dump_buf, DWORDS_TO_BYTES(offset));
2229 return offset - start_offset;
2232 /* Update blocks reset state: refreshes dev_data->block_in_reset[] by
2232 * sampling the chip's reset registers. */
2233 static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
2234 struct ecore_ptt *p_ptt)
2236 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2237 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2240 /* Read reset registers (only those that exist on this chip) */
2241 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2242 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2243 reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr)
2245 /* Check if blocks are in reset */
2246 for (i = 0; i < MAX_BLOCK_ID; i++) {
2247 struct block_defs *block = s_block_defs[i];
/* A cleared reset-register bit means the block is held in reset */
2249 dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
2253 /* Enable / disable the Debug block */
2254 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2255 struct ecore_ptt *p_ptt,
2258 ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2261 /* Resets the Debug block by pulsing its reset bit: clear the bit, then
2261 * restore the register's original value. */
2262 static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
2263 struct ecore_ptt *p_ptt)
2265 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2266 struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2268 dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2269 old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
/* Bit cleared = block in reset (see ecore_update_blocks_reset_state) */
2270 new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);
2272 ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2273 ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
/* Sets the Debug block framing mode */
2276 static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
2277 struct ecore_ptt *p_ptt,
2278 enum dbg_bus_frame_modes mode)
2280 ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2283 /* Enable / disable Debug Bus clients according to the specified mask
2284 * (1 = enable, 0 = disable).
2286 static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
2287 struct ecore_ptt *p_ptt,
2290 ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2293 /* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID. */
2294 static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
2295 struct ecore_ptt *p_ptt,
2296 enum dbg_storms storm_id)
2298 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2299 u32 base_addr, sem_filter_params = 0;
2300 struct dbg_bus_storm_data *storm_bus;
2301 struct storm_mode_defs *storm_mode;
2302 struct storm_defs *storm;
2304 storm = &s_storm_defs[storm_id];
2305 storm_bus = &dev_data->bus.storms[storm_id];
2306 storm_mode = &s_storm_mode_defs[storm_bus->mode];
2307 base_addr = storm->sem_fast_mem_addr;
/* Fast vs slow debug use different SEMI frame modes and register sets */
2310 if (storm_mode->is_fast_dbg) {
2312 /* Enable fast debug */
2313 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
2314 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
2315 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);
2317 /* Enable messages. Must be done after enabling
2318 * SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
2319 * be dropped after the SEMI sync fifo is filled.
2321 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_ENABLE_VAL);
2322 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_ENABLE_VAL);
2323 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE_VAL);
2327 /* Enable slow debug */
2328 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
2329 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
2330 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
2331 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
2334 /* Config SEM cid filter */
2335 if (storm_bus->cid_filter_en) {
2336 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
2337 sem_filter_params |= SEM_FILTER_CID_EN_MASK;
2340 /* Config SEM eid filter (either a min/max range or a value/mask pair) */
2341 if (storm_bus->eid_filter_en) {
2342 const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;
2344 if (storm_bus->eid_range_not_mask) {
2345 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
2346 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
2347 sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
/* Mask register is inverted: set bits in eid_filter->mask.mask mean
 * "compare this bit".
 */
2350 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
2351 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
2352 sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
2356 /* Config accumulated SEM filter parameters (if any) */
2357 if (sem_filter_params)
2358 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
2361 /* Disables Debug Bus block inputs: stops Storm messages, drains the SEMI
2361 * sync FIFOs, disables Storm debug, clients, blocks, timestamp, filters
2361 * and triggers. Always returns DBG_STATUS_OK. */
2362 static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2363 struct ecore_ptt *p_ptt,
2364 bool empty_semi_fifos)
2366 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2367 u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2368 bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2371 /* Disable messages output in all Storms */
2372 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2373 struct storm_defs *storm = &s_storm_defs[storm_id];
/* Storms in reset are skipped (their FIFOs count as empty below) */
2375 if (dev_data->block_in_reset[storm->block_id])
2378 ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_DISABLE_VAL);
2379 ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_DISABLE_VAL);
2380 ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE_VAL);
2383 /* Try to empty the SEMI sync fifo. Must be done after messages output
2384 * were disabled in all Storms.
2386 while (num_fifos_to_empty) {
2387 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2388 struct storm_defs *storm = &s_storm_defs[storm_id];
2390 if (is_fifo_empty[storm_id])
2393 /* Check if sync fifo got empty */
2394 if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2395 is_fifo_empty[storm_id] = true;
2396 num_fifos_to_empty--;
2400 /* Check if need to continue polling */
2401 if (num_fifos_to_empty) {
2402 u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
/* NOTE(review): polling_count is declared (and thus reset to 0) inside
 * this per-iteration scope, so the SEMI_SYNC_FIFO_POLLING_COUNT bound
 * below can never be exceeded while empty_semi_fifos is true — verify
 * against the full source whether it should live outside the while loop.
 */
2403 u32 polling_count = 0;
2405 if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2406 OSAL_MSLEEP(polling_ms);
2410 DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2416 /* Disable debug in all Storms */
2417 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2418 struct storm_defs *storm = &s_storm_defs[storm_id];
2419 u32 base_addr = storm->sem_fast_mem_addr;
2421 if (dev_data->block_in_reset[storm->block_id])
2424 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2425 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2426 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2427 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2430 /* Disable all clients */
2431 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2433 /* Disable all blocks that have a debug client on this chip */
2434 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2435 struct block_defs *block = s_block_defs[block_id];
2437 if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS && !dev_data->block_in_reset[block_id])
2438 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2441 /* Disable timestamp */
2442 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2444 /* Disable filters and triggers */
2445 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2446 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2448 return DBG_STATUS_OK;
2451 /* Sets a Debug Bus trigger/filter constraint. Writes the constraint's
2451 * operation, data/mask, frame bit/mask, dword offset, range, cyclic and
2451 * must bits to either the filter or the trigger register file, selected
2451 * by is_filter. */
2452 static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
2453 struct ecore_ptt *p_ptt,
2466 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Registers of consecutive constraints are one dword apart */
2467 u32 reg_offset = constraint_id * BYTES_IN_DWORD;
2468 u8 curr_trigger_state;
2470 /* For trigger only - set register offset according to state.
2470 * next_trigger_state was already incremented when the state was added,
2470 * hence the -1. */
2472 curr_trigger_state = dev_data->bus.next_trigger_state - 1;
2473 reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
2476 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
2477 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
2478 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
2479 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
2480 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
2481 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
2482 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
2483 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
2484 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
2487 /* Reads the specified DBG Bus internal buffer range and copy it to the
2488 * specified buffer. Returns the dumped size in dwords.
2490 static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
2491 struct ecore_ptt *p_ptt,
2497 u32 line, reg_addr, i, offset = 0;
/* Size-only mode: no GRC reads, just return the range size */
2500 return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;
2502 for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
2504 line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
/* Dwords within a line are stored in reverse order in the HW buffer,
 * hence the mirrored destination index.
 */
2505 for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
2506 dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);
2511 /* Reads the DBG Bus internal buffer and copy its contents to a buffer.
2512 * Returns the dumped size in dwords.
2514 static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
2515 struct ecore_ptt *p_ptt,
2519 u32 last_written_line, offset = 0;
2521 last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);
2523 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
2525 /* Internal buffer was wrapped: first dump from write pointer
2526 * to buffer end, then dump from buffer start to write pointer.
2528 if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
2529 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
2530 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
/* last_written_line == 0 with no wrap means nothing was recorded */
2532 else if (last_written_line) {
2534 /* Internal buffer wasn't wrapped: dump from buffer start until
/* A non-zero read pointer would indicate inconsistent HW state */
2537 if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
2538 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2540 DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
2546 /* Reads the specified DBG Bus PCI buffer range and copy it to the specified
2547 * buffer. Returns the dumped size in dwords.
2549 static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
2555 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2558 /* Extract PCI buffer pointer from virtual address */
2559 void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2560 u32 *pci_buf_start = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);
2561 u32 *pci_buf, line, i;
/* Size-only mode: no buffer access, just return the range size */
2564 return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;
2566 for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
2568 line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
/* s_pci_buf_line_ind[] reorders the dwords of each line into dump order */
2569 for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
2570 dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;
2575 /* Copies the DBG Bus PCI buffer to the specified buffer.
2576 * Returns the dumped size in dwords.
2578 static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
2579 struct ecore_ptt *p_ptt,
2583 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2584 u32 next_wr_byte_offset, next_wr_line_offset;
2585 struct dbg_bus_mem_addr next_wr_phys_addr;
2586 u32 pci_buf_size_in_lines, offset = 0;
2588 pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;
2590 /* Extract write pointer (physical address) */
2591 next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
2592 next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);
2594 /* Convert write pointer to offset.
2594 * NOTE(review): the error-handling branch for a misaligned/out-of-range
2594 * offset (the if at line 2596) is omitted in this extract — presumably
2594 * it reports an error and/or returns; confirm in the full source. */
2595 next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
2596 if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
2598 next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;
2600 /* PCI buffer wrapped: first dump from write pointer to buffer end. */
2601 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
2602 offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);
2604 /* Dump from buffer start until write pointer */
2605 if (next_wr_line_offset)
2606 offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);
2611 /* Copies the DBG Bus recorded data to the specified buffer.
2612 * Returns the dumped size in dwords.
2614 static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2615 struct ecore_ptt *p_ptt,
2619 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Dispatch on the configured recording target; only the internal buffer
 * and PCI buffer targets have readable data here.
 */
2621 switch (dev_data->bus.target) {
2622 case DBG_BUS_TARGET_ID_INT_BUF:
2623 return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2624 case DBG_BUS_TARGET_ID_PCI:
2625 return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2633 /* Frees the Debug Bus PCI buffer (no-op if none was allocated) */
2634 static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
2636 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2637 dma_addr_t pci_buf_phys_addr;
2641 /* Extract PCI buffer pointer from virtual address */
2642 virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2643 pci_buf = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);
/* Nothing to free if no buffer was allocated */
2645 if (!dev_data->bus.pci_buf.size)
2648 OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));
2650 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);
/* Mark the buffer as freed */
2652 dev_data->bus.pci_buf.size = 0;
2655 /* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
2656 * Returns the dumped size in dwords.
2658 static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
2662 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Template Storm name: first char is replaced by the Storm's letter */
2663 char storm_name[8] = "?storm";
2664 u32 block_id, offset = 0;
/* Dump one "bus_input" section per enabled Storm */
2668 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2669 struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
2670 struct storm_defs *storm = &s_storm_defs[storm_id];
2672 if (!dev_data->bus.storms[storm_id].enabled)
2675 /* Dump section header */
2676 storm_name[0] = storm->letter;
2677 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
2678 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
2679 offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
2680 offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
/* Dump one "bus_input" section per enabled block */
2684 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2685 struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
2686 struct block_defs *block = s_block_defs[block_id];
2688 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
2691 /* Dump section header */
2692 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
2693 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
2694 offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
2695 offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
2696 offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
2702 /* Dumps the Debug Bus header (params, inputs, data header) to the specified
2703 * buffer. Returns the dumped size in dwords.
2705 static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
2706 struct ecore_ptt *p_ptt,
2710 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2711 char hw_id_mask_str[16];
2714 if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
2715 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");
2717 /* Dump global params (5 debug-bus-specific params follow the common ones) */
2718 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
2719 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
2720 offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
2721 offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
2722 offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
2723 offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);
2725 offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);
/* For non-NIG targets, add a "bus_data" section whose size param is the
 * amount of recorded data (computed by a size-only call when dumping).
 */
2727 if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
2728 u32 recorded_dwords = 0;
2731 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);
2733 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
2734 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
/* Evaluates a mode expression from the modes tree buffer (prefix notation:
 * NOT/OR/AND operators followed by their operands) against the currently
 * enabled modes. Advances *modes_buf_offset past the consumed sub-tree.
 */
2740 static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
2741 u16 *modes_buf_offset)
2743 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2747 /* Get next element from modes tree buffer */
2748 tree_val = ((u8 *)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2751 case INIT_MODE_OP_NOT:
2752 return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2753 case INIT_MODE_OP_OR:
2754 case INIT_MODE_OP_AND:
/* Both operands are evaluated (no short-circuit) so the buffer offset
 * always advances past the full sub-tree.
 */
2755 arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2756 arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2757 return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
/* Leaf: values >= MAX_INIT_MODE_OPS index the mode_enable array */
2758 default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2762 /* Returns true if the specified entity (indicated by GRC param) should be
2763 * included in the dump, false otherwise.
2765 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2766 enum dbg_grc_params grc_param)
2768 return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2771 /* Returns true if the specified Storm should be included in the dump, false
/* Storm ids map directly onto the corresponding per-Storm GRC params */
2774 static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2775 enum dbg_storms storm)
2777 return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2780 /* Returns true if the specified memory should be included in the dump, false
2783 static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
2784 enum block_id block_id,
2787 struct block_defs *block = s_block_defs[block_id];
2790 /* Check Storm match: a memory of a Storm-associated block is excluded
2791 * when that Storm is excluded */
2791 if (block->associated_to_storm &&
2792 !ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
/* Big-RAM memories are controlled by their dedicated GRC params */
2795 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2796 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2798 if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
2799 return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
/* Map remaining memory groups onto their controlling GRC dump params */
2802 switch (mem_group_id) {
2803 case MEM_GROUP_PXP_ILT:
2804 case MEM_GROUP_PXP_MEM:
2805 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2807 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2808 case MEM_GROUP_PBUF:
2809 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2810 case MEM_GROUP_CAU_MEM:
2811 case MEM_GROUP_CAU_SB:
2812 case MEM_GROUP_CAU_PI:
2813 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2814 case MEM_GROUP_QM_MEM:
2815 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2816 case MEM_GROUP_CFC_MEM:
2817 case MEM_GROUP_CONN_CFC_MEM:
2818 case MEM_GROUP_TASK_CFC_MEM:
2819 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2820 case MEM_GROUP_IGU_MEM:
2821 case MEM_GROUP_IGU_MSIX:
2822 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2823 case MEM_GROUP_MULD_MEM:
2824 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2825 case MEM_GROUP_PRS_MEM:
2826 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2827 case MEM_GROUP_DMAE_MEM:
2828 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2829 case MEM_GROUP_TM_MEM:
2830 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2831 case MEM_GROUP_SDM_MEM:
2832 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2833 case MEM_GROUP_TDIF_CTX:
2834 case MEM_GROUP_RDIF_CTX:
2835 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2836 case MEM_GROUP_CM_MEM:
2837 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2839 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2845 /* Stalls all Storms (or un-stalls them when stall is false), then waits
2845 * for the stall to take effect. */
2846 static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2847 struct ecore_ptt *p_ptt,
/* Only Storms included in the dump (per their GRC params) are stalled */
2853 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2854 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2857 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2858 ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
/* Give the stall time to take effect */
2861 OSAL_MSLEEP(STALL_DELAY_MS);
2864 /* Takes all blocks out of reset */
2865 static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
2866 struct ecore_ptt *p_ptt)
2868 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2869 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2872 /* Fill reset regs values: set the un-reset bit of every block that
2873 * exists on this chip, has a reset bit, and is marked for un-reset */
2873 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2874 struct block_defs *block = s_block_defs[block_id];
2876 if (block->exists[dev_data->chip_id] && block->has_reset_bit && block->unreset)
2877 reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
2880 /* Write reset registers */
2881 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2882 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
/* Also apply the chip's default un-reset value for this register */
2885 reg_val[i] |= s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2888 ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
2892 /* Returns the attention block data of the specified block, for the given
2892 * attention type (interrupt or parity). */
2893 static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2894 enum dbg_attn_type attn_type)
2896 const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block *)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2898 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2901 /* Returns the attention registers of the specified block; the number of
2901 * registers is returned through *num_attn_regs. */
2902 static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2903 enum dbg_attn_type attn_type,
2906 const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2908 *num_attn_regs = block_type_data->num_regs;
2910 return &((const struct dbg_attn_reg *)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2913 /* For each block, clear the status of all parities */
2914 static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
2915 struct ecore_ptt *p_ptt)
2917 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2918 const struct dbg_attn_reg *attn_reg_arr;
2919 u8 reg_idx, num_attn_regs;
/* Blocks held in reset cannot be accessed */
2922 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2923 if (dev_data->block_in_reset[block_id])
2926 attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
2928 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2929 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
2930 u16 modes_buf_offset;
/* Some attention registers only apply under certain chip modes;
 * evaluate the register's mode expression if one is attached.
 */
2934 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
2935 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
2937 /* If Mode match: clear parity status (sts_clr register is
2938 * read-to-clear, so the read value is intentionally discarded) */
2938 if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
2939 ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
2944 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2945 * the following parameters are dumped:
2946 * - count: no. of dumped entries
2947 * - split: split type
2948 * - id: split ID (dumped only if split_id >= 0)
2949 * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
2950 * and param_val != OSAL_NULL).
2952 static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2954 u32 num_reg_entries,
2955 const char *split_type,
2957 const char *param_name,
2958 const char *param_val)
/* 2 mandatory params (count, split) plus the optional id and user param */
2960 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2963 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2964 offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2965 offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2967 offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2968 if (param_name && param_val)
2969 offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2974 /* Reads the specified registers into the specified buffer.
2975 * The addr and len arguments are specified in dwords.
2977 void ecore_read_regs(struct ecore_hwfn *p_hwfn,
2978 struct ecore_ptt *p_ptt,
/* One GRC read per dword */
2985 for (i = 0; i < len; i++)
2986 buf[i] = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2989 /* Dumps the GRC registers in the specified address range.
2990 * Returns the dumped size in dwords.
2991 * The addr and len arguments are specified in dwords.
/* Dumps the GRC register range [addr, addr+len) (dwords) into dump_buf.
 * Prefers a single DMAE transfer when enabled and the range is large enough
 * (or when a wide-bus register must be protected); falls back to per-dword
 * GRC reads on DMAE failure, permanently disabling DMAE for this dev_data.
 * Returns the dumped size in dwords.
 */
2993 static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2994 struct ecore_ptt *p_ptt,
3001 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3006 /* Print log if needed */
/* Throttled progress log: counter resets each time the platform-specific
 * threshold is crossed.
 */
3007 dev_data->num_regs_read += len;
3008 if (dev_data->num_regs_read >= s_platform_defs[dev_data->platform_id].log_thresh) {
3009 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers...\n", dev_data->num_regs_read);
3010 dev_data->num_regs_read = 0;
3013 /* Try reading using DMAE */
3014 if (dev_data->use_dmae && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || (PROTECT_WIDE_BUS && wide_bus))) {
3015 if (!ecore_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), (u64)(osal_uintptr_t)(dump_buf), len, OSAL_NULL))
/* DMAE failed: disable it for all subsequent reads and fall through to GRC */
3017 dev_data->use_dmae = 0;
3018 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Failed reading from chip using DMAE, using GRC instead\n");
3021 /* Read registers */
3022 ecore_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
3027 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
3028 * The addr and len arguments are specified in dwords.
/* Writes a one-dword register sequence header: address in the low bits,
 * length (dwords) shifted by REG_DUMP_LEN_SHIFT. Returns dumped size.
 */
3030 static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
3036 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
3041 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
3042 * The addr and len arguments are specified in dwords.
/* Dumps one register sequence: a one-dword header followed by the register
 * data read via ecore_grc_dump_addr_range(). addr/len are in dwords.
 * Returns the total dumped size in dwords.
 */
3044 static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
3045 struct ecore_ptt *p_ptt,
3054 offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
3055 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3060 /* Dumps GRC registers sequence with skip cycle.
3061 * Returns the dumped size in dwords.
3062 * - addr: start GRC address in dwords
3063 * - total_len: total no. of dwords to dump
3064 * - read_len: no. consecutive dwords to read
3065 * - skip_len: no. of dwords to skip (and fill with zeros)
/* Dumps a register sequence with a read/skip cycle: alternates reading
 * 'read_len' dwords with zero-filling 'skip_len' dwords, until 'total_len'
 * dwords are produced. When not dumping, only the total size is accounted.
 * Returns the dumped size in dwords.
 */
3067 static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
3068 struct ecore_ptt *p_ptt,
3076 u32 offset = 0, reg_offset = 0;
3078 offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
/* Size-only pass: header plus the full (unread) data length */
3081 return offset + total_len;
3083 while (reg_offset < total_len) {
3084 u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3086 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3087 reg_offset += curr_len;
3090 if (reg_offset < total_len) {
/* NOTE(review): 'total_len - skip_len' looks suspicious here - the
 * symmetric clamp would be 'total_len - reg_offset' (as used for the
 * read above). Confirm against the vendor source before changing.
 */
3091 curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
/* Skipped dwords are dumped as zeros */
3092 OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3094 reg_offset += curr_len;
3102 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
/* Walks the input registers array (condition header + register entries),
 * dumping each register whose condition header passes the mode check and
 * whose block is enabled. Outputs the number of dumped register entries via
 * num_dumped_reg_entries. Returns the dumped size in dwords.
 */
3103 static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
3104 struct ecore_ptt *p_ptt,
3105 struct dbg_array input_regs_arr,
3108 bool block_enable[MAX_BLOCK_ID],
3109 u32 *num_dumped_reg_entries)
3111 u32 i, offset = 0, input_offset = 0;
3112 bool mode_match = true;
3114 *num_dumped_reg_entries = 0;
3116 while (input_offset < input_regs_arr.size_in_dwords) {
3117 const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *)&input_regs_arr.ptr[input_offset++];
3118 u16 modes_buf_offset;
3121 /* Check mode/block */
3122 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3124 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3125 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
/* Condition failed or block disabled: skip this header's entries */
3128 if (!mode_match || !block_enable[cond_hdr->block_id]) {
3129 input_offset += cond_hdr->data_size;
/* Dump each register entry under this condition header */
3133 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
3134 const struct dbg_dump_reg *reg = (const struct dbg_dump_reg *)&input_regs_arr.ptr[input_offset];
3136 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3137 GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
3138 GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
3139 GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
3140 (*num_dumped_reg_entries)++;
3147 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
/* Dumps one split's register entries with a proper header: first computes
 * the header size without writing it, dumps the entries after it, then
 * rewrites the header with the real entry count. Returns 0 when no entries
 * were dumped (the header is then dropped), else the dumped size in dwords.
 */
3148 static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3149 struct ecore_ptt *p_ptt,
3150 struct dbg_array input_regs_arr,
3153 bool block_enable[MAX_BLOCK_ID],
3154 const char *split_type_name,
3156 const char *param_name,
3157 const char *param_val)
3159 u32 num_dumped_reg_entries, offset;
3161 /* Calculate register dump header size (and skip it for now) */
/* dump=false: only the header size is computed, nothing is written yet */
3162 offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3164 /* Dump registers */
3165 offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3167 /* Write register dump header */
/* Back-patch the header now that the true entry count is known */
3168 if (dump && num_dumped_reg_entries > 0)
3169 ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3171 return num_dumped_reg_entries > 0 ? offset : 0;
3174 /* Dumps registers according to the input registers array. Returns the dumped
/* Dumps registers per the binary debug input array BIN_BUF_DBG_DUMP_REG.
 * Each split header selects a split type: engine-wide (no split), per-port
 * (port pretend), per-PF or per-VF (FID pretend). Restores the original PF
 * pretend before returning. Returns the dumped size in dwords.
 */
3177 static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
3178 struct ecore_ptt *p_ptt,
3181 bool block_enable[MAX_BLOCK_ID],
3182 const char *param_name,
3183 const char *param_val)
3185 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3186 struct chip_platform_defs *chip_platform;
3187 u32 offset = 0, input_offset = 0;
3188 u8 port_id, pf_id, vf_id;
3190 chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];
3192 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
3193 const struct dbg_dump_split_hdr *split_hdr;
3194 struct dbg_array curr_input_regs_arr;
3195 u32 split_data_size;
3198 split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
3199 split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3200 split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3201 curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
3202 curr_input_regs_arr.size_in_dwords = split_data_size;
3204 switch(split_type_id) {
3205 case SPLIT_TYPE_NONE:
/* Engine-wide dump; split id (u32)(-1) marks "no split id" */
3206 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
3209 case SPLIT_TYPE_PORT:
/* One sub-dump per port, using port pretend */
3210 for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
3212 ecore_port_pretend(p_hwfn, p_ptt, port_id);
3213 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
3218 case SPLIT_TYPE_PORT_PF:
/* One sub-dump per PF, using concrete-FID pretend */
3219 for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
3221 ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3222 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
/* One sub-dump per VF; VFVALID bit marks the FID as a VF */
3227 for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
3229 ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
3230 offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
3238 input_offset += split_data_size;
3241 /* Pretend to original PF */
3243 ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3248 /* Dump reset registers. Returns the dumped size in dwords. */
/* Dumps all reset registers that exist for the current chip, one dword each,
 * under an engine-wide "grc_regs" header that is back-patched with the final
 * count. Returns the dumped size in dwords.
 */
3249 static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3250 struct ecore_ptt *p_ptt,
3254 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3255 u32 i, offset = 0, num_regs = 0;
3257 /* Calculate header size */
3258 offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3260 /* Write reset registers */
3261 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
/* Skip reset registers not present on this chip */
3262 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3265 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
/* Back-patch header with the actual register count */
3271 ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3276 /* Dump registers that are modified during GRC Dump and therefore must be
3277 * dumped first. Returns the dumped size in dwords.
/* Dumps registers that the GRC dump itself will modify (parity mask/status
 * registers per block, and per-Storm stall status), so their pre-dump values
 * are preserved. Header is back-patched with the final entry count.
 * Returns the dumped size in dwords.
 */
3279 static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
3280 struct ecore_ptt *p_ptt,
3284 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3285 u32 block_id, offset = 0, num_reg_entries = 0;
3286 const struct dbg_attn_reg *attn_reg_arr;
3287 u8 storm_id, reg_idx, num_attn_regs;
3289 /* Calculate header size */
3290 offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3292 /* Write parity registers */
3293 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
/* A block in reset cannot be read during an actual dump */
3294 if (dev_data->block_in_reset[block_id] && dump)
3297 attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
3299 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
3300 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
3301 u16 modes_buf_offset;
3305 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3306 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
/* Skip registers whose mode condition doesn't match */
3307 if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
3310 /* Mode match: read & dump registers */
3311 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
3312 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
3313 num_reg_entries += 2;
3317 /* Write Storm stall status registers */
3318 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3319 struct storm_defs *storm = &s_storm_defs[storm_id];
3321 if (dev_data->block_in_reset[storm->block_id] && dump)
3324 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3325 BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
/* Back-patch header with the actual entry count */
3331 ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);
3336 /* Dumps registers that can't be represented in the debug arrays */
/* Dumps the two R/TDIF debug-error-info register blocks, which cannot be
 * expressed in the binary debug arrays: every 8th register is read and the
 * following 7 dwords are zero-filled (read_len=7 with the header's
 * documented meaning - see ecore_grc_dump_reg_entry_skip).
 * Returns the dumped size in dwords.
 */
3337 static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3338 struct ecore_ptt *p_ptt,
3344 offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3346 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3349 offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3350 offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3355 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3356 * dwords. The following parameters are dumped:
3357 * - name: dumped only if it's not OSAL_NULL.
3358 * - addr: in dwords, dumped only if name is OSAL_NULL.
3359 * - len: in dwords, always dumped.
3360 * - width: dumped if it's not zero.
3361 * - packed: dumped only if it's not false.
3362 * - mem_group: always dumped.
3363 * - is_storm: true only if the memory is related to a Storm.
3364 * - storm_letter: valid only if is_storm is true.
/* Writes a "grc_mem" section header. Dumped params (see comment above):
 * name or byte address, length, optional bit width, optional packed flag,
 * and memory-group type. Storm memories get the Storm letter prefixed to
 * both name and type ("<letter>STORM_..."). Returns dumped size in dwords.
 */
3367 static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3375 const char *mem_group,
/* Zero-length memory is a caller bug */
3384 DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3391 /* Dump section header */
3392 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
/* Build "<storm letter>STORM_<name>": the '?' placeholder is overwritten */
3398 OSAL_STRCPY(buf, "?STORM_");
3399 buf[0] = storm_letter;
3400 OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3403 OSAL_STRCPY(buf, name);
3406 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
/* No name given: dump the byte address instead */
3411 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3413 offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3417 offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3419 /* Dump bit width */
3421 offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3425 offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
/* Same Storm-letter prefixing for the memory-group type name */
3429 OSAL_STRCPY(buf, "?STORM_");
3430 buf[0] = storm_letter;
3431 OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3434 OSAL_STRCPY(buf, mem_group);
3437 offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3442 /* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by address.
3443 * Returns the dumped size in dwords.
3444 * The addr and len arguments are specified in dwords.
/* Dumps a single GRC memory: header (via ecore_grc_dump_mem_hdr) followed by
 * the memory contents (via ecore_grc_dump_addr_range). addr/len in dwords.
 * Returns the dumped size in dwords.
 */
3446 static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3447 struct ecore_ptt *p_ptt,
3456 const char *mem_group,
3462 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3463 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3468 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
/* Walks the input memories array (condition header + memory entries), dumping
 * each included memory. CCFC/TCFC memory lengths are rescaled by the
 * configured number of LCIDs/LTIDs; Storm-associated memories get the Storm
 * letter in their dump name. Returns the dumped size in dwords.
 */
3469 static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
3470 struct ecore_ptt *p_ptt,
3471 struct dbg_array input_mems_arr,
3475 u32 i, offset = 0, input_offset = 0;
3476 bool mode_match = true;
3478 while (input_offset < input_mems_arr.size_in_dwords) {
3479 const struct dbg_dump_cond_hdr *cond_hdr;
3480 u16 modes_buf_offset;
3484 cond_hdr = (const struct dbg_dump_cond_hdr *)&input_mems_arr.ptr[input_offset++];
3485 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3487 /* Check required mode */
3488 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3490 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3491 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
/* Mode mismatch: skip all entries under this condition header */
3495 input_offset += cond_hdr->data_size;
3499 for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3500 const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *)&input_mems_arr.ptr[input_offset];
3501 u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
3502 bool is_storm = false, mem_wide_bus;
3503 char storm_letter = 'a';
3504 u32 mem_addr, mem_len;
/* Malformed binary input: group id out of range */
3506 if (mem_group_id >= MEM_GROUPS_NUM) {
3507 DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
/* Honor the GRC dump parameters for memory-group inclusion */
3511 if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
3514 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3515 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3516 mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);
3518 /* Update memory length for CCFC/TCFC memories
3519 * according to number of LCIDs/LTIDs.
/* Scale per-LCID entry size by the configured LCID count */
3521 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3522 if (mem_len % MAX_LCIDS) {
3523 DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
3527 mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
3529 else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3530 if (mem_len % MAX_LTIDS) {
3531 DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
3535 mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
3538 /* If memory is associated with Storm, update Storm
3541 if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
3543 storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
3547 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
3548 0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
3555 /* Dumps GRC memories according to the input array dump_mem.
3556 * Returns the dumped size in dwords.
/* Walks the BIN_BUF_DBG_DUMP_MEM binary array split-by-split and dumps each
 * non-split section via ecore_grc_dump_mem_entries. Split memories are not
 * supported and raise a notice. Returns the dumped size in dwords.
 */
3558 static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3559 struct ecore_ptt *p_ptt,
3563 u32 offset = 0, input_offset = 0;
3565 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3566 const struct dbg_dump_split_hdr *split_hdr;
3567 struct dbg_array curr_input_mems_arr;
3568 u32 split_data_size;
3571 split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3572 split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3573 split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3574 curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3575 curr_input_mems_arr.size_in_dwords = split_data_size;
3577 switch (split_type_id) {
3578 case SPLIT_TYPE_NONE:
3579 offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
/* Any other split type for memories is unsupported */
3583 DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3587 input_offset += split_data_size;
3593 /* Dumps GRC context data for the specified Storm.
3594 * Returns the dumped size in dwords.
3595 * The lid_size argument is specified in quad-regs.
/* Dumps one Storm's context data (AG/ST, connection/task) by writing the
 * (lid, reg index) selector to the Storm's CM context write address and
 * reading back one dword at a time. lid_size arrives in quad-regs and is
 * converted to dwords. Returns the dumped size in dwords.
 */
3597 static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
3598 struct ecore_ptt *p_ptt,
3607 struct storm_defs *storm = &s_storm_defs[storm_id];
3608 u32 i, lid, total_size, offset = 0;
/* Convert lid_size from quad-regs to dwords */
3613 lid_size *= BYTES_IN_DWORD;
3614 total_size = num_lids * lid_size;
3616 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);
/* Size-only pass */
3619 return offset + total_size;
3621 /* Dump context data */
3622 for (lid = 0; lid < num_lids; lid++) {
3623 for (i = 0; i < lid_size; i++, offset++) {
/* Selector format: register index in bits 9+, lid in the low bits */
3624 ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3625 *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
3632 /* Dumps GRC contexts. Returns the dumped size in dwords. */
/* Dumps all four context types (Conn AG/ST, Task AG/ST) for every included
 * Storm. Connection contexts are sized by NUM_LCIDS, task contexts by
 * NUM_LTIDS. Returns the dumped size in dwords.
 */
3633 static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
3634 struct ecore_ptt *p_ptt,
3641 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3642 struct storm_defs *storm = &s_storm_defs[storm_id];
3644 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3647 /* Dump Conn AG context size */
3648 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3649 storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);
3651 /* Dump Conn ST context size */
3652 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3653 storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);
3655 /* Dump Task AG context size */
3656 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3657 storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);
3659 /* Dump Task ST context size */
3660 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3661 storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
3667 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
/* Dumps the IOR sets of every included Storm from the SEM-fast Storm
 * register file. The trailing '?' of "IOR_SET_?" is replaced with the set
 * digit per iteration. Returns the dumped size in dwords.
 */
3668 static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3669 struct ecore_ptt *p_ptt,
3673 char buf[10] = "IOR_SET_?";
3674 u32 addr, offset = 0;
3675 u8 storm_id, set_id;
3677 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3678 struct storm_defs *storm = &s_storm_defs[storm_id];
3680 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3683 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3684 addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
/* Patch the set digit into the name template */
3685 buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3686 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3693 /* Dump VFC CAM. Returns the dumped size in dwords. */
/* Dumps the VFC CAM of one Storm: for each row, issues a CAM-read command
 * and address through the SEM-fast VFC interface and reads back the
 * response dwords. Returns the dumped size in dwords.
 */
3694 static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
3695 struct ecore_ptt *p_ptt,
3700 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3701 struct storm_defs *storm = &s_storm_defs[storm_id];
3702 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3703 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3704 u32 row, i, offset = 0;
3706 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);
/* Size-only pass */
3709 return offset + total_size;
3711 /* Prepare CAM address */
3712 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3714 for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {
3716 /* Write VFC CAM command */
3717 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3718 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);
3720 /* Write VFC CAM address */
3721 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);
3723 /* Read VFC CAM read response */
3724 ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
3730 /* Dump VFC RAM. Returns the dumped size in dwords. */
/* Dumps one VFC RAM region of a Storm: for each row in the region, issues a
 * RAM-read command and row address through the SEM-fast VFC interface and
 * reads back the response dwords. Returns the dumped size in dwords.
 */
3731 static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
3732 struct ecore_ptt *p_ptt,
3736 struct vfc_ram_defs *ram_defs)
3738 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3739 struct storm_defs *storm = &s_storm_defs[storm_id];
3740 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3741 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3742 u32 row, i, offset = 0;
3744 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);
3746 /* Prepare RAM address */
3747 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
/* Size-only pass */
3750 return offset + total_size;
3752 for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {
3754 /* Write VFC RAM command */
3755 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);
3757 /* Write VFC RAM address */
3758 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3759 ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);
3761 /* Read VFC RAM read response */
3762 ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
3768 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
/* Dumps VFC data (CAM plus all RAM types) for every included Storm that has
 * a VFC. PSTORM's VFC is skipped on non-ASIC platforms. Returns the dumped
 * size in dwords.
 */
3769 static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3770 struct ecore_ptt *p_ptt,
3774 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3778 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3779 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3780 !s_storm_defs[storm_id].has_vfc ||
3781 (storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3785 offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3788 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3789 offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3795 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
/* Dumps all RSS memory types by programming RSS_REG_RSS_RAM_ADDR and reading
 * the data register in chunks of at most RSS_REG_RSS_RAM_DATA_SIZE dwords.
 * 16-bit-wide entries are marked "packed". Returns the dumped size in
 * dwords.
 */
3796 static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
3797 struct ecore_ptt *p_ptt,
3801 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3805 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3806 u32 rss_addr, num_entries, total_dwords;
3807 struct rss_mem_defs *rss_defs;
3810 rss_defs = &s_rss_mem_defs[rss_mem_id];
3811 rss_addr = rss_defs->addr;
3812 num_entries = rss_defs->num_entries[dev_data->chip_id];
/* entry_width is in bits; 32 bits per dword */
3813 total_dwords = (num_entries * rss_defs->entry_width) / 32;
3814 packed = (rss_defs->entry_width == 16);
3816 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
3817 rss_defs->entry_width, packed, rss_defs->type_name, false, 0);
/* Size-only pass: account for data without reading it */
3821 offset += total_dwords;
3825 while (total_dwords) {
3826 u32 num_dwords_to_read = OSAL_MIN_T(u32, RSS_REG_RSS_RAM_DATA_SIZE, total_dwords);
/* Select the RAM window, then read through the data register */
3827 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3828 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), num_dwords_to_read, false);
3829 total_dwords -= num_dwords_to_read;
3837 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
/* Dumps one Big RAM instance: block size (128B or 256B) is detected from
 * the per-chip is_256b register bit, and data is read via the instance's
 * address/data register pair in BRB_REG_BIG_RAM_DATA_SIZE chunks. The
 * instance name replaces the "???" in both name templates.
 * Returns the dumped size in dwords.
 */
3838 static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
3839 struct ecore_ptt *p_ptt,
3844 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3845 u32 block_size, ram_size, offset = 0, reg_val, i;
3846 char mem_name[12] = "???_BIG_RAM";
3847 char type_name[8] = "???_RAM";
3848 struct big_ram_defs *big_ram;
3850 big_ram = &s_big_ram_defs[big_ram_id];
3851 ram_size = big_ram->ram_size[dev_data->chip_id];
3853 reg_val = ecore_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3854 block_size = reg_val & (1 << big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128;
/* Overwrite only the "???" prefix, keeping the "_BIG_RAM"/"_RAM" suffix */
3856 OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3857 OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3859 /* Dump memory header */
3860 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, block_size * 8, false, type_name, false, 0);
3862 /* Read and dump Big RAM data */
/* Size-only pass */
3864 return offset + ram_size;
3867 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); i++) {
3868 ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3869 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), BRB_REG_BIG_RAM_DATA_SIZE, false);
/* Dumps MCP data: scratchpad, cpu_reg_file, MCP block registers, and the
 * shared-memory address register. On ASIC (unless NO_MCP is set), the MCP
 * is halted for the duration of the dump and resumed afterwards; failures
 * of halt/resume are reported but non-fatal. Returns dumped size in dwords.
 */
3875 static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
3876 struct ecore_ptt *p_ptt,
3880 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3881 bool block_enable[MAX_BLOCK_ID] = { 0 };
3882 bool halted = false;
/* Halt MCP only during a real dump on ASIC, unless the user disabled MCP */
3886 if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3887 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
3889 DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
3892 /* Dump MCP scratchpad */
3893 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3894 ECORE_IS_E5(p_hwfn->p_dev) ? MCP_REG_SCRATCH_SIZE_E5 : MCP_REG_SCRATCH_SIZE_BB_K2, false, 0, false, "MCP", false, 0);
3896 /* Dump MCP cpu_reg_file */
3897 offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3898 MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);
3900 /* Dump MCP registers */
/* Only the MCP block is enabled for this register pass */
3901 block_enable[BLOCK_MCP] = true;
3902 offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");
3904 /* Dump required non-MCP registers */
3905 offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
3906 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);
/* Resume MCP only if we actually halted it */
3909 if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
3910 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
3915 /* Dumps the tbus indirect memory for all PHYs. */
/* Dumps the tbus indirect memory of every PHY in s_phy_defs: for each
 * hi/lo tbus address pair, writes the address registers and reads one byte
 * from each of the lo/hi data registers, packing bytes into the dword
 * buffer. Each PHY section is marked "packed". Returns dumped size in
 * dwords.
 */
3916 static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
3917 struct ecore_ptt *p_ptt,
3921 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3925 for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
3926 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3927 struct phy_defs *phy_defs;
3930 phy_defs = &s_phy_defs[phy_id];
3931 addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
3932 addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
3933 data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
3934 data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
3936 if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
3937 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");
3939 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);
/* Size-only pass */
3942 offset += PHY_DUMP_SIZE_DWORDS;
/* Fill the dword buffer byte-wise: lo byte then hi byte per address */
3946 bytes_buf = (u8 *)(dump_buf + offset);
3947 for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
3948 ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3949 for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
3950 ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
3951 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
3952 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
3956 offset += PHY_DUMP_SIZE_DWORDS;
/* Configures one block's debug line: writes the line selector, enable mask,
 * right shift and force-valid/force-frame masks to the block's debug
 * registers.
 */
3962 static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
3963 struct ecore_ptt *p_ptt,
3964 enum block_id block_id,
3968 u8 force_valid_mask,
3969 u8 force_frame_mask)
3971 struct block_defs *block = s_block_defs[block_id];
3973 ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3974 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3975 ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3976 ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3977 ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3980 /* Dumps Static Debug data. Returns the dumped size in dwords. */
/* Dumps static debug data: for every block with a debug-bus client on this
 * chip, iterates all its debug lines, configures each line and reads the
 * calendar output. Aborts (returns 0) if a debug-bus recording is active.
 * Blocks held in reset are dumped as zeros. Returns dumped size in dwords.
 */
3981 static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
3982 struct ecore_ptt *p_ptt,
3986 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3987 u32 block_id, line_id, offset = 0;
3989 /* don't dump static debug if a debug bus recording is in progress */
3990 if (dump && ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3994 /* Disable all blocks debug output */
3995 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3996 struct block_defs *block = s_block_defs[block_id];
3998 if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS)
3999 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
/* Prepare the debug block: reset, 8HW/0ST framing, internal-buffer target */
4002 ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4003 ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
4004 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
4005 ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
4006 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
4009 /* Dump all static debug lines for each relevant block */
4010 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
4011 struct block_defs *block = s_block_defs[block_id];
4012 struct dbg_bus_block *block_desc;
/* Skip blocks with no debug-bus client on this chip */
4015 if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS)
4018 block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
4019 block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;
4021 /* Dump static section params */
4022 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);
/* Size-only pass */
4025 offset += block_dwords;
4029 /* If all lines are invalid - dump zeros */
4030 if (dev_data->block_in_reset[block_id]) {
4031 OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
4032 offset += block_dwords;
4036 /* Enable block's client */
4037 ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
4038 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {
4040 /* Configure debug line ID */
4041 ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);
4043 /* Read debug line info */
4044 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
4047 /* Disable block's client and debug output */
4048 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
4049 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
/* Leave the debug bus fully disabled on exit */
4053 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
4054 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
4060 /* Performs GRC Dump to the specified buffer.
4061 * Returns the dumped size in dwords.
/* NOTE(review): this listing is lossy - parts of the parameter list, the
 * declaration of 'offset' and several braces were elided by extraction.
 * Comments below describe only what the visible code shows.
 */
4063 static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
4064 struct ecore_ptt *p_ptt,
4067 u32 *num_dumped_dwords)
4069 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4070 bool is_asic, parities_masked = false;
4071 u8 i, port_mode = 0;
4074 is_asic = dev_data->platform_id == PLATFORM_ASIC;
/* Cleared up-front so the out-parameter is valid on every early return */
4076 *num_dumped_dwords = 0;
4080 /* Find port mode */
4081 switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
4082 case 0: port_mode = 1; break;
4083 case 1: port_mode = 2; break;
4084 case 2: port_mode = 4; break;
4087 /* Update reset state */
4088 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4091 /* Dump global params */
4092 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
4093 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
4094 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
4095 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
4096 offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);
4098 /* Dump reset registers (dumped before taking blocks out of reset ) */
4099 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4100 offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4102 /* Take all blocks out of reset (using reset registers) */
4104 ecore_grc_unreset_blocks(p_hwfn, p_ptt);
4105 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4108 /* Disable all parities using MFW command */
4109 if (dump && is_asic && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4110 parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
4111 if (!parities_masked) {
4112 DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
/* Masking failure is fatal only when the parity-safe GRC param is set */
4113 if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4114 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4118 /* Dump modified registers (dumped before modifying them) */
4119 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4120 offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
/* Stall storms before reading IORs/VFC so their state is consistent */
4123 if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4124 ecore_grc_stall_storms(p_hwfn, p_ptt, true);
4127 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4128 bool block_enable[MAX_BLOCK_ID];
4130 /* Dump all blocks except MCP */
4131 for (i = 0; i < MAX_BLOCK_ID; i++)
4132 block_enable[i] = true;
4133 block_enable[BLOCK_MCP] = false;
4134 offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);
4136 /* Dump special registers */
4137 offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4141 offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4144 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4145 offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);
4148 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4149 offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);
4151 /* Dump RSS memories */
4152 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4153 offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);
4156 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4157 if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4158 offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);
4161 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4162 offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);
4165 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4166 offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);
/* PHY dump is restricted to K2 ASICs by the visible condition */
4169 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4170 offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);
4172 /* Dump static debug data */
4173 if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
4174 offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);
4176 /* Dump last section */
4177 offset += ecore_dump_last_section(dump_buf, offset, dump);
4181 /* Unstall storms */
4182 if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4183 ecore_grc_stall_storms(p_hwfn, p_ptt, false);
4185 /* Clear parity status */
4187 ecore_grc_clear_all_prty(p_hwfn, p_ptt);
4189 /* Enable all parities using MFW command */
4190 if (parities_masked)
4191 ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
4194 *num_dumped_dwords = offset;
4196 return DBG_STATUS_OK;
4199 /* Writes the specified failing Idle Check rule to the specified buffer.
4200 * Returns the dumped size in dwords.
/* FIX(review): the two lines below had suffered HTML-entity corruption -
 * "&regs" had collapsed into the "(R)" sign ("(reg)s"); restored to "&regs".
 * NOTE(review): this listing is lossy (parts of the parameter list, some
 * declarations and closing braces were elided by extraction).
 */
4202 static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
4203 struct ecore_ptt *p_ptt,
4207 const struct dbg_idle_chk_rule *rule,
4209 u32 *cond_reg_values)
4211 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4212 const struct dbg_idle_chk_cond_reg *cond_regs;
4213 const struct dbg_idle_chk_info_reg *info_regs;
4214 u32 i, next_reg_offset = 0, offset = 0;
4215 struct dbg_idle_chk_result_hdr *hdr;
4216 const union dbg_idle_chk_reg *regs;
4219 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
/* Rule's registers: condition regs first, then info regs */
4220 regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4221 cond_regs = &regs[0].cond_reg;
4222 info_regs = &regs[rule->num_cond_regs].info_reg;
4224 /* Dump rule data */
4226 OSAL_MEMSET(hdr, 0, sizeof(*hdr));
4227 hdr->rule_id = rule_id;
4228 hdr->mem_entry_id = fail_entry_id;
4229 hdr->severity = rule->severity;
4230 hdr->num_dumped_cond_regs = rule->num_cond_regs;
4233 offset += IDLE_CHK_RESULT_HDR_DWORDS;
4235 /* Dump condition register values */
4236 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4237 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4238 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4240 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
4242 /* Write register header */
4244 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
4248 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4249 OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4250 reg_hdr->start_entry = reg->start_entry;
4251 reg_hdr->size = reg->entry_size;
/* A register is marked as memory when it has multiple entries or a
 * non-zero start entry.
 */
4252 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4253 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4255 /* Write register values */
4256 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4257 dump_buf[offset] = cond_reg_values[next_reg_offset];
4260 /* Dump info register values */
4261 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4262 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4265 /* Check if register's block is in reset */
4267 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4271 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4272 if (block_id >= MAX_BLOCK_ID) {
4273 DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4277 if (!dev_data->block_in_reset[block_id]) {
4278 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4279 bool wide_bus, eval_mode, mode_match = true;
4280 u16 modes_buf_offset;
4283 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
/* Check mode - info reg is dumped only when its mode matches */
4286 eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4288 modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4289 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4295 addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
4296 wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4298 /* Write register header */
4299 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4300 hdr->num_dumped_info_regs++;
4301 OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4302 reg_hdr->size = reg->size;
4303 SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);
4305 /* Write register values */
4306 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
4313 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
/* FIX(review): the "cond_regs" assignment below had suffered HTML-entity
 * corruption - "&regs" had collapsed into the "(R)" sign; restored to "&regs".
 * NOTE(review): this listing is lossy (some declarations, braces and
 * continue statements were elided by extraction).
 */
4314 static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
4315 struct ecore_ptt *p_ptt,
4318 const struct dbg_idle_chk_rule *input_rules,
4319 u32 num_input_rules,
4320 u32 *num_failing_rules)
4322 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4323 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4328 *num_failing_rules = 0;
4330 for (i = 0; i < num_input_rules; i++) {
4331 const struct dbg_idle_chk_cond_reg *cond_regs;
4332 const struct dbg_idle_chk_rule *rule;
4333 const union dbg_idle_chk_reg *regs;
4334 u16 num_reg_entries = 1;
4335 bool check_rule = true;
4336 const u32 *imm_values;
4338 rule = &input_rules[i];
4339 regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4340 cond_regs = &regs[0].cond_reg;
4341 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];
4343 /* Check if all condition register blocks are out of reset, and
4344 * find maximal number of entries (all condition registers that
4345 * are memories must have the same size, which is > 1).
4347 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
4348 u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4350 if (block_id >= MAX_BLOCK_ID) {
4351 DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4355 check_rule = !dev_data->block_in_reset[block_id];
4356 if (cond_regs[reg_id].num_entries > num_reg_entries)
4357 num_reg_entries = cond_regs[reg_id].num_entries;
/* Rules whose blocks are in reset are reported as failures when dumping */
4360 if (!check_rule && dump)
4364 u32 entry_dump_size = ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, 0, OSAL_NULL);
4366 offset += num_reg_entries * entry_dump_size;
4367 (*num_failing_rules) += num_reg_entries;
4371 /* Go over all register entries (number of entries is the same for all
4372 * condition registers).
4374 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4375 u32 next_reg_offset = 0;
4377 /* Read current entry of all condition registers */
4378 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4379 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4380 u32 padded_entry_size, addr;
4383 /* Find GRC address (if it's a memory, the address of the
4384 * specific entry is calculated).
4386 addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
4387 wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4388 if (reg->num_entries > 1 || reg->start_entry > 0) {
/* Memory entries are padded to a power-of-two stride */
4389 padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
4390 addr += (reg->start_entry + entry_id) * padded_entry_size;
4393 /* Read registers */
4394 if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
4395 DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
4399 next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
4402 /* Call rule condition function. if returns true, it's a failure.*/
4403 if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
4404 offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
4405 (*num_failing_rules)++;
4413 /* Performs Idle Check Dump to the specified buffer.
4414 * Returns the dumped size in dwords.
4416 static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
4417 struct ecore_ptt *p_ptt,
4421 u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;
4423 /* Dump global params */
4424 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4425 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");
4427 /* Dump idle check section header with a single parameter */
4428 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
/* num_rules is written as 0 now and patched at this offset afterwards */
4429 num_failing_rules_offset = offset;
4430 offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
/* Walk the binary rules array: each condition header is followed by
 * data_size dwords of rules.
 */
4432 while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4433 const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
4434 bool eval_mode, mode_match = true;
4435 u32 curr_failing_rules;
4436 u16 modes_buf_offset;
/* Check mode - a rule group is evaluated only when its mode matches */
4439 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4441 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4442 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4446 offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
4447 num_failing_rules += curr_failing_rules;
4450 input_offset += cond_hdr->data_size;
4453 /* Overwrite num_rules parameter */
4455 ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);
4457 /* Dump last section */
4458 offset += ecore_dump_last_section(dump_buf, offset, dump);
4463 /* Finds the meta data image in NVRAM */
/* Looks up an NVRAM image by type via the MFW mailbox and returns its
 * byte offset and size through the out-parameters. Fails with
 * DBG_STATUS_NVRAM_GET_IMAGE_FAILED on mailbox error and with
 * DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE when the size is not dword-aligned.
 */
4464 static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
4465 struct ecore_ptt *p_ptt,
4467 u32 *nvram_offset_bytes,
4468 u32 *nvram_size_bytes)
4470 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4471 struct mcp_file_att file_att;
4474 /* Call NVRAM get file command */
4475 nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32 *)&file_att);
4477 /* Check response */
4478 if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4479 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4481 /* Update return values */
4482 *nvram_offset_bytes = file_att.nvm_start_addr;
4483 *nvram_size_bytes = file_att.len;
4485 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);
4487 /* Check alignment */
4488 if (*nvram_size_bytes & 0x3)
4489 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4491 return DBG_STATUS_OK;
4494 /* Reads data from NVRAM */
/* Reads nvram_size_bytes from the given NVRAM byte offset into ret_buf,
 * in chunks of at most MCP_DRV_NVM_BUF_LEN bytes per mailbox command.
 * Returns DBG_STATUS_NVRAM_READ_FAILED on any mailbox/response error.
 */
4495 static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4496 struct ecore_ptt *p_ptt,
4497 u32 nvram_offset_bytes,
4498 u32 nvram_size_bytes,
/* bytes_left is signed so the loop condition terminates cleanly even if
 * the MFW returns a chunk larger than requested.
 */
4501 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4502 s32 bytes_left = nvram_size_bytes;
4503 u32 read_offset = 0;
4505 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4508 bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4510 /* Call NVRAM read command */
4511 if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset)))
4512 return DBG_STATUS_NVRAM_READ_FAILED;
4514 /* Check response */
4515 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4516 return DBG_STATUS_NVRAM_READ_FAILED;
4518 /* Update read offset */
4519 read_offset += ret_read_size;
4520 bytes_left -= ret_read_size;
4521 } while (bytes_left > 0);
4523 return DBG_STATUS_OK;
4526 /* Get info on the MCP Trace data in the scratchpad:
4527 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4528 * - trace_data_size (OUT): trace data size in bytes (without the header)
/* Returns DBG_STATUS_INVALID_TRACE_SIGNATURE when the scratchpad trace
 * section does not carry MFW_TRACE_SIGNATURE.
 */
4530 static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4531 struct ecore_ptt *p_ptt,
4532 u32 *trace_data_grc_addr,
4533 u32 *trace_data_size)
4535 u32 spad_trace_offsize, signature;
4537 /* Read trace section offsize structure from MCP scratchpad */
4538 spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4540 /* Extract trace section address from offsize (in scratchpad) */
4541 *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4543 /* Read signature from MCP trace section */
4544 signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4546 if (signature != MFW_TRACE_SIGNATURE)
4547 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4549 /* Read trace size from MCP trace section */
4550 *trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4552 return DBG_STATUS_OK;
4555 /* Reads MCP trace meta data image from NVRAM
4556 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4557 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4558 * loaded from file).
4559 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4561 static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4562 struct ecore_ptt *p_ptt,
4563 u32 trace_data_size_bytes,
4564 u32 *running_bundle_id,
4565 u32 *trace_meta_offset,
4566 u32 *trace_meta_size)
4568 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4570 /* Read MCP trace section offsize structure from MCP scratchpad */
4571 spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4573 /* Find running bundle ID */
/* The running-bundle dword sits right after the trace section + data */
4574 running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4575 *running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4576 if (*running_bundle_id > 1)
4577 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4579 /* Find image in NVRAM */
/* Bundle 1 maps to MFW_TRACE1, otherwise MFW_TRACE2 */
4580 nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4581 return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4584 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
/* After the NVRAM read, the image is validated by walking it: a leading
 * NVM_MAGIC_VALUE signature, a module count, the (skipped) module name
 * strings, and a trailing NVM_MAGIC_VALUE signature.
 */
4585 static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4586 struct ecore_ptt *p_ptt,
4587 u32 nvram_offset_in_bytes,
4591 u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4592 enum dbg_status status;
4595 /* Read meta data from NVRAM */
4596 status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4597 if (status != DBG_STATUS_OK)
4600 /* Extract and check first signature */
/* Unaligned read - byte_buf may not be dword-aligned past this point */
4601 signature = ecore_read_unaligned_dword(byte_buf);
4602 byte_buf += sizeof(signature);
4603 if (signature != NVM_MAGIC_VALUE)
4604 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4606 /* Extract number of modules */
4607 modules_num = *(byte_buf++);
4609 /* Skip all modules */
4610 for (i = 0; i < modules_num; i++) {
4611 module_len = *(byte_buf++);
4612 byte_buf += module_len;
4615 /* Extract and check second signature */
4616 signature = ecore_read_unaligned_dword(byte_buf);
4617 byte_buf += sizeof(signature);
4618 if (signature != NVM_MAGIC_VALUE)
4619 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4621 return DBG_STATUS_OK;
4624 /* Dump MCP Trace */
/* Dumps the MCP trace: scratchpad trace data plus (when NVRAM access and
 * MCP access are available) the trace meta image. Writes the total dumped
 * size in dwords to *num_dumped_dwords.
 */
4625 static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
4626 struct ecore_ptt *p_ptt,
4629 u32 *num_dumped_dwords)
4631 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
4632 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4633 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4634 u32 running_bundle_id, offset = 0;
4635 enum dbg_status status;
4639 *num_dumped_dwords = 0;
/* MCP may only be touched on real ASICs and when not explicitly disabled */
4641 mcp_access = dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4643 /* Get trace data info */
4644 status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
4645 if (status != DBG_STATUS_OK)
4648 /* Dump global params */
4649 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4650 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");
4652 /* Halt MCP while reading from scratchpad so the read data will be
4653 * consistent. if halt fails, MCP trace is taken anyway, with a small
4654 * risk that it may be corrupt.
4656 if (dump && mcp_access) {
4657 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
4659 DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
4662 /* Find trace data size */
/* Size includes the mcp_trace header, rounded up to whole dwords */
4663 trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);
4665 /* Dump trace data section header and param */
4666 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
4667 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);
4669 /* Read trace data from scratchpad into dump buffer */
4670 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);
4672 /* Resume MCP (only if halt succeeded) */
4673 if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
4674 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
4676 /* Dump trace meta section header */
4677 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);
4679 /* Read trace meta only if NVRAM access is enabled
4680 * (trace_meta_size_bytes is dword-aligned).
4682 if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
4683 status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
4684 if (status == DBG_STATUS_OK)
4685 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4688 /* Dump trace meta size param */
/* When meta info lookup failed, a 0 size is dumped and the read is skipped */
4689 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);
4691 /* Read trace meta image into dump buffer */
4692 if (dump && trace_meta_size_dwords)
4693 status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
4694 if (status == DBG_STATUS_OK)
4695 offset += trace_meta_size_dwords;
4697 /* Dump last section */
4698 offset += ecore_dump_last_section(dump_buf, offset, dump);
4700 *num_dumped_dwords = offset;
4702 /* If no mcp access, indicate that the dump doesn't contain the meta
4705 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dumps the GRC trace (reg) FIFO into the specified buffer and writes the
 * dumped size in dwords to *num_dumped_dwords.
 */
4709 static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
4710 struct ecore_ptt *p_ptt,
4713 u32 *num_dumped_dwords)
4715 u32 dwords_read, size_param_offset, offset = 0;
4718 *num_dumped_dwords = 0;
4720 /* Dump global params */
4721 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4722 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");
4724 /* Dump fifo data section header and param. The size param is 0 for
4725 * now, and is overwritten after reading the FIFO.
4727 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
4728 size_param_offset = offset;
4729 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4732 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4734 /* Pull available data from fifo. Use DMAE since this is
4735 * widebus memory and must be accessed atomically. Test for
4736 * dwords_read not passing buffer size since more entries could
4737 * be added to the buffer as we
4740 for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4741 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO), REG_FIFO_ELEMENT_DWORDS, true);
4742 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
/* Patch the real size back over the 0 written above */
4745 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4749 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4750 * test how much data is available, except for reading it.
/* Size-query path (dump == false, per elided branch): reserve worst case */
4752 offset += REG_FIFO_DEPTH_DWORDS;
4755 /* Dump last section */
4756 offset += ecore_dump_last_section(dump_buf, offset, dump);
4758 *num_dumped_dwords = offset;
4760 return DBG_STATUS_OK;
/* Dumps the IGU error-handling FIFO into the specified buffer and writes
 * the dumped size in dwords to *num_dumped_dwords. Mirrors
 * ecore_reg_fifo_dump, only with the IGU registers and depths.
 */
4764 static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4765 struct ecore_ptt *p_ptt,
4768 u32 *num_dumped_dwords)
4770 u32 dwords_read, size_param_offset, offset = 0;
4773 *num_dumped_dwords = 0;
4775 /* Dump global params */
4776 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4777 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4779 /* Dump fifo data section header and param. The size param is 0 for
4780 * now, and is overwritten after reading the FIFO.
4782 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4783 size_param_offset = offset;
4784 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4787 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4789 /* Pull available data from fifo. Use DMAE since this is
4790 * widebus memory and must be accessed atomically. Test for
4791 * dwords_read not passing buffer size since more entries could
4792 * be added to the buffer as we are emptying it.
4794 for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4795 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY), IGU_FIFO_ELEMENT_DWORDS, true);
4796 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
/* Patch the real size back over the 0 written above */
4799 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4803 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4804 * test how much data is available, except for reading it.
/* Size-query path (dump == false, per elided branch): reserve worst case */
4806 offset += IGU_FIFO_DEPTH_DWORDS;
4809 /* Dump last section */
4810 offset += ecore_dump_last_section(dump_buf, offset, dump);
4812 *num_dumped_dwords = offset;
4814 return DBG_STATUS_OK;
4817 /* Protection Override dump */
/* Dumps the currently valid GRC protection-override windows and writes the
 * dumped size in dwords to *num_dumped_dwords.
 */
4818 static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4819 struct ecore_ptt *p_ptt,
4822 u32 *num_dumped_dwords)
4824 u32 size_param_offset, override_window_dwords, offset = 0;
4826 *num_dumped_dwords = 0;
4828 /* Dump global params */
4829 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4830 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4832 /* Dump data section header and param. The size param is 0 for now,
4833 * and is overwritten after reading the data.
4835 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4836 size_param_offset = offset;
4837 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4840 /* Add override window info to buffer */
/* Window count is read from HW; each window is a fixed-size element */
4841 override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4842 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW), override_window_dwords, true);
4843 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
/* Size-query path (dump == false, per elided branch): reserve worst case */
4846 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4849 /* Dump last section */
4850 offset += ecore_dump_last_section(dump_buf, offset, dump);
4852 *num_dumped_dwords = offset;
4854 return DBG_STATUS_OK;
4857 /* Performs FW Asserts Dump to the specified buffer.
4858 * Returns the dumped size in dwords.
4860 static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
4861 struct ecore_ptt *p_ptt,
4865 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4866 struct fw_asserts_ram_section *asserts;
/* One-character Storm name + NUL, rewritten per Storm in the loop below */
4867 char storm_letter_str[2] = "?";
4868 struct fw_info fw_info;
4872 /* Dump global params */
4873 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4874 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");
4876 /* Find Storm dump size */
4877 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4878 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
4879 struct storm_defs *storm = &s_storm_defs[storm_id];
/* Storms whose block is held in reset are skipped */
4881 if (dev_data->block_in_reset[storm->block_id])
4884 /* Read FW info for the current Storm */
4885 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4887 asserts = &fw_info.fw_asserts_section;
4889 /* Dump FW Asserts section header and params */
4890 storm_letter_str[0] = storm->letter;
4891 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
4892 offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
4893 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);
4895 /* Read and dump FW Asserts data */
4897 offset += asserts->list_element_dword_size;
/* Locate the asserts section in the Storm's fast-memory internal RAM */
4901 fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
4902 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4903 next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4904 next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
/* The most recent assert is the element before the next-write index,
 * wrapping to the end of the list when the index is 0.
 */
4905 last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
4906 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
4907 last_list_idx * asserts->list_element_dword_size;
4908 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
4911 /* Dump last section */
4912 offset += ecore_dump_last_section(dump_buf, offset, dump);
4917 /***************************** Public Functions *******************************/
/* Registers the debug-data binary image: records a pointer and dword size
 * for each buffer type in the global s_dbg_arrays table. The caller must
 * keep bin_ptr valid for as long as the debug functions are used.
 */
4919 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4921 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4924 /* convert binary data to debug arrays */
4925 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4926 s_dbg_arrays[buf_id].ptr = (u32 *)(bin_ptr + buf_array[buf_id].offset);
4927 s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4930 return DBG_STATUS_OK;
/* Validates the debug application version against TOOLS_VERSION; rejects
 * versions older than this library.
 */
4933 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4935 if (ver < TOOLS_VERSION)
4936 return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4940 return DBG_STATUS_OK;
/* Returns this debug library's TOOLS_VERSION */
4943 u32 ecore_dbg_get_fw_func_ver(void)
4945 return TOOLS_VERSION;
/* Returns the chip ID cached in the hwfn's debug-tools data */
4948 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4950 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4952 return (enum chip_ids)dev_data->chip_id;
/* Resets the debug bus and initializes its software state.
 * Fails with DBG_STATUS_INVALID_ARGS when force_hw_dwords is not 0/4/8 and
 * with DBG_STATUS_DBG_BUS_IN_USE when the DBG block is already active.
 * NOTE(review): parts of the parameter list were elided in this listing;
 * the parameters referenced below (one_shot_en, force_hw_dwords,
 * unify_inputs, grc_input_en) arrive through them.
 */
4955 enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
4956 struct ecore_ptt *p_ptt,
4962 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4963 enum dbg_status status;
4965 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
4966 if (status != DBG_STATUS_OK)
4969 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);
/* Only 0 (auto), 4 or 8 forced HW dwords are supported */
4971 if (force_hw_dwords &&
4972 force_hw_dwords != 4 &&
4973 force_hw_dwords != 8)
4974 return DBG_STATUS_INVALID_ARGS;
4976 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
4977 return DBG_STATUS_DBG_BUS_IN_USE;
4979 /* Update reset state of all blocks */
4980 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4982 /* Disable all debug inputs */
4983 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
4984 if (status != DBG_STATUS_OK)
4987 /* Reset DBG block */
4988 ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4990 /* Set one-shot / wrap-around */
/* FULL_MODE=1 means wrap-around; 0 means stop when full (one-shot) */
4991 ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);
4993 /* Init state params */
4994 OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
4995 dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
4996 dev_data->bus.state = DBG_BUS_STATE_READY;
4997 dev_data->bus.one_shot_en = one_shot_en;
4998 dev_data->bus.hw_dwords = force_hw_dwords;
4999 dev_data->bus.grc_input_en = grc_input_en;
5000 dev_data->bus.unify_inputs = unify_inputs;
/* GRC input, when enabled, counts as one pre-enabled block */
5001 dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;
5003 /* Init special DBG block */
5005 SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5007 return DBG_STATUS_OK;
5010 enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
5011 struct ecore_ptt *p_ptt,
5014 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5015 dma_addr_t pci_buf_phys_addr;
5018 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
5020 if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
5021 return DBG_STATUS_OUTPUT_ALREADY_SET;
5022 if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
5023 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5025 dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
5026 dev_data->bus.pci_buf.size = buf_size_kb * 1024;
5027 if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
5028 return DBG_STATUS_INVALID_ARGS;
5030 pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
5032 return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
5034 OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
5036 dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
5037 dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
5039 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
5040 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
5041 ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
5042 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
5043 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
5044 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
5045 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
5046 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
5047 ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
5049 return DBG_STATUS_OK;
/* Directs debug bus recording output to the network (NIG) on the given port,
 * building a 14-byte Ethernet header from the destination MAC address and
 * SRC_MAC/ETH_TYPE constants, with an optional cap on the recorded data and
 * optional cross-engine TX/RX (mutually exclusive).
 * Must be called while the bus is in READY state with no output set yet.
 */
enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	u16 data_limit_size_kb,
	bool send_to_other_engine,
	bool rcv_from_other_engine)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);

	/* An output may only be set once, and only from READY state */
	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
	return DBG_STATUS_OUTPUT_ALREADY_SET;
	if (dev_data->bus.state != DBG_BUS_STATE_READY)
	return DBG_STATUS_DBG_BLOCK_NOT_RESET;

	/* Port must exist on this chip/platform; cross-engine TX and RX are
	 * mutually exclusive.
	 */
	if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
	return DBG_STATUS_INVALID_ARGS;

	dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
	dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;

	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);

	if (send_to_other_engine)
	ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);

	ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);

	if (rcv_from_other_engine) {
	ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);

	/* NOTE(review): the header configuration below appears to belong to
	 * the !rcv_from_other_engine path (an "else" line seems elided from
	 * this extract) - confirm against the full source.
	 */
	/* Configure ethernet header of 14 bytes */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
	ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);

	/* Optional cap on the amount of recorded data, in chunks */
	if (data_limit_size_kb)
	ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);

	return DBG_STATUS_OK;
5102 static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5106 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5107 u8 curr_shifted_enable_mask, shifted_enable_mask;
5110 shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5112 if (dev_data->bus.num_enabled_blocks) {
5113 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5114 struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5116 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5119 curr_shifted_enable_mask =
5120 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5122 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5123 if (shifted_enable_mask & curr_shifted_enable_mask)
/* Enables debug bus recording for a HW block: selects the block's debug line
 * (line_num), the dwords to record (enable_mask, with cyclic right_shift),
 * and optional force-valid / force-frame masks. Increments the count of
 * enabled blocks on success.
 */
enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
	enum block_id block_id,
	u8 force_valid_mask,
	u8 force_frame_mask)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct block_defs *block = s_block_defs[block_id];
	struct dbg_bus_block_data *block_bus;
	struct dbg_bus_block *block_desc;

	block_bus = &dev_data->bus.blocks[block_id];
	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);

	if (dev_data->bus.state != DBG_BUS_STATE_READY)
	return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (block_id >= MAX_BLOCK_ID)
	return DBG_STATUS_INVALID_ARGS;
	if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
	return DBG_STATUS_BLOCK_ALREADY_ENABLED;

	/* Validate the requested line and masks against the block's
	 * capabilities (a block with no debug client on this chip cannot be
	 * recorded).
	 */
	if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS ||
	line_num >= NUM_DBG_LINES(block_desc) ||
	enable_mask > MAX_CYCLE_VALUES_MASK ||
	force_valid_mask > MAX_CYCLE_VALUES_MASK ||
	force_frame_mask > MAX_CYCLE_VALUES_MASK ||
	right_shift > VALUES_PER_CYCLE - 1)
	return DBG_STATUS_INVALID_ARGS;
	if (dev_data->block_in_reset[block_id])
	return DBG_STATUS_BLOCK_IN_RESET;

	/* Unless inputs are unified, the new mask must not overlap any
	 * already-enabled block's shifted mask.
	 */
	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
	return DBG_STATUS_INPUT_OVERLAP;

	/* Record the block's bus configuration */
	dev_data->bus.blocks[block_id].line_num = line_num;
	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);

	dev_data->bus.num_enabled_blocks++;

	return DBG_STATUS_OK;
/* Enables debug bus recording of a Storm in the given mode. Assigns the
 * storm the next sequential HW ID and increments the enabled-storm count.
 * Fails when recording is HW-only (4/8 forced HW dwords), when inputs are
 * unified, or when the storm's block is in reset.
 */
enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
	enum dbg_storms storm_id,
	enum dbg_bus_storm_modes storm_mode)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	struct dbg_bus_storm_data *storm_bus;
	struct storm_defs *storm;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm_id, storm_mode);

	if (bus->state != DBG_BUS_STATE_READY)
	return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	/* 4/8 HW dwords leave no room for Storm data on the bus */
	if (bus->hw_dwords >= 4)
	return DBG_STATUS_HW_ONLY_RECORDING;
	if (storm_id >= MAX_DBG_STORMS)
	return DBG_STATUS_INVALID_ARGS;
	if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
	return DBG_STATUS_INVALID_ARGS;
	if (bus->unify_inputs)
	return DBG_STATUS_INVALID_ARGS;
	if (bus->storms[storm_id].enabled)
	return DBG_STATUS_STORM_ALREADY_ENABLED;

	storm = &s_storm_defs[storm_id];
	storm_bus = &bus->storms[storm_id];

	if (dev_data->block_in_reset[storm->block_id])
	return DBG_STATUS_BLOCK_IN_RESET;

	/* HW IDs are handed out in enable order */
	storm_bus->enabled = true;
	storm_bus->mode = (u8)storm_mode;
	storm_bus->hw_id = bus->num_enabled_storms;

	bus->num_enabled_storms++;

	return DBG_STATUS_OK;
/* Enables the timestamp input on the debug bus: programs the timestamp
 * valid/frame enable masks (each at most 0x7) and tick length, marks the
 * DBG block itself as enabled (dword 0), and counts it as an enabled block.
 * Fails with INPUT_OVERLAP if dword 0 is already taken (unless inputs are
 * unified).
 */
enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);

	if (dev_data->bus.state != DBG_BUS_STATE_READY)
	return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (valid_mask > 0x7 || frame_mask > 0x7)
	return DBG_STATUS_INVALID_ARGS;
	/* The timestamp occupies dword 0 (mask 0x1, no shift) */
	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
	return DBG_STATUS_INPUT_OVERLAP;

	dev_data->bus.timestamp_input_en = true;
	dev_data->bus.num_enabled_blocks++;

	SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);

	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);

	return DBG_STATUS_OK;
/* Adds a SEM filter to the given (already enabled) storm that records only
 * events whose ID falls in [min_eid, max_eid].
 */
enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
	enum dbg_storms storm_id,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_storm_data *storm_bus;

	/* NOTE(review): storms[storm_id] is indexed before the bounds check
	 * below - harmless for the current call sites but technically UB for
	 * an out-of-range storm_id.
	 */
	storm_bus = &dev_data->bus.storms[storm_id];

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);

	if (storm_id >= MAX_DBG_STORMS)
	return DBG_STATUS_INVALID_ARGS;
	if (min_eid > max_eid)
	return DBG_STATUS_INVALID_ARGS;
	if (!storm_bus->enabled)
	return DBG_STATUS_STORM_NOT_ENABLED;

	/* range_not_mask = 1 selects range-based (not mask-based) filtering */
	storm_bus->eid_filter_en = 1;
	storm_bus->eid_range_not_mask = 1;
	storm_bus->eid_filter_params.range.min = min_eid;
	storm_bus->eid_filter_params.range.max = max_eid;

	return DBG_STATUS_OK;
/* Adds a SEM filter to the given (already enabled) storm that records only
 * events whose ID matches eid_val under eid_mask.
 */
enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
	enum dbg_storms storm_id,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_storm_data *storm_bus;

	/* NOTE(review): storms[storm_id] is indexed before the bounds check
	 * below - technically UB for an out-of-range storm_id.
	 */
	storm_bus = &dev_data->bus.storms[storm_id];

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);

	if (storm_id >= MAX_DBG_STORMS)
	return DBG_STATUS_INVALID_ARGS;
	if (!storm_bus->enabled)
	return DBG_STATUS_STORM_NOT_ENABLED;

	/* range_not_mask = 0 selects mask-based (not range-based) filtering */
	storm_bus->eid_filter_en = 1;
	storm_bus->eid_range_not_mask = 0;
	storm_bus->eid_filter_params.mask.val = eid_val;
	storm_bus->eid_filter_params.mask.mask = eid_mask;

	return DBG_STATUS_OK;
5299 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5300 enum dbg_storms storm_id,
5303 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5304 struct dbg_bus_storm_data *storm_bus;
5306 storm_bus = &dev_data->bus.storms[storm_id];
5308 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5310 if (storm_id >= MAX_DBG_STORMS)
5311 return DBG_STATUS_INVALID_ARGS;
5312 if (!storm_bus->enabled)
5313 return DBG_STATUS_STORM_NOT_ENABLED;
5315 storm_bus->cid_filter_en = 1;
5316 storm_bus->cid = cid;
5318 return DBG_STATUS_OK;
/* Enables the debug bus filter on an already-enabled block, optionally with
 * a constant message length. Requires unified inputs (filter HW ID is fixed
 * to 0). Subsequent ecore_dbg_bus_add_constraint() calls will apply to the
 * filter until a trigger state is added.
 */
enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	enum block_id block_id,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);

	if (dev_data->bus.state != DBG_BUS_STATE_READY)
	return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (dev_data->bus.filter_en)
	return DBG_STATUS_FILTER_ALREADY_ENABLED;
	if (block_id >= MAX_BLOCK_ID)
	return DBG_STATUS_INVALID_ARGS;
	if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
	return DBG_STATUS_BLOCK_NOT_ENABLED;
	/* Filtering is only supported when inputs are unified */
	if (!dev_data->bus.unify_inputs)
	return DBG_STATUS_FILTER_BUG;

	dev_data->bus.filter_en = true;
	dev_data->bus.next_constraint_id = 0;
	dev_data->bus.adding_filter = true;

	/* HW ID is set to 0 due to required unifyInputs */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
	if (const_msg_len > 0)
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);

	return DBG_STATUS_OK;
/* Enables the debug bus trigger logic: selects how much data to keep before
 * the trigger (pre_chunks, 0 = from start) and after it (post_cycles,
 * 0 = unlimited), and whether filtering applies before/after the trigger.
 */
enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	bool rec_pre_trigger,
	bool rec_post_trigger,
	bool filter_pre_trigger,
	bool filter_post_trigger)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_bus_post_trigger_types post_trigger_type;
	enum dbg_bus_pre_trigger_types pre_trigger_type;
	struct dbg_bus_data *bus = &dev_data->bus;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);

	if (bus->state != DBG_BUS_STATE_READY)
	return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (bus->trigger_en)
	return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
	/* Pre-trigger window must fit in the internal buffer */
	if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
	return DBG_STATUS_INVALID_ARGS;

	bus->trigger_en = true;
	bus->filter_pre_trigger = filter_pre_trigger;
	bus->filter_post_trigger = filter_post_trigger;

	/* NOTE(review): in the two branches below, the DROP assignments
	 * appear to be the "else" paths (else lines seem elided from this
	 * extract) - confirm against the full source.
	 */
	if (rec_pre_trigger) {
	pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
	pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;

	if (rec_post_trigger) {
	post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
	post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;

	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);

	return DBG_STATUS_OK;
/* Adds a trigger state bound to an enabled block: stores the block's shifted
 * enable mask for later constraint validation, programs the state's message
 * length and trigger-set count registers, marks this state as final, and
 * chains the previous state (if any) to it. Subsequent constraints apply to
 * this state.
 */
enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	enum block_id block_id,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	struct dbg_bus_block_data *block_bus;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);

	block_bus = &bus->blocks[block_id];

	if (!bus->trigger_en)
	return DBG_STATUS_TRIGGER_NOT_ENABLED;
	if (bus->next_trigger_state == MAX_TRIGGER_STATES)
	return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
	if (block_id >= MAX_BLOCK_ID)
	return DBG_STATUS_INVALID_ARGS;
	if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
	return DBG_STATUS_BLOCK_NOT_ENABLED;
	/* NOTE(review): the condition guarding the return below appears to be
	 * elided from this extract - verify against the full source.
	 */
	return DBG_STATUS_INVALID_ARGS;

	/* New state: constraints start over and apply to triggers */
	bus->next_constraint_id = 0;
	bus->adding_filter = false;

	/* Store block's shifted enable mask */
	SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
	GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));

	/* Set trigger state registers */
	reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
	if (const_msg_len > 0)
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);

	/* Set trigger set registers */
	reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);

	/* Set next state to final state, and overwrite previous next state
	 * (if any) to point at this new state.
	 */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
	if (bus->next_trigger_state > 0) {
	reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);

	bus->next_trigger_state++;

	return DBG_STATUS_OK;
/* Adds a filter/trigger constraint: compares the recorded dword at
 * (cycle_offset, dword_offset_in_cycle) against data_val under data_mask
 * using constraint_op, optionally also comparing the frame bit. Constraints
 * apply to the filter while bus.adding_filter is set, otherwise to the most
 * recently added trigger state. The first constraint is padded with three
 * always-matching dummy constraints.
 */
enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	enum dbg_bus_constraint_ops constraint_op,
	u8 dword_offset_in_cycle,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u16 dword_offset, range = 0;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);

	/* A constraint needs an active filter or trigger to attach to */
	if (!bus->filter_en && !dev_data->bus.trigger_en)
	return DBG_STATUS_CANT_ADD_CONSTRAINT;
	if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
	return DBG_STATUS_CANT_ADD_CONSTRAINT;
	if (bus->next_constraint_id >= MAX_CONSTRAINTS)
	return DBG_STATUS_TOO_MANY_CONSTRAINTS;
	if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
	return DBG_STATUS_INVALID_ARGS;
	/* Frame comparison is only meaningful for EQ/NE operations */
	if (compare_frame &&
	constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
	constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
	return DBG_STATUS_INVALID_ARGS;

	dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;

	if (!bus->adding_filter) {
	u8 curr_trigger_state_id = bus->next_trigger_state - 1;
	struct dbg_bus_trigger_state_data *trigger_state;

	trigger_state = &bus->trigger_states[curr_trigger_state_id];

	/* Check if the selected dword is enabled in the block */
	if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
	return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;

	/* Add selected dword to trigger state's dword mask */
	SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));

	/* Prepare data mask and range */
	if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
	constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
	/* HW expects an inverted mask for EQ/NE */
	data_mask = ~data_mask;

	/* Extract lsb and width from mask
	 * NOTE(review): the conditions guarding the two returns below (mask
	 * validity checks) appear to be elided from this extract.
	 */
	return DBG_STATUS_INVALID_ARGS;
	for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
	for (width = 0; width < 32 - lsb && (data_mask & 1); width++, data_mask >>= 1);
	return DBG_STATUS_INVALID_ARGS;
	/* Encode lsb and width into the HW range field */
	range = (lsb << 5) | (width - 1);

	/* Add constraint */
	ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
	dev_data->bus.next_constraint_id,
	s_constraint_op_defs[constraint_op].hw_op_val,
	data_val, data_mask, frame_bit,
	compare_frame ? 0 : 1, dword_offset, range,
	s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
	is_mandatory ? 1 : 0);

	/* If first constraint, fill other 3 constraints with dummy constraints
	 * that always match (using the same offset).
	 */
	if (!dev_data->bus.next_constraint_id) {
	for (i = 1; i < MAX_CONSTRAINTS; i++)
	ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
	i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
	0, 1, dword_offset, 0, 0, 1);

	bus->next_constraint_id++;

	return DBG_STATUS_OK;
/* Configure the DBG block client mask: builds a bitmask of all debug bus
 * clients that feed the DBG block (enabled Storms, enabled HW blocks other
 * than the DBG block itself, the GRC/CPU input, and the timestamp input)
 * and programs it via ecore_bus_enable_clients().
 */
static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u32 block_id, client_mask = 0;

	/* Update client mask for Storm inputs */
	if (bus->num_enabled_storms)
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
	struct storm_defs *storm = &s_storm_defs[storm_id];

	if (bus->storms[storm_id].enabled)
	client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);

	/* Update client mask for block inputs (the DBG block itself is
	 * excluded - it has no external client)
	 */
	if (bus->num_enabled_blocks) {
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
	struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
	struct block_defs *block = s_block_defs[block_id];

	if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
	client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);

	/* Update client mask for GRC input */
	if (bus->grc_input_en)
	client_mask |= (1 << DBG_BUS_CLIENT_CPU);

	/* Update client mask for timestamp input */
	if (bus->timestamp_input_en)
	client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);

	ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
/* Configure the DBG block framing mode. When HW dwords weren't forced and
 * blocks are enabled, derives 4 (128-bit line) or 8 (256-bit line) HW dwords
 * from the enabled blocks' line descriptors; all enabled blocks must agree,
 * and 256-bit lines are incompatible with filters/triggers. Then programs
 * the matching frame mode (0 HW dwords means 4 Storm dwords per cycle).
 */
static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_bus_frame_modes dbg_framing_mode;

	if (!bus->hw_dwords && bus->num_enabled_blocks) {
	struct dbg_bus_line *line_desc;

	/* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
	 * (256-bit mode), according to the enabled blocks' debug lines.
	 */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
	struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];

	if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))

	line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
	hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;

	/* All enabled blocks must use the same line width */
	if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
	return DBG_STATUS_NON_MATCHING_LINES;

	/* The DBG block doesn't support triggers and
	 * filters on 256b debug lines.
	 */
	if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
	return DBG_STATUS_NO_FILTER_TRIGGER_64B;

	bus->hw_dwords = hw_dwords;

	switch (bus->hw_dwords) {
	case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
	case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
	case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
	default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
	}
	ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);

	return DBG_STATUS_OK;
/* Configure the DBG block Storm data: verifies each enabled Storm's SEMI
 * sync FIFO is empty, programs the per-storm HW ID mask, disables storm
 * stall for one-shot internal-buffer recording, and fills the calendar
 * slots round-robin over the enabled Storms.
 */
static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u8 storm_id, i, next_storm_id = 0;
	u32 storm_id_mask = 0;

	/* Check if SEMI sync FIFO is empty */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
	struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
	struct storm_defs *storm = &s_storm_defs[storm_id];

	if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
	return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;

	/* Pack each enabled storm's HW ID into its HW_ID_BITS slot */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
	struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];

	if (storm_bus->enabled)
	storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));

	ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);

	/* Disable storm stall if recording to internal buffer in one-shot */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);

	/* Configure calendar. The caller (ecore_dbg_bus_start) only invokes
	 * this function when at least one Storm is enabled - otherwise the
	 * inner scan below would never terminate.
	 */
	for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {

	/* Find next enabled Storm */
	for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);

	/* Configure calendar slot */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);

	return DBG_STATUS_OK;
/* Assign HW ID to each dword/qword:
 * if the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
 * Otherwise, we would like to assign a different HW ID to each dword, to avoid
 * data synchronization issues. however, we need to check if there is a trigger
 * state for which more than one dword has a constraint. if there is, we cannot
 * assign a different HW ID to each dword (since a trigger state has a single
 * HW ID), so we assign a different HW ID to each block.
 */
static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
	u8 hw_ids[VALUES_PER_CYCLE])
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	bool hw_id_per_dword = true;
	u8 val_id, state_id;

	/* Default: all dwords share HW ID 0 (kept when inputs are unified) */
	OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);

	if (bus->unify_inputs)

	/* Check whether any trigger state constrains more than one dword */
	if (bus->trigger_en) {
	for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {

	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
	if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))

	hw_id_per_dword = false;

	if (hw_id_per_dword) {

	/* Assign a different HW ID for each dword */
	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
	hw_ids[val_id] = val_id;

	u8 shifted_enable_mask, next_hw_id = 0;

	/* Assign HW IDs according to block enablement: each enabled block
	 * gets the next sequential HW ID, applied to the dwords covered by
	 * its shifted enable mask.
	 */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
	struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];

	if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))

	block_bus->hw_id = next_hw_id++;
	if (!block_bus->hw_id)

	shifted_enable_mask =
	SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
	GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));

	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
	if (shifted_enable_mask & (1 << val_id))
	hw_ids[val_id] = block_bus->hw_id;
/* Configure the DBG block HW blocks data: assigns HW IDs per dword (via
 * ecore_assign_hw_ids), binds each trigger state to the HW ID of one of its
 * constrained dwords, programs the combined HW ID mask, and applies K2 PCIE
 * workaround registers when a PCIE block is being recorded.
 */
static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u8 hw_ids[VALUES_PER_CYCLE];
	u8 val_id, state_id;

	ecore_assign_hw_ids(p_hwfn, hw_ids);

	/* Assign a HW ID to each trigger state */
	if (dev_data->bus.trigger_en) {
	for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
	u8 state_data = bus->trigger_states[state_id].data;

	if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);

	/* Configure HW ID mask: pack each dword's HW ID into its
	 * HW_ID_BITS slot
	 */
	dev_data->bus.hw_id_mask = 0;
	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
	bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
	ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);

	/* Configure additional K2 PCIE registers */
	if (dev_data->chip_id == CHIP_K2 &&
	(GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
	GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
	ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
	ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
/* Starts debug bus recording: validates that exactly one input type (Storms
 * or blocks, possibly cross-engine RX) is enabled, configures framing mode,
 * Storm/block inputs, filter type, restarts the timestamp, enables the DBG
 * block, programs each enabled block's debug line and the client mask,
 * enables each enabled Storm, and moves the bus to RECORDING state.
 */
enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_bus_filter_types filter_type;
	enum dbg_status status;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");

	if (bus->state != DBG_BUS_STATE_READY)
	return DBG_STATUS_DBG_BLOCK_NOT_RESET;

	/* Check if any input was enabled */
	if (!bus->num_enabled_storms &&
	!bus->num_enabled_blocks &&
	!bus->rcv_from_other_engine)
	return DBG_STATUS_NO_INPUT_ENABLED;

	/* Check if too many input types were enabled (storm+dbgmux) */
	if (bus->num_enabled_storms && bus->num_enabled_blocks)
	return DBG_STATUS_TOO_MANY_INPUTS;

	/* Configure framing mode */
	if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)

	/* Configure DBG block for Storm inputs */
	if (bus->num_enabled_storms)
	if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)

	/* Configure DBG block for block inputs */
	if (bus->num_enabled_blocks)
	ecore_config_block_inputs(p_hwfn, p_ptt);

	/* Configure filter type. With a trigger, the pre/post flags select
	 * ON/PRE/POST/OFF; without one, an enabled filter is simply ON.
	 * NOTE(review): the else branches of this ladder appear to be elided
	 * from this extract.
	 */
	if (bus->filter_en) {
	if (bus->trigger_en) {
	if (bus->filter_pre_trigger)
	filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
	filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
	filter_type = DBG_BUS_FILTER_TYPE_ON;
	filter_type = DBG_BUS_FILTER_TYPE_OFF;
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);

	/* Restart timestamp */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);

	/* Enable debug block */
	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);

	/* Configure enabled blocks - must be done before the DBG block is
	 * enabled.
	 */
	if (dev_data->bus.num_enabled_blocks) {
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
	if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)

	ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
	dev_data->bus.blocks[block_id].line_num,
	GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
	GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
	GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
	GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));

	/* Configure client mask */
	ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);

	/* Configure enabled Storms - must be done after the DBG block is
	 * enabled.
	 */
	if (dev_data->bus.num_enabled_storms)
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
	if (dev_data->bus.storms[storm_id].enabled)
	ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id);

	dev_data->bus.state = DBG_BUS_STATE_RECORDING;

	return DBG_STATUS_OK;
5886 enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
5887 struct ecore_ptt *p_ptt)
5889 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5890 struct dbg_bus_data *bus = &dev_data->bus;
5891 enum dbg_status status = DBG_STATUS_OK;
5893 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");
5895 if (bus->state != DBG_BUS_STATE_RECORDING)
5896 return DBG_STATUS_RECORDING_NOT_STARTED;
5898 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
5899 if (status != DBG_STATUS_OK)
5902 ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);
5904 OSAL_MSLEEP(FLUSH_DELAY_MS);
5906 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
5908 /* Check if trigger worked */
5909 if (bus->trigger_en) {
5910 u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);
5912 if (trigger_state != MAX_TRIGGER_STATES)
5913 return DBG_STATUS_DATA_DIDNT_TRIGGER;
5916 bus->state = DBG_BUS_STATE_STOPPED;
5921 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5922 struct ecore_ptt *p_ptt,
5925 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5926 struct dbg_bus_data *bus = &dev_data->bus;
5927 enum dbg_status status;
5929 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5933 if (status != DBG_STATUS_OK)
5936 /* Add dump header */
5937 *buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5939 switch (bus->target) {
5940 case DBG_BUS_TARGET_ID_INT_BUF:
5941 *buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5942 case DBG_BUS_TARGET_ID_PCI:
5943 *buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5948 /* Dump last section */
5949 *buf_size += ecore_dump_last_section(OSAL_NULL, 0, false);
5951 return DBG_STATUS_OK;
5954 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5955 struct ecore_ptt *p_ptt,
5957 u32 buf_size_in_dwords,
5958 u32 *num_dumped_dwords)
5960 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5961 u32 min_buf_size_in_dwords, block_id, offset = 0;
5962 struct dbg_bus_data *bus = &dev_data->bus;
5963 enum dbg_status status;
5966 *num_dumped_dwords = 0;
5968 status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5969 if (status != DBG_STATUS_OK)
5972 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5974 if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5975 return DBG_STATUS_RECORDING_NOT_STARTED;
5977 if (bus->state == DBG_BUS_STATE_RECORDING) {
5978 enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5979 if (stop_state != DBG_STATUS_OK)
5983 if (buf_size_in_dwords < min_buf_size_in_dwords)
5984 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5986 if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5987 return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5990 offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5992 /* Dump recorded data */
5993 if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5994 u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5996 if (!recorded_dwords)
5997 return DBG_STATUS_NO_DATA_RECORDED;
5998 if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5999 return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
6000 offset += recorded_dwords;
6003 /* Dump last section */
6004 offset += ecore_dump_last_section(dump_buf, offset, true);
6006 /* If recorded to PCI buffer - free the buffer */
6007 ecore_bus_free_pci_buf(p_hwfn);
6009 /* Clear debug bus parameters */
6010 bus->state = DBG_BUS_STATE_IDLE;
6011 bus->num_enabled_blocks = 0;
6012 bus->num_enabled_storms = 0;
6013 bus->filter_en = bus->trigger_en = 0;
6015 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
6016 SET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
6018 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
6019 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
6021 storm_bus->enabled = false;
6022 storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
6025 *num_dumped_dwords = offset;
6027 return DBG_STATUS_OK;
6030 enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
6031 enum dbg_grc_params grc_param,
6036 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
6038 /* Initializes the GRC parameters (if not initialized). Needed in order
6039 * to set the default parameter values for the first time.
6041 ecore_dbg_grc_init_params(p_hwfn);
6043 if (grc_param >= MAX_DBG_GRC_PARAMS)
6044 return DBG_STATUS_INVALID_ARGS;
6045 if (val < s_grc_param_defs[grc_param].min ||
6046 val > s_grc_param_defs[grc_param].max)
6047 return DBG_STATUS_INVALID_ARGS;
6049 if (s_grc_param_defs[grc_param].is_preset) {
6053 /* Disabling a preset is not allowed. Call
6054 * dbg_grc_set_params_default instead.
6057 return DBG_STATUS_INVALID_ARGS;
6059 /* Update all params with the preset values */
6060 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
6063 if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
6064 preset_val = s_grc_param_defs[i].exclude_all_preset_val;
6065 else if (grc_param == DBG_GRC_PARAM_CRASH)
6066 preset_val = s_grc_param_defs[i].crash_preset_val;
6068 return DBG_STATUS_INVALID_ARGS;
6070 ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
6075 /* Regular param - set its value */
6076 ecore_grc_set_param(p_hwfn, grc_param, val);
6079 return DBG_STATUS_OK;
6082 /* Assign default GRC param values */
6083 void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6085 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6088 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6089 dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
6092 enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6093 struct ecore_ptt *p_ptt,
6096 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6100 if (status != DBG_STATUS_OK)
6103 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6104 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6105 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6107 return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6110 enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6111 struct ecore_ptt *p_ptt,
6113 u32 buf_size_in_dwords,
6114 u32 *num_dumped_dwords)
6116 u32 needed_buf_size_in_dwords;
6117 enum dbg_status status;
6119 *num_dumped_dwords = 0;
6121 status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6122 if (status != DBG_STATUS_OK)
6125 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6126 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6128 /* Doesn't do anything, needed for compile time asserts */
6129 ecore_static_asserts();
6132 status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6134 /* Reveret GRC params to their default */
6135 ecore_dbg_grc_set_params_default(p_hwfn);
6140 enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6141 struct ecore_ptt *p_ptt,
6144 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6145 struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6146 enum dbg_status status;
6150 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6151 if (status != DBG_STATUS_OK)
6154 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6155 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6156 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6158 if (!idle_chk->buf_size_set) {
6159 idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6160 idle_chk->buf_size_set = true;
6163 *buf_size = idle_chk->buf_size;
6165 return DBG_STATUS_OK;
6168 enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6169 struct ecore_ptt *p_ptt,
6171 u32 buf_size_in_dwords,
6172 u32 *num_dumped_dwords)
6174 u32 needed_buf_size_in_dwords;
6175 enum dbg_status status;
6177 *num_dumped_dwords = 0;
6179 status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6180 if (status != DBG_STATUS_OK)
6183 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6184 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6186 /* Update reset state */
6187 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6189 /* Idle Check Dump */
6190 *num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6192 /* Reveret GRC params to their default */
6193 ecore_dbg_grc_set_params_default(p_hwfn);
6195 return DBG_STATUS_OK;
6198 enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6199 struct ecore_ptt *p_ptt,
6202 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6206 if (status != DBG_STATUS_OK)
6209 return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6212 enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6213 struct ecore_ptt *p_ptt,
6215 u32 buf_size_in_dwords,
6216 u32 *num_dumped_dwords)
6218 u32 needed_buf_size_in_dwords;
6219 enum dbg_status status;
6221 status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6222 if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6225 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6226 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6228 /* Update reset state */
6229 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6232 status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6234 /* Reveret GRC params to their default */
6235 ecore_dbg_grc_set_params_default(p_hwfn);
6240 enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6241 struct ecore_ptt *p_ptt,
6244 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6248 if (status != DBG_STATUS_OK)
6251 return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6254 enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6255 struct ecore_ptt *p_ptt,
6257 u32 buf_size_in_dwords,
6258 u32 *num_dumped_dwords)
6260 u32 needed_buf_size_in_dwords;
6261 enum dbg_status status;
6263 *num_dumped_dwords = 0;
6265 status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6266 if (status != DBG_STATUS_OK)
6269 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6270 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6272 /* Update reset state */
6273 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6275 status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6277 /* Reveret GRC params to their default */
6278 ecore_dbg_grc_set_params_default(p_hwfn);
6283 enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6284 struct ecore_ptt *p_ptt,
6287 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6291 if (status != DBG_STATUS_OK)
6294 return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6297 enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6298 struct ecore_ptt *p_ptt,
6300 u32 buf_size_in_dwords,
6301 u32 *num_dumped_dwords)
6303 u32 needed_buf_size_in_dwords;
6304 enum dbg_status status;
6306 *num_dumped_dwords = 0;
6308 status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6309 if (status != DBG_STATUS_OK)
6312 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6313 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6315 /* Update reset state */
6316 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6318 status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6320 /* Reveret GRC params to their default */
6321 ecore_dbg_grc_set_params_default(p_hwfn);
6326 enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6327 struct ecore_ptt *p_ptt,
6330 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6334 if (status != DBG_STATUS_OK)
6337 return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6340 enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6341 struct ecore_ptt *p_ptt,
6343 u32 buf_size_in_dwords,
6344 u32 *num_dumped_dwords)
6346 u32 needed_buf_size_in_dwords;
6347 enum dbg_status status;
6349 *num_dumped_dwords = 0;
6351 status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6352 if (status != DBG_STATUS_OK)
6355 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6356 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6358 /* Update reset state */
6359 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6361 status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6363 /* Reveret GRC params to their default */
6364 ecore_dbg_grc_set_params_default(p_hwfn);
6369 enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6370 struct ecore_ptt *p_ptt,
6373 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6377 if (status != DBG_STATUS_OK)
6380 /* Update reset state */
6381 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6383 *buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6385 return DBG_STATUS_OK;
6388 enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6389 struct ecore_ptt *p_ptt,
6391 u32 buf_size_in_dwords,
6392 u32 *num_dumped_dwords)
6394 u32 needed_buf_size_in_dwords;
6395 enum dbg_status status;
6397 *num_dumped_dwords = 0;
6399 status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6400 if (status != DBG_STATUS_OK)
6403 if (buf_size_in_dwords < needed_buf_size_in_dwords)
6404 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6406 *num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6408 /* Reveret GRC params to their default */
6409 ecore_dbg_grc_set_params_default(p_hwfn);
6411 return DBG_STATUS_OK;
6414 enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
6415 struct ecore_ptt *p_ptt,
6416 enum block_id block_id,
6417 enum dbg_attn_type attn_type,
6419 struct dbg_attn_block_result *results)
6421 enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6422 u8 reg_idx, num_attn_regs, num_result_regs = 0;
6423 const struct dbg_attn_reg *attn_reg_arr;
6425 if (status != DBG_STATUS_OK)
6428 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6429 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6431 attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);
6433 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
6434 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
6435 struct dbg_attn_reg_result *reg_result;
6436 u32 sts_addr, sts_val;
6437 u16 modes_buf_offset;
6441 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
6442 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
6443 if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
6446 /* Mode match - read attention status register */
6447 sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
6448 sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
6452 /* Non-zero attention status - add to results */
6453 reg_result = &results->reg_results[num_result_regs];
6454 SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
6455 SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
6456 reg_result->block_attn_offset = reg_data->block_attn_offset;
6457 reg_result->sts_val = sts_val;
6458 reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
6462 results->block_id = (u8)block_id;
6463 results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
6464 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
6465 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
6467 return DBG_STATUS_OK;
6470 enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6471 struct dbg_attn_block_result *results)
6473 enum dbg_attn_type attn_type;
6476 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6477 attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6479 for (i = 0; i < num_regs; i++) {
6480 struct dbg_attn_reg_result *reg_result;
6481 const char *attn_type_str;
6484 reg_result = &results->reg_results[i];
6485 attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6486 sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6487 DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6490 return DBG_STATUS_OK;
6493 bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6494 struct ecore_ptt *p_ptt,
6495 enum block_id block_id)
6497 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6498 struct block_defs *block = s_block_defs[block_id];
6501 if (!block->has_reset_bit)
6504 reset_reg = block->reset_reg;
6506 return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
6507 !(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) : true;