/* sys/dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c (FreeBSD stable/10, MFC r316485) */
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc. 
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 /*
29  * File : ecore_dbg_fw_funcs.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34
35 #include "bcm_osal.h"
36 #include "ecore.h"
37 #include "ecore_hw.h"
38 #include "ecore_mcp.h"
39 #include "spad_layout.h"
40 #include "nvm_map.h"
41 #include "reg_addr.h"
42 #include "ecore_hsi_common.h"
43 #include "ecore_hsi_debug_tools.h"
44 #include "mcp_public.h"
45 #include "nvm_map.h"
46 #ifndef USE_DBG_BIN_FILE
47 #include "ecore_dbg_values.h"
48 #endif
49 #include "ecore_dbg_fw_funcs.h"
50
51 /* Memory groups enum */
/* NOTE: the order of this enum must be kept in sync with
 * s_mem_group_names below (the names array is indexed by these values).
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_PBUF,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUPS_NUM	/* number of memory groups, not a real group */
};
83
84 /* Memory groups names */
/* Indexed by enum mem_groups - keep both lists in the same order. */
static const char* s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"TM_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"CONN_CFC_MEM",
	"TASK_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"PXP_ILT",
	"PBUF",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
};
115
116 /* Idle check conditions */
117
118 #ifndef __PREVENT_COND_ARR__
119
120 static u32 cond5(const u32 *r, const u32 *imm) {
121         return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
122 }
123
124 static u32 cond7(const u32 *r, const u32 *imm) {
125         return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
126 }
127
128 static u32 cond14(const u32 *r, const u32 *imm) {
129         return ((r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]));
130 }
131
132 static u32 cond6(const u32 *r, const u32 *imm) {
133         return ((r[0] & imm[0]) != imm[1]);
134 }
135
136 static u32 cond9(const u32 *r, const u32 *imm) {
137         return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
138 }
139
140 static u32 cond10(const u32 *r, const u32 *imm) {
141         return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
142 }
143
144 static u32 cond4(const u32 *r, const u32 *imm) {
145         return ((r[0] & ~imm[0]) != imm[1]);
146 }
147
148 static u32 cond0(const u32 *r, const u32 *imm) {
149         return ((r[0] & ~r[1]) != imm[0]);
150 }
151
152 static u32 cond1(const u32 *r, const u32 *imm) {
153         return (r[0] != imm[0]);
154 }
155
156 static u32 cond11(const u32 *r, const u32 *imm) {
157         return (r[0] != r[1] && r[2] == imm[0]);
158 }
159
160 static u32 cond12(const u32 *r, const u32 *imm) {
161         return (r[0] != r[1] && r[2] > imm[0]);
162 }
163
164 static u32 cond3(const u32 *r, const u32 *imm) {
165         return (r[0] != r[1]);
166 }
167
168 static u32 cond13(const u32 *r, const u32 *imm) {
169         return (r[0] & imm[0]);
170 }
171
172 static u32 cond8(const u32 *r, const u32 *imm) {
173         return (r[0] < (r[1] - imm[0]));
174 }
175
176 static u32 cond2(const u32 *r, const u32 *imm) {
177         return (r[0] > imm[0]);
178 }
179
180 /* Array of Idle Check conditions */
181 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
182         cond0,
183         cond1,
184         cond2,
185         cond3,
186         cond4,
187         cond5,
188         cond6,
189         cond7,
190         cond8,
191         cond9,
192         cond10,
193         cond11,
194         cond12,
195         cond13,
196         cond14,
197 };
198
199 #endif /* __PREVENT_COND_ARR__ */
200
201
202 /******************************* Data Types **********************************/
203
/* Platform IDs - index into chip_defs.per_platform */
enum platform_ids {
	PLATFORM_ASIC,
	PLATFORM_EMUL_FULL,
	PLATFORM_EMUL_REDUCED,
	PLATFORM_FPGA,
	MAX_PLATFORM_IDS	/* number of platform IDs, not a real platform */
};
211
/* Per-chip, per-platform dimensions */
struct chip_platform_defs {
	u8 num_ports;	/* number of ports */
	u8 num_pfs;	/* number of physical functions */
	u8 num_vfs;	/* number of virtual functions */
};
217
218 /* Chip constant definitions */
struct chip_defs {
	const char *name;	/* chip name, e.g. "bb" / "k2" (see s_chip_defs) */
	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS]; /* indexed by enum platform_ids */
};
223
224 /* Platform constant definitions */
struct platform_defs {
	const char *name;
	u32 delay_factor;	/* delay scaling factor - usage not visible in this chunk */
};
229
230 /* Storm constant definitions.
231  * Addresses are in bytes, sizes are in quad-regs.
232  */
struct storm_defs {
	char letter;		/* storm letter, e.g. 'T', 'M' (see s_storm_defs) */
	enum block_id block_id;	/* the storm's SEM block */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS]; /* per-chip debug bus client */
	bool has_vfc;
	/* SEM debug/control register addresses (bytes) */
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_slow_dbg_empty_addr;
	/* CM context access register and (lid size, read register) pairs.
	 * Sizes are in quad-regs (see comment above).
	 */
	u32 cm_ctx_wr_addr;
	u32 cm_conn_ag_ctx_lid_size;
	u32 cm_conn_ag_ctx_rd_addr;
	u32 cm_conn_st_ctx_lid_size;
	u32 cm_conn_st_ctx_rd_addr;
	u32 cm_task_ag_ctx_lid_size;
	u32 cm_task_ag_ctx_rd_addr;
	u32 cm_task_st_ctx_lid_size;
	u32 cm_task_st_ctx_rd_addr;
};
255
256 /* Block constant definitions */
struct block_defs {
	const char *name;
	bool has_dbg_bus[MAX_CHIP_IDS];	/* per-chip: block has a debug bus */
	bool associated_to_storm;

	/* Valid only if associated_to_storm is true */
	u32 storm_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS]; /* per-chip debug bus client */
	/* Debug bus control register addresses */
	u32 dbg_select_addr;
	u32 dbg_enable_addr;
	u32 dbg_shift_addr;
	u32 dbg_force_valid_addr;
	u32 dbg_force_frame_addr;
	bool has_reset_bit;

	/* If true, block is taken out of reset before dump */
	bool unreset;
	enum dbg_reset_regs reset_reg;

	/* Bit offset in reset register */
	u8 reset_bit_offset;
};
279
280 /* Reset register definitions */
struct reset_reg_defs {
	u32 addr;		/* reset register address */
	u32 unreset_val;	/* value used to take blocks out of reset - TODO confirm write semantics */
	bool exists[MAX_CHIP_IDS]; /* per-chip: register exists on this chip */
};
286
287 /* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;	/* HW encoding of the constraint operation */
	bool is_cyclic;
};
292
293 /* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;	/* fast vs. slow debug mode */
	u8 id_in_hw;		/* mode ID as programmed to HW */
};
299
/* GRC dump parameter definitions */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];	/* per-chip default value */
	u32 min;			/* minimum allowed value */
	u32 max;			/* maximum allowed value */
	bool is_preset;			/* true if the parameter is a preset */
	u32 exclude_all_preset_val;	/* value under the "exclude all" preset */
	u32 crash_preset_val;		/* value under the "crash" preset */
};
308
309 /* address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;			/* in 128b units (see comment above) */
	u32 num_entries[MAX_CHIP_IDS];	/* per-chip number of entries */
	u32 entry_width[MAX_CHIP_IDS];	/* per-chip entry width, in bits */
};
317
/* VFC RAM region definition */
struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;	/* first RAM row of this region */
	u32 num_rows;	/* number of rows in this region */
};
324
/* Big RAM (BRB/BTB/BMB) definition */
struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;	/* GRC parameter controlling this RAM's dump */
	u32 addr_reg_addr;		/* indirect address register */
	u32 data_reg_addr;		/* indirect data register */
	u32 num_of_blocks[MAX_CHIP_IDS]; /* per-chip block count - presumably in
					  * BIG_RAM_BLOCK_SIZE_BYTES units; verify at use site
					  */
};
334
/* PHY definition: base address plus TBUS indirect-access registers,
 * all relative addresses in bytes.
 */
struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};
353
/******************************** Constants **********************************/

#define MAX_LCIDS			320
#define MAX_LTIDS			320

#define NUM_IOR_SETS			2
#define IORS_PER_SET			176
#define IOR_SET_OFFSET(set_id)		((set_id) * 256)

#define BYTES_IN_DWORD			sizeof(u32)

/* Cyclic right shift of a val_width-bit value by 'amount' bits.
 * The whole expansion is parenthesized so the macro composes safely
 * inside larger expressions.
 */
#define SHR(val, val_width, amount) \
	((((val) | ((val) << (val_width))) >> (amount)) & \
	 ((1 << (val_width)) - 1))

/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type##_##field##_##OFFSET
#define FIELD_BIT_SIZE(type, field)	type##_##field##_##SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	(int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Sets the given bit-field in the dword array 'var'. Wrapped in
 * do/while (0) so both statements stay together when the macro is used
 * as the body of an unbraced if/else.
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= \
			(~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= \
			(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

/* Writes/reads 'arr_size' dwords to/from the same register address.
 * NOTE: relies on a loop variable 'i' declared at the call site.
 */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			ecore_wr(dev, ptt, addr, (arr)[i]); \
	} while (0)

#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			(arr)[i] = ecore_rd(dev, ptt, addr); \
	} while (0)

/* Compile-time check that an array has the expected element count */
#define CHECK_ARR_SIZE(arr, size) \
	OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))

#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#endif
#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
#endif

/* extra lines include a signature line + optional latency events line */
#ifndef NUM_DBG_LINES
#define NUM_EXTRA_DBG_LINES(block_desc) \
	(1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
#endif

#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines)	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_BLOCK_SIZE_BYTES	128
#define BIG_RAM_BLOCK_SIZE_DWORDS	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)

#define SEM_FAST_MODE6_SRC_ENABLE	0x10
#define SEM_FAST_MODE6_SRC_DISABLE	0x3f

#define SEM_SLOW_MODE1_DATA_ENABLE	0x1

#define VALUES_PER_CYCLE		4
#define MAX_CYCLE_VALUES_MASK		((1 << VALUES_PER_CYCLE) - 1)

#define MAX_DWORDS_PER_CYCLE		8

#define HW_ID_BITS			3

#define NUM_CALENDAR_SLOTS		16

#define MAX_TRIGGER_STATES		3
#define TRIGGER_SETS_PER_STATE		2
#define MAX_CONSTRAINTS			4

#define SEM_FILTER_CID_EN_MASK		0x008
#define SEM_FILTER_EID_MASK_EN_MASK	0x010
#define SEM_FILTER_EID_RANGE_EN_MASK	0x110

#define CHUNK_SIZE_IN_DWORDS		64
#define CHUNK_SIZE_IN_BYTES		DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)

#define INT_BUF_NUM_OF_LINES		192
#define INT_BUF_LINE_SIZE_IN_DWORDS	16
#define INT_BUF_SIZE_IN_DWORDS		(INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
#define INT_BUF_SIZE_IN_CHUNKS		(INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)

#define PCI_BUF_LINE_SIZE_IN_DWORDS	8
#define PCI_BUF_LINE_SIZE_IN_BYTES	DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)

#define TARGET_EN_MASK_PCI		0x3
#define TARGET_EN_MASK_NIG		0x4

#define PCI_REQ_CREDIT			1
#define PCI_PHYS_ADDR_TYPE		0

/* Opaque FID for a PCI function ('pci_func' is parenthesized so
 * expression arguments expand correctly).
 */
#define OPAQUE_FID(pci_func)		(((pci_func) << 4) | 0xff00)

#define RESET_REG_UNRESET_OFFSET	4

#define PCI_PKT_SIZE_IN_CHUNKS		1
#define PCI_PKT_SIZE_IN_BYTES		(PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)

#define NIG_PKT_SIZE_IN_CHUNKS		4

#define FLUSH_DELAY_MS			500
#define STALL_DELAY_MS			500

#define SRC_MAC_ADDR_LO16		0x0a0b
#define SRC_MAC_ADDR_HI32		0x0c0d0e0f
#define ETH_TYPE			0x1000

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	8

#define FW_IMG_KUKU			0
#define FW_IMG_MAIN			1
#define FW_IMG_L2B			2

#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS		2
#endif
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS		(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS		4
#endif
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS		(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define SEMI_SYNC_FIFO_POLLING_DELAY_MS	5
#define SEMI_SYNC_FIFO_POLLING_COUNT	20

#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * PROTECTION_OVERRIDE_ELEMENT_DWORDS)

/* Scratchpad address of the MCP trace offset/size word */
#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))

#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
537
538
539 /***************************** Constant Arrays *******************************/
540
/* A debug data buffer: pointer + size */
struct dbg_array {
	const u32 *ptr;		/* buffer data */
	u32 size_in_dwords;	/* buffer size, in dwords */
};
545
546 /* Debug arrays */
#ifdef USE_DBG_BIN_FILE
/* Initialized empty here; presumably populated elsewhere at runtime
 * from the debug bin file - confirm at the buffer-setup call site.
 */
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
#else
/* Statically initialized debug arrays. Entry order must match the
 * BIN_BUF_DBG_* buffer type IDs named in the comments below.
 */
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {

	/* BIN_BUF_DBG_MODE_TREE */
	{ (const u32 *)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},

	/* BIN_BUF_DBG_DUMP_REG */
	{ dump_reg, OSAL_ARRAY_SIZE(dump_reg) },

	/* BIN_BUF_DBG_DUMP_MEM */
	{ dump_mem, OSAL_ARRAY_SIZE(dump_mem) },

	/* BIN_BUF_DBG_IDLE_CHK_REGS */
	{ idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },

	/* BIN_BUF_DBG_IDLE_CHK_IMMS */
	{ idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },

	/* BIN_BUF_DBG_IDLE_CHK_RULES */
	{ idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },

	/* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_BLOCKS */
	{ attn_block, OSAL_ARRAY_SIZE(attn_block) },

	/* BIN_BUF_DBG_ATTN_REGSS */
	{ attn_reg, OSAL_ARRAY_SIZE(attn_reg) },

	/* BIN_BUF_DBG_ATTN_INDEXES */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_BLOCKS */
	{ dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },

	/* BIN_BUF_DBG_BUS_LINES */
	{ dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },

	/* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_PARSING_STRINGS */
	{ OSAL_NULL, 0 }
};
#endif
601
602 /* Chip constant definitions array */
/* Indexed by chip ID; each per_platform sub-array is indexed by
 * enum platform_ids (order shown in the inline comments).
 */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{ "bb",

		/* ASIC */
		{ { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* FPGA */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },

	{ "k2",

		/* ASIC */
		{ { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* FPGA - note: 8 PFs rather than MAX_NUM_PFS_K2 */
		{ MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } }
};
632
633 /* Storm constant definitions array */
/* Positional initializers follow struct storm_defs:
 * letter, block_id, dbg_client_id[2], has_vfc,
 * sem_fast_mem, sem_frame_mode, sem_slow_enable, sem_slow_mode,
 * sem_slow_mode1_conf, sem_sync_dbg_empty, sem_slow_dbg_empty,
 * cm_ctx_wr, then four (lid_size, rd_addr) pairs:
 * conn AG, conn ST, task AG, task ST.
 */
static struct storm_defs s_storm_defs[] = {

	/* Tstorm */
	{	'T', BLOCK_TSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		TCM_REG_CTX_RBC_ACCS,
		4, TCM_REG_AGG_CON_CTX,
		16, TCM_REG_SM_CON_CTX,
		2, TCM_REG_AGG_TASK_CTX,
		4, TCM_REG_SM_TASK_CTX },

	/* Mstorm */
	{	'M', BLOCK_MSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM }, false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
		MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		MCM_REG_CTX_RBC_ACCS,
		1, MCM_REG_AGG_CON_CTX,
		10, MCM_REG_SM_CON_CTX,
		2, MCM_REG_AGG_TASK_CTX,
		7, MCM_REG_SM_TASK_CTX },

	/* Ustorm */
	{	'U', BLOCK_USEM,
		{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
		USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
		UCM_REG_CTX_RBC_ACCS,
		2, UCM_REG_AGG_CON_CTX,
		13, UCM_REG_SM_CON_CTX,
		3, UCM_REG_AGG_TASK_CTX,
		3, UCM_REG_SM_TASK_CTX },

	/* Xstorm */
	{	'X', BLOCK_XSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
		XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		XCM_REG_CTX_RBC_ACCS,
		9, XCM_REG_AGG_CON_CTX,
		15, XCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 },

	/* Ystorm */
	{	'Y', BLOCK_YSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY }, false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
		/* NOTE(review): slow_dbg_empty below uses TSEM_, not YSEM_ -
		 * also present in upstream vendor sources; confirm intentional.
		 */
		YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		YCM_REG_CTX_RBC_ACCS,
		2, YCM_REG_AGG_CON_CTX,
		3, YCM_REG_SM_CON_CTX,
		2, YCM_REG_AGG_TASK_CTX,
		12, YCM_REG_SM_TASK_CTX },

	/* Pstorm */
	{	'P', BLOCK_PSEM,
		{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
		PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		PCM_REG_CTX_RBC_ACCS,
		0, 0,
		10, PCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 }
};
714
715 /* Block definitions array */
716
717 static struct block_defs block_grc_defs = {
718         "grc", { true, true }, false, 0,
719         { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
720         GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
721         GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
722         GRC_REG_DBG_FORCE_FRAME,
723         true, false, DBG_RESET_REG_MISC_PL_UA, 1 };
724
725 static struct block_defs block_miscs_defs = {
726         "miscs", { false, false }, false, 0,
727         { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
728         0, 0, 0, 0, 0,
729         false, false, MAX_DBG_RESET_REGS, 0 };
730
731 static struct block_defs block_misc_defs = {
732         "misc", { false, false }, false, 0,
733         { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
734         0, 0, 0, 0, 0,
735         false, false, MAX_DBG_RESET_REGS, 0 };
736
737 static struct block_defs block_dbu_defs = {
738         "dbu", { false, false }, false, 0,
739         { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
740         0, 0, 0, 0, 0,
741         false, false, MAX_DBG_RESET_REGS, 0 };
742
743 static struct block_defs block_pglue_b_defs = {
744         "pglue_b", { true, true }, false, 0,
745         { DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
746         PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
747         PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
748         PGLUE_B_REG_DBG_FORCE_FRAME,
749         true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };
750
751 static struct block_defs block_cnig_defs = {
752         "cnig", { false, true }, false, 0,
753         { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
754         CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
755         CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
756         CNIG_REG_DBG_FORCE_FRAME_K2_E5,
757         true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };
758
759 static struct block_defs block_cpmu_defs = {
760         "cpmu", { false, false }, false, 0,
761         { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
762         0, 0, 0, 0, 0,
763         true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };
764
765 static struct block_defs block_ncsi_defs = {
766         "ncsi", { true, true }, false, 0,
767         { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
768         NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
769         NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
770         NCSI_REG_DBG_FORCE_FRAME,
771         true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };
772
773 static struct block_defs block_opte_defs = {
774         "opte", { false, false }, false, 0,
775         { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
776         0, 0, 0, 0, 0,
777         true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };
778
779 static struct block_defs block_bmb_defs = {
780         "bmb", { true, true }, false, 0,
781         { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB },
782         BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
783         BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
784         BMB_REG_DBG_FORCE_FRAME,
785         true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };
786
787 static struct block_defs block_pcie_defs = {
788         "pcie", { false, true }, false, 0,
789         { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
790         PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
791         PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
792         PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
793         false, false, MAX_DBG_RESET_REGS, 0 };
794
795 static struct block_defs block_mcp_defs = {
796         "mcp", { false, false }, false, 0,
797         { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
798         0, 0, 0, 0, 0,
799         false, false, MAX_DBG_RESET_REGS, 0 };
800
801 static struct block_defs block_mcp2_defs = {
802         "mcp2", { true, true }, false, 0,
803         { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
804         MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
805         MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
806         MCP2_REG_DBG_FORCE_FRAME,
807         false, false, MAX_DBG_RESET_REGS, 0 };
808
809 static struct block_defs block_pswhst_defs = {
810         "pswhst", { true, true }, false, 0,
811         { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
812         PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
813         PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
814         PSWHST_REG_DBG_FORCE_FRAME,
815         true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
816
817 static struct block_defs block_pswhst2_defs = {
818         "pswhst2", { true, true }, false, 0,
819         { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
820         PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
821         PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
822         PSWHST2_REG_DBG_FORCE_FRAME,
823         true, false, DBG_RESET_REG_MISC_PL_HV, 0 };
824
825 static struct block_defs block_pswrd_defs = {
826         "pswrd", { true, true }, false, 0,
827         { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
828         PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
829         PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
830         PSWRD_REG_DBG_FORCE_FRAME,
831         true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
832
833 static struct block_defs block_pswrd2_defs = {
834         "pswrd2", { true, true }, false, 0,
835         { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
836         PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
837         PSWRD2_REG_DBG_SHIFT,   PSWRD2_REG_DBG_FORCE_VALID,
838         PSWRD2_REG_DBG_FORCE_FRAME,
839         true, false, DBG_RESET_REG_MISC_PL_HV, 2 };
840
/*
 * Debug-bus / reset descriptors (struct block_defs) for the PSWWR,
 * PSWWR2, PSWRQ and PSWRQ2 blocks. PSWWR2 exposes no debug bus
 * (all-false/zero debug fields) but still has a reset register/bit.
 * Field order is positional; see the struct block_defs declaration
 * (outside this chunk) for exact field meanings.
 */
static struct block_defs block_pswwr_defs = {
	"pswwr", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
	PSWWR_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswwr2_defs = {
	"pswwr2", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswrq_defs = {
	"pswrq", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
	PSWRQ_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pswrq2_defs = {
	"pswrq2", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
	PSWRQ2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };
870
871 static struct block_defs block_pglcs_defs =     {
872         "pglcs", { false, true }, false, 0,
873         { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
874         PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
875         PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
876         PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
877         true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };
878
879 static struct block_defs block_ptu_defs ={
880         "ptu", { true, true }, false, 0,
881         { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
882         PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
883         PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
884         PTU_REG_DBG_FORCE_FRAME,
885         true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };
886
/*
 * Debug-bus / reset descriptors (struct block_defs) for DMAE, the
 * per-storm CM blocks (TCM..PCM, each tied to its DBG_*STORM_ID), QM,
 * TM, DORQ, BRB, SRC, PRS, and the first SDM blocks (TSDM, MSDM).
 * Positional initializers; field order is defined by the struct
 * block_defs declaration outside this chunk.
 */
static struct block_defs block_dmae_defs = {
	"dmae", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
	DMAE_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };

static struct block_defs block_tcm_defs = {
	"tcm", { true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
	TCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };

static struct block_defs block_mcm_defs = {
	"mcm", { true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
	MCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };

static struct block_defs block_ucm_defs = {
	"ucm", { true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
	UCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };

static struct block_defs block_xcm_defs = {
	"xcm", { true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
	XCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };

static struct block_defs block_ycm_defs = {
	"ycm", { true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
	YCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };

static struct block_defs block_pcm_defs = {
	"pcm", { true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
	PCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };

static struct block_defs block_qm_defs = {
	"qm", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ },
	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
	QM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };

static struct block_defs block_tm_defs = {
	"tm", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
	TM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };

static struct block_defs block_dorq_defs = {
	"dorq", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
	DORQ_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };

static struct block_defs block_brb_defs = {
	"brb", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
	BRB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };

static struct block_defs block_src_defs = {
	"src", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
	SRC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };

static struct block_defs block_prs_defs = {
	"prs", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
	PRS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };

static struct block_defs block_tsdm_defs = {
	"tsdm", { true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
	TSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };

static struct block_defs block_msdm_defs = {
	"msdm", { true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
	MSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };
1006
1007 static struct block_defs block_usdm_defs = {
1008         "usdm", { true, true }, true, DBG_USTORM_ID,
1009         { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
1010         USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
1011         USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
1012         USDM_REG_DBG_FORCE_FRAME,
1013         true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
1014         };
/*
 * Debug-bus / reset descriptors (struct block_defs) for the remaining
 * per-storm SDM blocks (XSDM, YSDM, PSDM) and the per-storm SEM blocks
 * (TSEM..PSEM), each tied to its DBG_*STORM_ID. Positional initializers;
 * field order is defined by the struct block_defs declaration outside
 * this chunk.
 */
static struct block_defs block_xsdm_defs = {
	"xsdm", { true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
	XSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };

static struct block_defs block_ysdm_defs = {
	"ysdm", { true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
	YSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };

static struct block_defs block_psdm_defs = {
	"psdm", { true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
	PSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };

static struct block_defs block_tsem_defs = {
	"tsem", { true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
	TSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };

static struct block_defs block_msem_defs = {
	"msem", { true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
	MSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };

static struct block_defs block_usem_defs = {
	"usem", { true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
	USEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };

static struct block_defs block_xsem_defs = {
	"xsem", { true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
	XSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };

static struct block_defs block_ysem_defs = {
	"ysem", { true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
	YSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };

static struct block_defs block_psem_defs = {
	"psem", { true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
	PSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };
1086
/*
 * Debug-bus / reset descriptors (struct block_defs) for RSS, the loader
 * blocks (TMLD, MULD, YULD, XYLD), PRM, the PBF PB blocks and RPB.
 * YULD uses the _BB_K2 register variants (present on BB and K2 only).
 * Positional initializers; field order is defined by the struct
 * block_defs declaration outside this chunk.
 */
static struct block_defs block_rss_defs = {
	"rss", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
	RSS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };

static struct block_defs block_tmld_defs = {
	"tmld", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
	TMLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };

static struct block_defs block_muld_defs = {
	"muld", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
	MULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };

static struct block_defs block_yuld_defs = {
	"yuld", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
	YULD_REG_DBG_FORCE_FRAME_BB_K2,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };

static struct block_defs block_xyld_defs = {
	"xyld", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
	XYLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };

static struct block_defs block_prm_defs = {
	"prm", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
	PRM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };

static struct block_defs block_pbf_pb1_defs = {
	"pbf_pb1", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
	PBF_PB1_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };

static struct block_defs block_pbf_pb2_defs = {
	"pbf_pb2", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
	PBF_PB2_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };

static struct block_defs block_rpb_defs = {
	"rpb", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
	RPB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };
1158
/*
 * Debug-bus / reset descriptors (struct block_defs) for BTB, PBF, the
 * DIF blocks (RDIF, TDIF), CDU, the CFC blocks (CCFC, TCFC), IGU and
 * CAU. Positional initializers; field order is defined by the struct
 * block_defs declaration outside this chunk.
 */
static struct block_defs block_btb_defs = {
	"btb", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV },
	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
	BTB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };

static struct block_defs block_pbf_defs = {
	"pbf", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
	PBF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };

static struct block_defs block_rdif_defs = {
	"rdif", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
	RDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };

static struct block_defs block_tdif_defs = {
	"tdif", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
	TDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };

static struct block_defs block_cdu_defs = {
	"cdu", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
	CDU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };

static struct block_defs block_ccfc_defs = {
	"ccfc", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
	CCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };

static struct block_defs block_tcfc_defs = {
	"tcfc", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
	TCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };

static struct block_defs block_igu_defs = {
	"igu", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
	IGU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };

static struct block_defs block_cau_defs = {
	"cau", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
	CAU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };
1230
/*
 * Debug-bus / reset descriptor (struct block_defs) for the UMAC block.
 * K2/E5-only debug bus (no BB client; registers carry the _K2_E5
 * suffix). Positional initializer; field order is defined by the struct
 * block_defs declaration outside this chunk.
 */
static struct block_defs block_umac_defs = {
	"umac", { false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };
1238
1239 static struct block_defs block_xmac_defs = {
1240         "xmac", { false, false }, false, 0,
1241         { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
1242         0, 0, 0, 0, 0,
1243         false, false, MAX_DBG_RESET_REGS, 0     };
1244
/*
 * Debug-bus / reset descriptors (struct block_defs) for DBG, NIG, WOL,
 * BMBN, IPC, NWM, NWS and MS. Several of these are K2/E5-only on the
 * debug bus (_K2_E5 register suffix, no BB client) or have no debug bus
 * at all (all-zero debug fields). Positional initializers; field order
 * is defined by the struct block_defs declaration outside this chunk.
 */
static struct block_defs block_dbg_defs = {
	"dbg", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };

static struct block_defs block_nig_defs = {
	"nig", { true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
	NIG_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };

static struct block_defs block_wol_defs = {
	"wol", { false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
	WOL_REG_DBG_FORCE_FRAME_K2_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };

static struct block_defs block_bmbn_defs = {
	"bmbn", { false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB },
	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ipc_defs = {
	"ipc", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };

static struct block_defs block_nwm_defs = {
	"nwm", { false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
	NWM_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };

static struct block_defs block_nws_defs = {
	"nws", { false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
	NWS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };

static struct block_defs block_ms_defs = {
	"ms", { false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
	MS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };
1304
/*
 * Debug-bus / reset descriptors (struct block_defs) for PHY_PCIE, LED,
 * AVS_WRAP, and a set of placeholder blocks (RGFS, RGSRC, TGFS, TGSRC,
 * PTLD, YPLD, MISC_AEU, BAR0_MAP) that have neither a debug bus nor a
 * reset register (all-false/zero entries). Positional initializers;
 * field order is defined by the struct block_defs declaration outside
 * this chunk.
 */
static struct block_defs block_phy_pcie_defs = {
	"phy_pcie", { false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_led_defs = {
	"led", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };

static struct block_defs block_avs_wrap_defs = {
	"avs_wrap", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };

static struct block_defs block_rgfs_defs = {
	"rgfs", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_rgsrc_defs = {
	"rgsrc", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_tgfs_defs = {
	"tgfs", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_tgsrc_defs = {
	"tgsrc", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ptld_defs = {
	"ptld", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ypld_defs = {
	"ypld", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_aeu_defs = {
	"misc_aeu", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_bar0_map_defs = {
	"bar0_map", { false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };
1372
1373
1374 static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
1375         &block_grc_defs,
1376         &block_miscs_defs,
1377         &block_misc_defs,
1378         &block_dbu_defs,
1379         &block_pglue_b_defs,
1380         &block_cnig_defs,
1381         &block_cpmu_defs,
1382         &block_ncsi_defs,
1383         &block_opte_defs,
1384         &block_bmb_defs,
1385         &block_pcie_defs,
1386         &block_mcp_defs,
1387         &block_mcp2_defs,
1388         &block_pswhst_defs,
1389         &block_pswhst2_defs,
1390         &block_pswrd_defs,
1391         &block_pswrd2_defs,
1392         &block_pswwr_defs,
1393         &block_pswwr2_defs,
1394         &block_pswrq_defs,
1395         &block_pswrq2_defs,
1396         &block_pglcs_defs,
1397         &block_dmae_defs,
1398         &block_ptu_defs,
1399         &block_tcm_defs,
1400         &block_mcm_defs,
1401         &block_ucm_defs,
1402         &block_xcm_defs,
1403         &block_ycm_defs,
1404         &block_pcm_defs,
1405         &block_qm_defs,
1406         &block_tm_defs,
1407         &block_dorq_defs,
1408         &block_brb_defs,
1409         &block_src_defs,
1410         &block_prs_defs,
1411         &block_tsdm_defs,
1412         &block_msdm_defs,
1413         &block_usdm_defs,
1414         &block_xsdm_defs,
1415         &block_ysdm_defs,
1416         &block_psdm_defs,
1417         &block_tsem_defs,
1418         &block_msem_defs,
1419         &block_usem_defs,
1420         &block_xsem_defs,
1421         &block_ysem_defs,
1422         &block_psem_defs,
1423         &block_rss_defs,
1424         &block_tmld_defs,
1425         &block_muld_defs,
1426         &block_yuld_defs,
1427         &block_xyld_defs,
1428         &block_ptld_defs,
1429         &block_ypld_defs,
1430         &block_prm_defs,
1431         &block_pbf_pb1_defs,
1432         &block_pbf_pb2_defs,
1433         &block_rpb_defs,
1434         &block_btb_defs,
1435         &block_pbf_defs,
1436         &block_rdif_defs,
1437         &block_tdif_defs,
1438         &block_cdu_defs,
1439         &block_ccfc_defs,
1440         &block_tcfc_defs,
1441         &block_igu_defs,
1442         &block_cau_defs,
1443         &block_rgfs_defs,
1444         &block_rgsrc_defs,
1445         &block_tgfs_defs,
1446         &block_tgsrc_defs,
1447         &block_umac_defs,
1448         &block_xmac_defs,
1449         &block_dbg_defs,
1450         &block_nig_defs,
1451         &block_wol_defs,
1452         &block_bmbn_defs,
1453         &block_ipc_defs,
1454         &block_nwm_defs,
1455         &block_nws_defs,
1456         &block_ms_defs,
1457         &block_phy_pcie_defs,
1458         &block_led_defs,
1459         &block_avs_wrap_defs,
1460         &block_misc_aeu_defs,
1461         &block_bar0_map_defs,
1462
1463 };
1464
1465
1466 /* Constraint operation types */
1467 static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
1468
1469         /* DBG_BUS_CONSTRAINT_OP_EQ */
1470         { 0, false },
1471         
1472         /* DBG_BUS_CONSTRAINT_OP_NE */
1473         { 5, false },
1474         
1475         /* DBG_BUS_CONSTRAINT_OP_LT */
1476         { 1, false },
1477         
1478         /* DBG_BUS_CONSTRAINT_OP_LTC */
1479         { 1, true },
1480         
1481         /* DBG_BUS_CONSTRAINT_OP_LE */
1482         { 2, false },
1483         
1484         /* DBG_BUS_CONSTRAINT_OP_LEC */
1485         { 2, true },
1486         
1487         /* DBG_BUS_CONSTRAINT_OP_GT */
1488         { 4, false },
1489         
1490         /* DBG_BUS_CONSTRAINT_OP_GTC */
1491         { 4, true },
1492         
1493         /* DBG_BUS_CONSTRAINT_OP_GE */
1494         { 3, false },
1495         
1496         /* DBG_BUS_CONSTRAINT_OP_GEC */
1497         { 3, true }
1498 };
1499
/*
 * Human-readable debug bus target names, indexed by the
 * DBG_BUS_TARGET_ID_* values named in the per-entry comments.
 * Trailing whitespace removed; strings unchanged.
 */
static const char* s_dbg_target_names[] = {

	/* DBG_BUS_TARGET_ID_INT_BUF */
	"int-buf",

	/* DBG_BUS_TARGET_ID_NIG */
	"nw",

	/* DBG_BUS_TARGET_ID_PCI */
	"pci-buf"
};
1511
/*
 * Storm debug-bus mode descriptors, indexed by the DBG_BUS_STORM_MODE_*
 * values named in the per-entry comments. Each entry holds a mode name,
 * a boolean flag and a numeric mode id; exact field semantics are in
 * the struct storm_mode_defs declaration, which is outside this chunk
 * -- TODO(review): confirm.
 */
static struct storm_mode_defs s_storm_mode_defs[] = {

	/* DBG_BUS_STORM_MODE_PRINTF */
	{ "printf", true, 0 },

	/* DBG_BUS_STORM_MODE_PRAM_ADDR */
	{ "pram_addr", true, 1 },

	/* DBG_BUS_STORM_MODE_DRA_RW */
	{ "dra_rw", true, 2 },

	/* DBG_BUS_STORM_MODE_DRA_W */
	{ "dra_w", true, 3 },

	/* DBG_BUS_STORM_MODE_LD_ST_ADDR */
	{ "ld_st_addr", true, 4 },

	/* DBG_BUS_STORM_MODE_DRA_FSM */
	{ "dra_fsm", true, 5 },

	/* DBG_BUS_STORM_MODE_RH */
	{ "rh", true, 6 },

	/* DBG_BUS_STORM_MODE_FOC */
	{ "foc", false, 1 },

	/* DBG_BUS_STORM_MODE_EXT_STORE */
	{ "ext_store", false, 3 }
};
1541
/*
 * Platform descriptors, indexed by the PLATFORM_* values named in the
 * per-entry comments. Each entry pairs a platform name with a numeric
 * factor (presumably a delay/timeout multiplier: 1 for ASIC, 2000 for
 * emulation, 200 for FPGA) -- TODO(review): confirm against the struct
 * platform_defs declaration, which is outside this chunk.
 */
static struct platform_defs s_platform_defs[] = {

	/* PLATFORM_ASIC */
	{ "asic", 1 },

	/* PLATFORM_EMUL_FULL */
	{ "emul_full", 2000 },

	/* PLATFORM_EMUL_REDUCED */
	{ "emul_reduced", 2000 },

	/* PLATFORM_FPGA */
	{ "fpga", 200 }
};
1556
/*
 * GRC dump parameter descriptors, indexed by the DBG_GRC_PARAM_* values
 * named in the per-entry comments. Positional fields appear to be:
 * per-chip default values, min value, max value, an is-preset flag, and
 * two preset values -- TODO(review): confirm field order against the
 * struct grc_param_defs declaration, which is outside this chunk.
 */
static struct grc_param_defs s_grc_param_defs[] = {

	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{ { 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{ { 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{ { 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{ { 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{ { 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{ { 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_REGS */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_RAM */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_IOR */
	{ { 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_VFC */
	{ { 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_ILT */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_RSS */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CAU */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_QM */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_MCP */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_RESERVED */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CFC */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_IGU */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BRB */
	{ { 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BTB */
	{ { 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BMB */
	{ { 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_NIG */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_MULD */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PRS */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_TM */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_SDM */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_DIF */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_UNSTALL */
	{ { 0, 0 }, 0, 1, false, 0, 0 },

	/* DBG_GRC_PARAM_NUM_LCIDS */
	{ { MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },

	/* DBG_GRC_PARAM_NUM_LTIDS */
	{ { MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{ { 0, 0 }, 0, 1, true, 0, 0 },

	/* DBG_GRC_PARAM_CRASH */
	{ { 0, 0 }, 0, 1, true, 0, 0 },

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{ { 0, 0 }, 0, 1, false, 1, 0 },

	/* DBG_GRC_PARAM_DUMP_CM */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PHY */
	{ { 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_NO_MCP */
	{ { 0, 0 }, 0, 1, false, 0, 0 },

	/* DBG_GRC_PARAM_NO_FW_VER */
	{ { 0, 0 }, 0, 1, false, 0, 0 }
};
1682
/* RSS memory definitions. Each entry provides a memory name, a type name,
 * and a base address, followed by two per-chip value pairs (indexed by chip
 * ID, BB/K2) - presumably the number of entries and the entry width in bits;
 * confirm against the struct rss_mem_defs declaration.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{ "rss_mem_cid", "rss_cid", 0,
	{ 256, 320 },
	{ 32, 32 } },

	{ "rss_mem_key_msb", "rss_key", 1024,
	{ 128, 208 },
	{ 256, 256 } },

	{ "rss_mem_key_lsb", "rss_key", 2048,
	{ 128, 208 },
	{ 64, 64 } },

	{ "rss_mem_info", "rss_info", 3072,
	{ 128, 208 },
	{ 16, 16 } },

	{ "rss_mem_ind", "rss_ind", 4096,
	{ 16384, 26624 },
	{ 16, 16 } }
};
1704
/* VFC RAM definitions: memory name, type name, base offset and size
 * (units per struct vfc_ram_defs - likely lines; confirm at declaration).
 */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{ "vfc_ram_tt1", "vfc_ram", 0, 512 },
	{ "vfc_ram_mtt2", "vfc_ram", 512, 128 },
	{ "vfc_ram_stt2", "vfc_ram", 640, 32 },
	{ "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
};
1711
/* Big RAM definitions, one entry per big RAM type (BRB/BTB/BMB): instance
 * name, the two memory-group IDs it covers, the GRC param that gates its
 * dump, the indirect address/data register pair used to read it, and a
 * per-chip (BB/K2) size pair.
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	  { 4800, 5632 } },

	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	  { 2880, 3680 } },

	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	  { 1152, 1152 } }
};
1722
/* Reset register definitions, indexed by enum dbg_reset_regs: register GRC
 * address, an unreset value, and per-chip existence flags (indexed by chip
 * ID). The flags are consulted before reading the register (see
 * ecore_update_blocks_reset_state) - e.g. MISCS_REG_RESET_PL_HV_2 exists
 * only on K2.
 */
static struct reset_reg_defs s_reset_regs_defs[] = {

	/* DBG_RESET_REG_MISCS_PL_UA */
	{ MISCS_REG_RESET_PL_UA, 0x0, { true, true } },

	/* DBG_RESET_REG_MISCS_PL_HV */
	{ MISCS_REG_RESET_PL_HV, 0x0, { true, true } },

	/* DBG_RESET_REG_MISCS_PL_HV_2 */
	{ MISCS_REG_RESET_PL_HV_2_K2_E5, 0x0, { false, true } },

	/* DBG_RESET_REG_MISC_PL_UA */
	{ MISC_REG_RESET_PL_UA, 0x0, { true, true } },

	/* DBG_RESET_REG_MISC_PL_HV */
	{ MISC_REG_RESET_PL_HV, 0x0, { true, true } },

	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, { true, true } },

	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, { true, true } },

	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2, { true, true } },
};
1749
/* PHY definitions: PHY name, base address, and the TBUS address (low/high)
 * and TBUS data (low/high) register offsets used to access it. All entries
 * use K2/E5-only registers.
 */
static struct phy_defs s_phy_defs[] = {
	{ "nw_phy", NWS_REG_NWS_CMU_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
	{ "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
	{ "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
	{ "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
};
1756
/* The order in which the dwords of a PCI buffer line should be read:
 * adjacent dwords are swapped pairwise (1,0,3,2,...).
 */
static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
1759
1760 /******************************** Variables **********************************/
1761
/* The version of the calling app. Must be set to a non-zero value by the
 * application before ecore_dbg_dev_init() will succeed (otherwise
 * DBG_STATUS_APP_VERSION_NOT_SET is returned).
 */
static u32 s_app_ver;
1764
1765 /**************************** Private Functions ******************************/
1766
/* Verifies that each definition table has one entry per value of its
 * corresponding enum. CHECK_ARR_SIZE presumably expands to a compile-time
 * size check - confirm at the macro definition.
 */
static void ecore_static_asserts(void)
{
	CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
	CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
	CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
	CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
	CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
	CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
	CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
	CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
	CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
	CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
	CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
	CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
}
1782
1783 /* Reads and returns a single dword from the specified unaligned buffer. */
1784 static u32 ecore_read_unaligned_dword(u8 *buf)
1785 {
1786         u32 dword;
1787
1788         OSAL_MEMCPY((u8*)&dword, buf, sizeof(dword));
1789         return dword;
1790 }
1791
1792 /* Returns the difference in bytes between the specified physical addresses.
1793  * Assumes that the first address is bigger then the second, and that the
1794  * difference is a 32-bit value.
1795  */
1796 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1797                                                                 struct dbg_bus_mem_addr *b)
1798 {
1799         return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1800 }
1801
1802 /* Sets the value of the specified GRC param */
1803 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1804                                  enum dbg_grc_params grc_param,
1805                                  u32 val)
1806 {
1807         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1808
1809         dev_data->grc.param_val[grc_param] = val;
1810 }
1811
1812 /* Returns the value of the specified GRC param */
1813 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1814                                                            enum dbg_grc_params grc_param)
1815 {
1816         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1817
1818         return dev_data->grc.param_val[grc_param];
1819 }
1820
1821 /* Initializes the GRC parameters */
1822 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1823 {
1824         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1825
1826         if (!dev_data->grc.params_initialized) {
1827                 ecore_dbg_grc_set_params_default(p_hwfn);
1828                 dev_data->grc.params_initialized = 1;
1829         }
1830 }
1831
/* Initializes debug data for the specified device: detects the chip
 * (K2 or BB B0) and platform (ASIC/emulation/FPGA), enables the matching
 * debug modes, and initializes the GRC dump parameters.
 *
 * Returns DBG_STATUS_OK on success (or if already initialized),
 * DBG_STATUS_APP_VERSION_NOT_SET if s_app_ver was never set, or
 * DBG_STATUS_UNKNOWN_CHIP if neither chip/platform is recognized.
 */
static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
										  struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	/* Idempotent: only the first call does any work */
	if (dev_data->initialized)
		return DBG_STATUS_OK;

	/* The calling application must have registered its version first */
	if (!s_app_ver)
		return DBG_STATUS_APP_VERSION_NOT_SET;

	/* Detect chip and enable the corresponding mode bit */
	if (ECORE_IS_K2(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
	}
	else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_BB;
		dev_data->mode_enable[MODE_BB] = 1;
	}
	else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

#ifdef ASIC_ONLY
	dev_data->platform_id = PLATFORM_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;
#else
	/* Detect platform at runtime when emulation/FPGA builds are possible */
	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
		dev_data->platform_id = PLATFORM_ASIC;
		dev_data->mode_enable[MODE_ASIC] = 1;
	}
	else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		/* A scratch-register bit distinguishes full from reduced
		 * emulation - meaning of bit 29 inferred from usage; confirm
		 * against platform documentation.
		 */
		if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
			dev_data->platform_id = PLATFORM_EMUL_FULL;
			dev_data->mode_enable[MODE_EMUL_FULL] = 1;
		}
		else {
			dev_data->platform_id = PLATFORM_EMUL_REDUCED;
			dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
		}
	}
	else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		dev_data->platform_id = PLATFORM_FPGA;
		dev_data->mode_enable[MODE_FPGA] = 1;
	}
	else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}
#endif

	/* Initializes the GRC parameters */
	ecore_dbg_grc_init_params(p_hwfn);

	dev_data->initialized = true;

	return DBG_STATUS_OK;
}
1890
1891 static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1892                                                                                                                   enum block_id block_id)
1893 {
1894         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1895
1896         return (struct dbg_bus_block*)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1897 }
1898
/* Returns the debug-bus line descriptor for the block's currently selected
 * line, or OSAL_NULL for the signature line (line 0), the latency line
 * (line 1, on blocks that have latency events) and non-existing lines
 * (line_num >= NUM_DBG_LINES of the block).
 */
static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
												enum block_id block_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_block_data *block_bus;
	struct dbg_bus_block *block_desc;

	block_bus = &dev_data->bus.blocks[block_id];
	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);

	if (!block_bus->line_num ||
		(block_bus->line_num == 1 && block_desc->has_latency_events) ||
		block_bus->line_num >= NUM_DBG_LINES(block_desc))
		return OSAL_NULL;

	/* Skip the extra (signature/latency) lines, which have no descriptor */
	return (struct dbg_bus_line*)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
}
1917
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer. On failure (e.g. an
 * invalid size read from the chip), *fw_info is left zeroed.
 */
static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
							   struct ecore_ptt *p_ptt,
							   u8 storm_id,
							   struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, *dest;

	OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
	OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - sizeof(fw_info_location);
	dest = (u32*)&fw_info_location;

	/* GRC reads are dword-wide, so fill the struct one dword at a time */
	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
		dest[i] = ecore_rd(p_hwfn, p_ptt, addr);

	/* Read FW version info from Storm RAM, but only if the size read from
	 * the chip is sane (non-zero and fits in the output struct).
	 */
	if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
		addr = fw_info_location.grc_addr;
		dest = (u32*)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
			dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
	}
}
1950
1951 /* Dumps the specified string to the specified buffer.
1952  * Returns the dumped size in bytes.
1953  */
1954 static u32 ecore_dump_str(char *dump_buf,
1955                                                   bool dump,
1956                                                   const char *str)
1957 {
1958         if (dump)
1959                 OSAL_STRCPY(dump_buf, str);
1960
1961         return (u32)OSAL_STRLEN(str) + 1;
1962 }
1963
1964 /* Dumps zeros to align the specified buffer to dwords.
1965  * Returns the dumped size in bytes.
1966  */
1967 static u32 ecore_dump_align(char *dump_buf,
1968                                                         bool dump,
1969                                                         u32 byte_offset)
1970 {
1971         u8 offset_in_dword, align_size;
1972
1973         offset_in_dword = (u8)(byte_offset & 0x3);
1974         align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1975
1976         if (dump && align_size)
1977                 OSAL_MEMSET(dump_buf, 0, align_size);
1978
1979         return align_size;
1980 }
1981
1982 /* Writes the specified string param to the specified buffer.
1983  * Returns the dumped size in dwords.
1984  */
1985 static u32 ecore_dump_str_param(u32 *dump_buf,
1986                                                                 bool dump,
1987                                                                 const char *param_name,
1988                                                                 const char *param_val)
1989 {
1990         char *char_buf = (char*)dump_buf;
1991         u32 offset = 0;
1992
1993         /* Dump param name */
1994         offset += ecore_dump_str(char_buf + offset, dump, param_name);
1995
1996         /* Indicate a string param value */
1997         if (dump)
1998                 *(char_buf + offset) = 1;
1999         offset++;
2000
2001         /* Dump param value */
2002         offset += ecore_dump_str(char_buf + offset, dump, param_val);
2003
2004         /* Align buffer to next dword */
2005         offset += ecore_dump_align(char_buf + offset, dump, offset);
2006
2007         return BYTES_TO_DWORDS(offset);
2008 }
2009
2010 /* Writes the specified numeric param to the specified buffer.
2011  * Returns the dumped size in dwords.
2012  */
2013 static u32 ecore_dump_num_param(u32 *dump_buf,
2014                                                                 bool dump,
2015                                                                 const char *param_name,
2016                                                                 u32 param_val)
2017 {
2018         char *char_buf = (char*)dump_buf;
2019         u32 offset = 0;
2020
2021         /* Dump param name */
2022         offset += ecore_dump_str(char_buf + offset, dump, param_name);
2023
2024         /* Indicate a numeric param value */
2025         if (dump)
2026                 *(char_buf + offset) = 0;
2027         offset++;
2028
2029         /* Align buffer to next dword */
2030         offset += ecore_dump_align(char_buf + offset, dump, offset);
2031
2032         /* Dump param value (and change offset from bytes to dwords) */
2033         offset = BYTES_TO_DWORDS(offset);
2034         if (dump)
2035                 *(dump_buf + offset) = param_val;
2036         offset++;
2037
2038         return offset;
2039 }
2040
/* Reads the FW version and writes it as a param to the specified buffer.
 * Dumps three params: "fw-version", "fw-image" and "fw-timestamp". If
 * reading is skipped (dump not set, or the NO_FW_VER GRC param is set, or
 * all Storms are in reset), the empty-version placeholders are dumped.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt,
								   u32 *dump_buf,
								   bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { { 0 }, { 0 } };
	u32 offset = 0;

	if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW image/version from PRAM in a non-reset SEMI */
		bool found = false;
		u8 storm_id;

		/* Use the first Storm whose block is not in reset */
		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
			struct storm_defs *storm = &s_storm_defs[storm_id];

			/* Read FW version/image */
			if (dev_data->block_in_reset[storm->block_id])
				continue;

			/* Read FW info for the current Storm */
			ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

			/* Create FW version/image strings */
			if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
				DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
			switch (fw_info.ver.image_id) {
			case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
			case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
			case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
			default: OSAL_STRCPY(fw_img_str, "unknown"); break;
			}

			found = true;
		}
	}

	/* Dump FW version, image and timestamp */
	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);

	return offset;
}
2091
/* Reads the MFW version and writes it as an "mfw-version" param to the
 * specified buffer. The MFW is skipped (placeholder string dumped) on
 * emulation platforms or when the NO_FW_VER GRC param is set.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									u32 *dump_buf,
									bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
	bool is_emul;

	is_emul = dev_data->platform_id == PLATFORM_EMUL_FULL || dev_data->platform_id == PLATFORM_EMUL_REDUCED;

	if (dump && !is_emul && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
		global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));

		/* Format as "major_minor_rev_eng", one byte per field */
		if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
	}

	return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
2129
/* Writes a section header to the specified buffer.
 * A section header is encoded exactly like a numeric param whose value is
 * the number of params in the section.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_section_hdr(u32 *dump_buf,
								  bool dump,
								  const char *name,
								  u32 num_params)
{
	return ecore_dump_num_param(dump_buf, dump, name, num_params);
}
2140
/* Writes the common global params to the specified buffer: the
 * "global_params" section header followed by FW version, MFW version,
 * tools version, chip name, platform name and PCI function number.
 * num_specific_global_params is the number of additional params the caller
 * will dump after this call; it is folded into the section's param count.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
										   struct ecore_ptt *p_ptt,
										   u32 *dump_buf,
										   bool dump,
										   u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 num_params;

	/* Dump global params section header */
	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);

	/* Store params */
	offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);

	return offset;
}
2168
2169 /* Writes the "last" section (including CRC) to the specified buffer at the
2170  * given offset. Returns the dumped size in dwords.
2171  */
2172 static u32 ecore_dump_last_section(struct ecore_hwfn *p_hwfn,
2173                                                                    u32 *dump_buf,
2174                                                                    u32 offset,
2175                                                                    bool dump)
2176 {
2177         u32 start_offset = offset;
2178
2179         /* Dump CRC section header */
2180         offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2181
2182         /* Calculate CRC32 and add it to the dword after the "last" section */
2183         if (dump)
2184                 *(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8*)dump_buf, DWORDS_TO_BYTES(offset));
2185
2186         offset++;
2187
2188         return offset - start_offset;
2189 }
2190
/* Updates the cached per-block reset state: reads all reset registers that
 * exist on the current chip, then marks each block as in-reset when it has
 * a reset bit and that bit is clear.
 */
static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
	u32 i;

	/* Read reset registers (skipping registers absent on this chip) */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
			reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);

	/* Check if blocks are in reset: a cleared reset bit means in-reset */
	for (i = 0; i < MAX_BLOCK_ID; i++) {
		struct block_defs *block = s_block_defs[i];

		dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
	}
}
2211
2212 /* Enable / disable the Debug block */
2213 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2214                                                                            struct ecore_ptt *p_ptt,
2215                                                                            bool enable)
2216 {
2217         ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2218 }
2219
2220 /* Resets the Debug block */
2221 static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
2222                                                                           struct ecore_ptt *p_ptt)
2223 {
2224         u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2225         struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2226
2227         dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2228         old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2229         new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);
2230
2231         ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2232         ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2233 }
2234
/* Sets the Debug Bus framing mode by writing it to DBG_REG_FRAMING_MODE. */
static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
									   struct ecore_ptt *p_ptt,
									   enum dbg_bus_frame_modes mode)
{
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
}
2241
/* Enable / disable Debug Bus clients according to the specified bitmask
 * (1 = enable, 0 = disable).
 */
static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
									 struct ecore_ptt *p_ptt,
									 u32 client_mask)
{
	ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
2251
2252 /* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID. */
2253 static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
2254                                                                    struct ecore_ptt *p_ptt,
2255                                                                    enum dbg_storms storm_id,
2256                                                                    enum dbg_bus_filter_types filter_type)
2257 {
2258         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2259         u32 base_addr, sem_filter_params = filter_type;
2260         struct dbg_bus_storm_data *storm_bus;
2261         struct storm_mode_defs *storm_mode;
2262         struct storm_defs *storm;
2263
2264         storm = &s_storm_defs[storm_id];
2265         storm_bus = &dev_data->bus.storms[storm_id];
2266         storm_mode = &s_storm_mode_defs[storm_bus->mode];
2267         base_addr = storm->sem_fast_mem_addr;
2268
2269         /* Config SEM */
2270         if (storm_mode->is_fast_dbg) {
2271
2272                 /* Enable fast debug */
2273                 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
2274                 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
2275                 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);
2276
2277                 /* Enable all messages except STORE. Must be done after
2278                  * enabling SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
2279                  * be dropped after the SEMI sync fifo is filled.
2280                  */
2281                 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE);
2282         }
2283         else {
2284
2285                 /* Ensable slow debug */
2286                 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
2287                 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
2288                 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
2289                 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
2290         }
2291
2292         /* Config SEM cid filter */
2293         if (storm_bus->cid_filter_en) {
2294                 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
2295                 sem_filter_params |= SEM_FILTER_CID_EN_MASK;
2296         }
2297
2298         /* Config SEM eid filter */
2299         if (storm_bus->eid_filter_en) {
2300                 const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;
2301
2302                 if (storm_bus->eid_range_not_mask) {
2303                         ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
2304                         ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
2305                         sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
2306                 }
2307                 else {
2308                         ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
2309                         ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
2310                         sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
2311                 }
2312         }
2313
2314         /* Config accumulated SEM filter parameters (if any) */
2315         if (sem_filter_params)
2316                 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
2317 }
2318
2319 /* Disables Debug Bus block inputs */
2320 static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2321                                                                                                 struct ecore_ptt *p_ptt,
2322                                                                                                 bool empty_semi_fifos)
2323 {
2324         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2325         u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2326         bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2327         u32 block_id;
2328
2329         /* Disable messages output in all Storms */
2330         for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2331                 struct storm_defs *storm = &s_storm_defs[storm_id];
2332
2333                 if (!dev_data->block_in_reset[storm->block_id])
2334                         ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE);
2335         }
2336
2337         /* Try to empty the SEMI sync fifo. Must be done after messages output
2338          * were disabled in all Storms (i.e. SEM_FAST_REG_DBG_MODE6_SRC_DISABLE
2339          * was set to all 1's.
2340          */
2341         while (num_fifos_to_empty) {
2342                 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2343                         struct storm_defs *storm = &s_storm_defs[storm_id];
2344
2345                         if (is_fifo_empty[storm_id])
2346                                 continue;
2347
2348                         /* Check if sync fifo got empty */
2349                         if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2350                                 is_fifo_empty[storm_id] = true;
2351                                 num_fifos_to_empty--;
2352                         }
2353                 }
2354
2355                 /* Check if need to continue polling */
2356                 if (num_fifos_to_empty) {
2357                         u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
2358                         u32 polling_count = 0;
2359
2360                         if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2361                                 OSAL_MSLEEP(polling_ms);
2362                                 polling_count++;
2363                         }
2364                         else {
2365                                 DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2366                                 break;
2367                         }
2368                 }
2369         }
2370
2371         /* Disable debug in all Storms */
2372         for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2373                 struct storm_defs *storm = &s_storm_defs[storm_id];
2374                 u32 base_addr = storm->sem_fast_mem_addr;
2375
2376                 if (dev_data->block_in_reset[storm->block_id])
2377                         continue;
2378
2379                 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2380                 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2381                 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2382                 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2383         }
2384
2385         /* Disable all clients */
2386         ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2387
2388         /* Disable all blocks */
2389         for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2390                 struct block_defs *block = s_block_defs[block_id];
2391
2392                 if (block->has_dbg_bus[dev_data->chip_id] && !dev_data->block_in_reset[block_id])
2393                         ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2394         }
2395
2396         /* Disable timestamp */
2397         ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2398
2399         /* Disable filters and triggers */
2400         ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2401         ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2402
2403         return DBG_STATUS_OK;
2404 }
2405
2406 /* Sets a Debug Bus trigger/filter constraint */
2407 static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
2408                                                                          struct ecore_ptt *p_ptt,
2409                                                                          bool is_filter,
2410                                                                          u8 constraint_id,
2411                                                                          u8 hw_op_val,
2412                                                                          u32 data_val,
2413                                                                          u32 data_mask,
2414                                                                          u8 frame_bit,
2415                                                                          u8 frame_mask,
2416                                                                          u16 dword_offset,
2417                                                                          u16 range,
2418                                                                          u8 cyclic_bit,
2419                                                                          u8 must_bit)
2420 {
2421         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2422         u32 reg_offset = constraint_id * BYTES_IN_DWORD;
2423         u8 curr_trigger_state;
2424
2425         /* For trigger only - set register offset according to state */
2426         if (!is_filter) {
2427                 curr_trigger_state = dev_data->bus.next_trigger_state - 1;
2428                 reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
2429         }
2430
2431         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
2432         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
2433         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
2434         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
2435         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
2436         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
2437         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
2438         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
2439         ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
2440 }
2441
2442 /* Reads the specified DBG Bus internal buffer range and copy it to the
2443  * specified buffer. Returns the dumped size in dwords.
2444  */
2445 static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
2446                                                                                 struct ecore_ptt *p_ptt,
2447                                                                                 u32 *dump_buf,
2448                                                                                 bool dump,
2449                                                                                 u32 start_line,
2450                                                                                 u32 end_line)
2451 {
2452         u32 line, reg_addr, i, offset = 0;
2453
2454         if (!dump)
2455                 return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;
2456
2457         for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
2458                 line <= end_line;
2459                 line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
2460                 for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
2461                         dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);
2462
2463         return offset;
2464 }
2465
2466 /* Reads the DBG Bus internal buffer and copy its contents to a buffer.
2467  * Returns the dumped size in dwords.
2468  */
2469 static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
2470                                                                   struct ecore_ptt *p_ptt,
2471                                                                   u32 *dump_buf,
2472                                                                   bool dump)
2473 {
2474         u32 last_written_line, offset = 0;
2475
2476         last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);
2477
2478         if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
2479
2480                 /* Internal buffer was wrapped: first dump from write pointer
2481                  * to buffer end, then dump from buffer start to write pointer.
2482                  */
2483                 if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
2484                         offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
2485                 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2486         }
2487         else if (last_written_line) {
2488
2489                 /* Internal buffer wasn't wrapped: dump from buffer start until
2490                  *  write pointer.
2491                  */
2492                 if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
2493                         offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2494                 else
2495                         DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
2496         }
2497
2498         return offset;
2499 }
2500
2501 /* Reads the specified DBG Bus PCI buffer range and copy it to the specified
2502  * buffer. Returns the dumped size in dwords.
2503  */
2504 static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
2505                                                                                 u32 *dump_buf,
2506                                                                                 bool dump,
2507                                                                                 u32 start_line,
2508                                                                                 u32 end_line)
2509 {
2510         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2511         u32 offset = 0;
2512
2513         /* Extract PCI buffer pointer from virtual address */
2514         void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2515         u32 *pci_buf_start = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
2516         u32 *pci_buf, line, i;
2517
2518         if (!dump)
2519                 return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;
2520
2521         for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
2522         line <= end_line;
2523                 line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
2524                 for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
2525                         dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;
2526
2527         return offset;
2528 }
2529
/* Copies the DBG Bus PCI buffer to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * The HW write pointer (a physical address) is converted to a line offset
 * inside the PCI buffer; the buffer is then dumped in chronological order:
 * if it wrapped, first from the write pointer to the end, then from the
 * start up to the write pointer.
 */
static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 next_wr_byte_offset, next_wr_line_offset;
	struct dbg_bus_mem_addr next_wr_phys_addr;
	u32 pci_buf_size_in_lines, offset = 0;

	pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* Extract write pointer (physical address): low dword first, then
	 * the high dword from the adjacent register.
	 */
	next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
	next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);

	/* Convert write pointer to offset */
	next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);

	/* Sanity check: the write pointer must be line-aligned and inside
	 * the buffer; otherwise report nothing dumped rather than reading
	 * out-of-range memory.
	 */
	if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
		return 0;
	next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* PCI buffer wrapped: first dump from write pointer to buffer end. */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);

	/* Dump from buffer start until write pointer */
	if (next_wr_line_offset)
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);

	return offset;
}
2565
2566 /* Copies the DBG Bus recorded data to the specified buffer.
2567  * Returns the dumped size in dwords.
2568  */
2569 static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2570                                                            struct ecore_ptt *p_ptt,
2571                                                            u32 *dump_buf,
2572                                                            bool dump)
2573 {
2574         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2575
2576         switch (dev_data->bus.target) {
2577         case DBG_BUS_TARGET_ID_INT_BUF:
2578                 return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2579         case DBG_BUS_TARGET_ID_PCI:
2580                 return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2581         default:
2582                 break;
2583         }
2584
2585         return 0;
2586 }
2587
2588 /* Frees the Debug Bus PCI buffer */
2589 static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
2590 {
2591         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2592         dma_addr_t pci_buf_phys_addr;
2593         void *virt_addr_lo;
2594         u32 *pci_buf;
2595
2596         /* Extract PCI buffer pointer from virtual address */
2597         virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2598         pci_buf = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
2599
2600         if (!dev_data->bus.pci_buf.size)
2601                 return;
2602
2603         OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));
2604
2605         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);
2606
2607         dev_data->bus.pci_buf.size = 0;
2608 }
2609
/* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * For each enabled Storm a "bus_input" section with 3 params (name, id,
 * mode) is emitted; for each enabled block a "bus_input" section with 4
 * params (name, line, en, shr) is emitted.
 */
static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
								 u32 *dump_buf,
								 bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	/* Template Storm name; index 0 is patched with the Storm's letter */
	char storm_name[8] = "?storm";
	u32 block_id, offset = 0;
	u8 storm_id;

	/* Dump enabled Storms */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (!dev_data->bus.storms[storm_id].enabled)
			continue;

		/* Dump section header */
		storm_name[0] = storm->letter;
		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
	}

	/* Dump enabled blocks */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
		struct block_defs *block = s_block_defs[block_id];

		if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
			continue;

		/* Dump section header */
		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
		offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
	}

	return offset;
}
2656
/* Dumps the Debug Bus header (params, inputs, data header) to the specified
 * buffer. Returns the dumped size in dwords.
 *
 * Emits: 5 global params (dump-type, wrap-mode, hw-dwords, hw-id-mask,
 * target), the list of enabled inputs, and - for targets with dumpable
 * data - a "bus_data" section whose "size" param is the recorded size.
 */
static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char hw_id_mask_str[16];
	u32 offset = 0;

	/* Render the HW ID mask as a hex string for the header param */
	if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
		DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
	offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
	offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);

	offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);

	/* NIG records externally, so only other targets have a data section */
	if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
		u32 recorded_dwords = 0;

		/* Query size only (dump=false, no output buffer needed) */
		if (dump)
			recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);

		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
	}

	return offset;
}
2694
2695 static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn, 
2696                                                                 u16 *modes_buf_offset)
2697 {
2698         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2699         bool arg1, arg2;
2700         u8 tree_val;
2701
2702         /* Get next element from modes tree buffer */
2703         tree_val = ((u8*)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2704
2705         switch (tree_val) {
2706         case INIT_MODE_OP_NOT:
2707                 return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2708         case INIT_MODE_OP_OR:
2709         case INIT_MODE_OP_AND:
2710                 arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2711                 arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2712                 return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
2713         default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2714         }
2715 }
2716
2717 /* Returns true if the specified entity (indicated by GRC param) should be
2718  * included in the dump, false otherwise.
2719  */
2720 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2721                                                                   enum dbg_grc_params grc_param)
2722 {
2723         return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2724 }
2725
2726 /* Returns true of the specified Storm should be included in the dump, false
2727  * otherwise.
2728  */
2729 static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2730                                                                                 enum dbg_storms storm)
2731 {
2732         return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2733 }
2734
/* Returns true if the specified memory should be included in the dump, false
 * otherwise.
 *
 * A memory is included when (in order): its owning Storm (if any) is
 * included, its Big-RAM group's GRC param (if it belongs to one) is set,
 * and its memory group maps to an enabled GRC dump param. Groups with no
 * mapping are always included.
 */
static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
									  enum block_id block_id,
									  u8 mem_group_id)
{
	struct block_defs *block = s_block_defs[block_id];
	u8 i;

	/* Check Storm match */
	if (block->associated_to_storm &&
		!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
		return false;

	/* Big-RAM memories are controlled by their own dedicated GRC param */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
			return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
	}

	/* Map the memory group to its controlling GRC dump param */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	case MEM_GROUP_RAM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_QM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	case MEM_GROUP_IOR:
		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
	default:
		/* Unmapped groups are always dumped */
		return true;
	}
}
2799
2800 /* Stalls all Storms */
2801 static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2802                                                                    struct ecore_ptt *p_ptt,
2803                                                                    bool stall)
2804 {
2805         u32 reg_addr;
2806         u8 storm_id;
2807
2808         for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2809                 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2810                         continue;
2811
2812                 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2813                 ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2814         }
2815
2816         OSAL_MSLEEP(STALL_DELAY_MS);
2817 }
2818
/* Takes all blocks out of reset.
 *
 * Accumulates, per reset register, the unreset bits of every block marked
 * for unreset, merges in each register's default unreset value, and writes
 * the result to the register's "unreset" offset.
 */
static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
									 struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
	u32 block_id, i;

	/* Fill reset regs values */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct block_defs *block = s_block_defs[block_id];

		if (block->has_reset_bit && block->unreset)
			reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
	}

	/* Write reset registers */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
		/* Skip reset registers that don't exist on this chip */
		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
			continue;

		reg_val[i] |= s_reset_regs_defs[i].unreset_val;

		/* Writing to the unreset offset takes the blocks out of reset */
		if (reg_val[i])
			ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
	}
}
2846
2847 /* Returns the attention block data of the specified block */
2848 static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2849                                                                                                                                                 enum dbg_attn_type attn_type)
2850 {
2851         const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block*)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2852
2853         return &base_attn_block_arr[block_id].per_type_data[attn_type];
2854 }
2855
2856 /* Returns the attention registers of the specified block */
2857 static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2858                                                                                                                         enum dbg_attn_type attn_type,
2859                                                                                                                         u8 *num_attn_regs)
2860 {
2861         const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2862
2863         *num_attn_regs = block_type_data->num_regs;
2864
2865         return &((const struct dbg_attn_reg*)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2866 }
2867
/* For each block, clear the status of all parities.
 *
 * Parity status registers are clear-on-read: reading the sts_clr address of
 * each applicable (mode-matching) parity attention register clears its
 * latched status. Blocks in reset are skipped.
 */
static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
									 struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 reg_idx, num_attn_regs;
	u32 block_id;

	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		if (dev_data->block_in_reset[block_id])
			continue;

		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode: some registers only apply in certain
			 * chip/port modes, encoded in the modes tree.
			 */
			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);

			/* If Mode match: clear parity status (read clears it) */
			if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
				ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
		}
	}
}
2898
2899 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2900  * the following parameters are dumped:
2901  * - count:      no. of dumped entries
2902  * - split:      split type
2903  * - id:         split ID (dumped only if split_id >= 0)
2904  * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
2905  *               and param_val != OSAL_NULL).
2906  */
2907 static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2908                                                                    bool dump,
2909                                                                    u32 num_reg_entries,
2910                                                                    const char *split_type,
2911                                                                    int split_id,
2912                                                                    const char *param_name,
2913                                                                    const char *param_val)
2914 {
2915         u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2916         u32 offset = 0;
2917
2918         offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2919         offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2920         offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2921         if (split_id >= 0)
2922                 offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2923         if (param_name && param_val)
2924                 offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2925
2926         return offset;
2927 }
2928
2929 /* Dumps the GRC registers in the specified address range.
2930  * Returns the dumped size in dwords.
2931  * The addr and len arguments are specified in dwords.
2932  */
2933 static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2934                                                                          struct ecore_ptt *p_ptt,
2935                                                                          u32 *dump_buf,
2936                                                                          bool dump,
2937                                                                          u32 addr,
2938                                                                          u32 len,
2939                                                                          bool wide_bus)
2940 {
2941         u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2942
2943         if (!dump)
2944                 return len;
2945
2946         for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2947                 *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, byte_addr);
2948
2949         return offset;
2950 }
2951
2952 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2953  * The addr and len arguments are specified in dwords.
2954  */
2955 static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
2956                                                                                 bool dump,
2957                                                                                 u32 addr,
2958                                                                                 u32 len)
2959 {
2960         if (dump)
2961                 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2962
2963         return 1;
2964 }
2965
2966 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2967  * The addr and len arguments are specified in dwords.
2968  */
2969 static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
2970                                                                         struct ecore_ptt *p_ptt,
2971                                                                         u32 *dump_buf,
2972                                                                         bool dump,
2973                                                                         u32 addr,
2974                                                                         u32 len,
2975                                                                         bool wide_bus)
2976 {
2977         u32 offset = 0;
2978
2979         offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2980         offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
2981
2982         return offset;
2983 }
2984
2985 /* Dumps GRC registers sequence with skip cycle.
2986  * Returns the dumped size in dwords.
2987  * - addr:      start GRC address in dwords
2988  * - total_len: total no. of dwords to dump
2989  * - read_len:  no. consecutive dwords to read 
2990  * - skip_len:  no. of dwords to skip (and fill with zeros)
2991  */
2992 static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
2993                                                                                  struct ecore_ptt *p_ptt,
2994                                                                                  u32 *dump_buf,
2995                                                                                  bool dump,
2996                                                                                  u32 addr,
2997                                                                                  u32 total_len,
2998                                                                                  u32 read_len,
2999                                                                                  u32 skip_len)
3000 {
3001         u32 offset = 0, reg_offset = 0;
3002
3003         offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
3004
3005         if (!dump)
3006                 return offset + total_len;
3007
3008         while (reg_offset < total_len) {
3009                 u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3010
3011                 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3012                 reg_offset += curr_len;
3013                 addr += curr_len;
3014
3015                 if (reg_offset < total_len) {
3016                         curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
3017                         OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3018                         offset += curr_len;
3019                         reg_offset += curr_len;
3020                         addr += curr_len;
3021                 }
3022         }
3023
3024         return offset;
3025 }
3026
3027 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct dbg_array input_regs_arr,
				       u32 *dump_buf,
				       bool dump,
				       bool block_enable[MAX_BLOCK_ID],
				       u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	/* The input array is a sequence of condition headers, each followed
	 * by cond_hdr->data_size dwords of register entries (one per dword).
	 */
	while (input_offset < input_regs_arr.size_in_dwords) {
		const struct dbg_dump_cond_hdr* cond_hdr = (const struct dbg_dump_cond_hdr*)&input_regs_arr.ptr[input_offset++];
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block. NOTE: when a header carries no evaluation
		 * mode, mode_match deliberately keeps the result from the
		 * previous header (it is only overwritten when eval_mode is
		 * set).
		 */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		/* Skip this header's entries when the mode doesn't match or
		 * the owning block was not requested for dumping.
		 */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		/* Dump every register entry under this header */
		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg = (const struct dbg_dump_reg*)&input_regs_arr.ptr[input_offset];

			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
												GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
												GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
												GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
3071
/* Dumps the GRC registers of one split (engine/port/PF/VF), preceded by a
 * section header. Returns the dumped size in dwords (0 if nothing was dumped).
 */
3073 static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3074                                                                          struct ecore_ptt *p_ptt,
3075                                                                          struct dbg_array input_regs_arr,
3076                                                                          u32 *dump_buf,
3077                                                                          bool dump,
3078                                                                          bool block_enable[MAX_BLOCK_ID],
3079                                                                          const char *split_type_name,
3080                                                                          u32 split_id,
3081                                                                          const char *param_name,
3082                                                                          const char *param_val)
3083 {
3084         u32 num_dumped_reg_entries, offset;
3085
3086         /* Calculate register dump header size (and skip it for now) */
3087         offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3088
3089         /* Dump registers */
3090         offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3091
3092         /* Write register dump header */
3093         if (dump && num_dumped_reg_entries > 0)
3094                 ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3095
3096         return num_dumped_reg_entries > 0 ? offset : 0;
3097 }
3098
3099 /* Dumps registers according to the input registers array. Returns the dumped
3100  * size in dwords.
3101  */
static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u32 *dump_buf,
				    bool dump,
				    bool block_enable[MAX_BLOCK_ID],
				    const char *param_name,
				    const char *param_val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct chip_platform_defs *chip_platform;
	u32 offset = 0, input_offset = 0;
	u8 port_id, pf_id, vf_id;

	/* Per-chip/per-platform counts of ports, PFs and VFs to iterate */
	chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];

	if (dump)
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping registers...\n");

	/* The input array is a sequence of split headers, each followed by
	 * split_data_size dwords of register-dump input for that split type.
	 */
	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct dbg_array curr_input_regs_arr;
		u32 split_data_size;
		u8 split_type_id;

		split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
		curr_input_regs_arr.size_in_dwords = split_data_size;

		switch(split_type_id) {
		case SPLIT_TYPE_NONE:
			/* Engine-wide registers: single pass, split ID -1 */
			offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
			break;

		case SPLIT_TYPE_PORT:
			/* Per-port registers: pretend to each port in turn */
			for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
				if (dump)
					ecore_port_pretend(p_hwfn, p_ptt, port_id);
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
			}
			break;

		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			/* Per-PF registers: pretend to each PF FID in turn */
			for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
				if (dump)
					ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
			}
			break;

		case SPLIT_TYPE_VF:
			/* Per-VF registers: pretend with the VF-valid bit set */
			for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
				if (dump)
					ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
			}
			break;

		default:
			break;
		}

		input_offset += split_data_size;
	}

	/* Pretend to original PF */
	if (dump)
		ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));

	return offset;
}
3175
3176 /* Dump reset registers. Returns the dumped size in dwords. */
3177 static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3178         struct ecore_ptt *p_ptt,
3179         u32 *dump_buf,
3180         bool dump)
3181 {
3182         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3183         u32 i, offset = 0, num_regs = 0;
3184
3185         /* Calculate header size */
3186         offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3187
3188         /* Write reset registers */
3189         for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
3190                 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3191                         continue;
3192
3193                 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
3194                 num_regs++;
3195         }
3196
3197         /* Write header */
3198         if (dump)
3199                 ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3200
3201         return offset;
3202 }
3203
3204 /* Dump registers that are modified during GRC Dump and therefore must be
3205  * dumped first. Returns the dumped size in dwords.
3206  */
static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *dump_buf,
					bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, num_reg_entries = 0;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;

	/* Reserve space for the section header (count not yet known) */
	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);

	/* Write parity registers. Blocks in reset are skipped only when
	 * actually dumping (dump == true); in size-calculation mode they are
	 * still counted, so the computed size is an upper bound.
	 */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode: skip registers whose mode condition
			 * evaluates false for the current chip configuration.
			 */
			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
				continue;

			/* Mode match: dump the mask and status registers
			 * (one dword each).
			 */
			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
			num_reg_entries += 2;
		}
	}

	/* Write Storm stall status registers (one dword per Storm) */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (dev_data->block_in_reset[storm->block_id] && dump)
			continue;

		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
			BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
		num_reg_entries++;
	}

	/* Fill in the header now that the entry count is known */
	if (dump)
		ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);

	return offset;
}
3263
3264 /* Dumps registers that can't be represented in the debug arrays */
3265 static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3266                                                                            struct ecore_ptt *p_ptt,
3267                                                                            u32 *dump_buf,
3268                                                                            bool dump)
3269 {
3270         u32 offset = 0;
3271
3272         offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3273
3274         /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3275          * skipped).
3276          */
3277         offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3278         offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3279
3280         return offset;
3281 }
3282
3283 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3284  * dwords. The following parameters are dumped:
3285  * - name:         dumped only if it's not OSAL_NULL.
3286  * - addr:         in dwords, dumped only if name is OSAL_NULL.
3287  * - len:          in dwords, always dumped.
3288  * - width:        dumped if it's not zero.
3289  * - packed:       dumped only if it's not false.
3290  * - mem_group:    always dumped.
3291  * - is_storm:     true only if the memory is related to a Storm.
3292  * - storm_letter: valid only if is_storm is true.
3293  *
3294  */
3295 static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3296                                                                   u32 *dump_buf,
3297                                                                   bool dump,
3298                                                                   const char *name,
3299                                                                   u32 addr,
3300                                                                   u32 len,
3301                                                                   u32 bit_width,
3302                                                                   bool packed,
3303                                                                   const char *mem_group,
3304                                                                   bool is_storm,
3305                                                                   char storm_letter)
3306 {
3307         u8 num_params = 3;
3308         u32 offset = 0;
3309         char buf[64];
3310
3311         if (!len)
3312                 DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3313
3314         if (bit_width)
3315                 num_params++;
3316         if (packed)
3317                 num_params++;
3318
3319         /* Dump section header */
3320         offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
3321
3322         if (name) {
3323
3324                 /* Dump name */
3325                 if (is_storm) {
3326                         OSAL_STRCPY(buf, "?STORM_");
3327                         buf[0] = storm_letter;
3328                         OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3329                 }
3330                 else {
3331                         OSAL_STRCPY(buf, name);
3332                 }
3333
3334                 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
3335                 if (dump)
3336                         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from %s...\n", len, buf);
3337         }
3338         else {
3339
3340                 /* Dump address */
3341                 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3342
3343                 offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3344                 if (dump && len > 64)
3345                         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", len, addr_in_bytes);
3346         }
3347
3348         /* Dump len */
3349         offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3350
3351         /* Dump bit width */
3352         if (bit_width)
3353                 offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3354
3355         /* Dump packed */
3356         if (packed)
3357                 offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
3358
3359         /* Dump reg type */
3360         if (is_storm) {
3361                 OSAL_STRCPY(buf, "?STORM_");
3362                 buf[0] = storm_letter;
3363                 OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3364         }
3365         else {
3366                 OSAL_STRCPY(buf, mem_group);
3367         }
3368
3369         offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3370
3371         return offset;
3372 }
3373
3374 /* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by address.
3375  * Returns the dumped size in dwords.
3376  * The addr and len arguments are specified in dwords.
3377  */
3378 static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3379                                                           struct ecore_ptt *p_ptt,
3380                                                           u32 *dump_buf,
3381                                                           bool dump,
3382                                                           const char *name,
3383                                                           u32 addr,
3384                                                           u32 len,
3385                                                           bool wide_bus,
3386                                                           u32 bit_width,
3387                                                           bool packed,
3388                                                           const char *mem_group,
3389                                                           bool is_storm,
3390                                                           char storm_letter)
3391 {
3392         u32 offset = 0;
3393
3394         offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3395         offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3396
3397         return offset;
3398 }
3399
3400 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct dbg_array input_mems_arr,
				      u32 *dump_buf,
				      bool dump)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	/* The input array is a sequence of condition headers, each followed
	 * by fixed-size memory entries (MEM_DUMP_ENTRY_SIZE_DWORDS each).
	 */
	while (input_offset < input_mems_arr.size_in_dwords) {
		const struct dbg_dump_cond_hdr* cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		cond_hdr = (const struct dbg_dump_cond_hdr*)&input_mems_arr.ptr[input_offset++];
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode. NOTE: when a header carries no
		 * evaluation mode, mode_match keeps the result from the
		 * previous header.
		 */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem = (const struct dbg_dump_mem*)&input_mems_arr.ptr[input_offset];
			u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
			bool is_storm = false, mem_wide_bus;
			char storm_letter = 'a';
			u32 mem_addr, mem_len;

			/* A bad group ID means corrupt input data - abort */
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
				return 0;
			}

			/* Skip memories excluded by the current GRC params */
			if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
				continue;

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);

			/* Update memory length for CCFC/TCFC memories
			 * according to number of LCIDs/LTIDs: the recorded
			 * length is scaled from the maximum ID count to the
			 * configured one.
			 */
			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
				if (mem_len % MAX_LCIDS) {
					DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
					return 0;
				}

				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
			}
			else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
				if (mem_len % MAX_LTIDS) {
					DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
					return 0;
				}

				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
			}

			/* If memory is associated with Storm, update Storm
			 * details (used for the "<letter>STORM_" type prefix).
			 */
			if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
				is_storm = true;
				storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
			}

			/* Dump memory (stored by address: name is OSAL_NULL) */
			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
											0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
		}
	}

	return offset;
}
3486
3487 /* Dumps GRC memories according to the input array dump_mem.
3488  * Returns the dumped size in dwords.
3489  */
3490 static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3491                                                                    struct ecore_ptt *p_ptt,
3492                                                                    u32 *dump_buf,
3493                                                                    bool dump)
3494 {
3495         u32 offset = 0, input_offset = 0;
3496
3497         while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3498                 const struct dbg_dump_split_hdr *split_hdr;
3499                 struct dbg_array curr_input_mems_arr;
3500                 u32 split_data_size;
3501                 u8 split_type_id;
3502
3503                 split_hdr = (const struct dbg_dump_split_hdr*)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3504                 split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3505                 split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3506                 curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3507                 curr_input_mems_arr.size_in_dwords = split_data_size;
3508
3509                 switch (split_type_id) {
3510                 case SPLIT_TYPE_NONE:
3511                         offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
3512                         break;
3513
3514                 default:
3515                         DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3516                         break;
3517                 }
3518
3519                 input_offset += split_data_size;
3520         }
3521
3522         return offset;
3523 }
3524
3525 /* Dumps GRC context data for the specified Storm.
3526  * Returns the dumped size in dwords.
3527  * The lid_size argument is specified in quad-regs.
3528  */
3529 static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
3530                                                                    struct ecore_ptt *p_ptt,
3531                                                                    u32 *dump_buf,
3532                                                                    bool dump,
3533                                                                    const char *name,
3534                                                                    u32 num_lids,
3535                                                                    u32 lid_size,
3536                                                                    u32 rd_reg_addr,
3537                                                                    u8 storm_id)
3538 {
3539         struct storm_defs *storm = &s_storm_defs[storm_id];
3540         u32 i, lid, total_size, offset = 0;
3541
3542         if (!lid_size)
3543                 return 0;
3544
3545         lid_size *= BYTES_IN_DWORD;
3546         total_size = num_lids * lid_size;
3547
3548         offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);
3549
3550         if (!dump)
3551                 return offset + total_size;
3552
3553         /* Dump context data */
3554         for (lid = 0; lid < num_lids; lid++) {
3555                 for (i = 0; i < lid_size; i++, offset++) {
3556                         ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3557                         *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
3558                 }
3559         }
3560
3561         return offset;
3562 }
3563
3564 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3565 static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
3566                                                           struct ecore_ptt *p_ptt,
3567                                                           u32 *dump_buf,
3568                                                           bool dump)
3569 {
3570         u32 offset = 0;
3571         u8 storm_id;
3572
3573         for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3574                 struct storm_defs *storm = &s_storm_defs[storm_id];
3575
3576                 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3577                         continue;
3578
3579                 /* Dump Conn AG context size */
3580                 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3581                         storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);
3582
3583                 /* Dump Conn ST context size */
3584                 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3585                         storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);
3586
3587                 /* Dump Task AG context size */
3588                 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3589                         storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);
3590
3591                 /* Dump Task ST context size */
3592                 offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3593                         storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
3594         }
3595
3596         return offset;
3597 }
3598
3599 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3600 static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3601                                                            struct ecore_ptt *p_ptt,
3602                                                            u32 *dump_buf,
3603                                                            bool dump)
3604 {
3605         char buf[10] = "IOR_SET_?";
3606         u32 addr, offset = 0;
3607         u8 storm_id, set_id;
3608
3609         for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3610                 struct storm_defs *storm = &s_storm_defs[storm_id];
3611
3612                 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3613                         continue;
3614
3615                 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3616                         addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
3617                         buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3618                         offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3619                 }
3620         }
3621
3622         return offset;
3623 }
3624
/* Dump VFC CAM. Returns the dumped size in dwords.
 *
 * Reads the given Storm's VFC CAM row by row: for each row, a CAM-read
 * command and address are written through the Storm's SEM fast-memory VFC
 * window, then the response dwords are read back into the dump buffer.
 */
static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  u8 storm_id)
{
	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
	/* NOTE(review): 'i' appears unused, but the ARR_REG_WR/ARR_REG_RD
	 * macros below presumably iterate with an 'i' from the enclosing
	 * scope - confirm against the macro definitions before removing it.
	 */
	u32 row, i, offset = 0;

	/* Dump memory header */
	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);

	/* When only sizing, account for header + data and return */
	if (!dump)
		return offset + total_size;

	/* Prepare CAM address (read opcode; the address itself is constant
	 * across rows - only the command's ROW field changes)
	 */
	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);

	for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {

		/* Write VFC CAM command (selects the row to read) */
		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);

		/* Write VFC CAM address */
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);

		/* Read VFC CAM read response into the dump buffer */
		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
	}

	return offset;
}
3661
/* Dump VFC RAM. Returns the dumped size in dwords.
 *
 * Reads the RAM rows [base_row, base_row + num_rows) described by ram_defs
 * through the given Storm's SEM fast-memory VFC window: a RAM-read command
 * and a per-row address are written, then the response is read back.
 */
static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  u8 storm_id,
				  struct vfc_ram_defs *ram_defs)
{
	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
	/* NOTE(review): 'i' appears unused, but the ARR_REG_WR/ARR_REG_RD
	 * macros below presumably iterate with an 'i' from the enclosing
	 * scope - confirm against the macro definitions before removing it.
	 */
	u32 row, i, offset = 0;

	/* Dump memory header */
	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);

	/* Prepare RAM address (read opcode; the ROW field is set per row) */
	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);

	/* When only sizing, account for header + data and return */
	if (!dump)
		return offset + total_size;

	for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {

		/* Write VFC RAM command */
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);

		/* Write VFC RAM address (selects the row to read) */
		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);

		/* Read VFC RAM read response into the dump buffer */
		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
	}

	return offset;
}
3699
3700 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3701 static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3702                                                           struct ecore_ptt *p_ptt,
3703                                                           u32 *dump_buf,
3704                                                           bool dump)
3705 {
3706         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3707         u8 storm_id, i;
3708         u32 offset = 0;
3709         
3710         for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3711                 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3712                         !s_storm_defs[storm_id].has_vfc ||
3713                         (storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3714                         continue;
3715
3716                 /* Read CAM */
3717                 offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3718
3719                 /* Read RAM */
3720                 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3721                         offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3722         }
3723
3724         return offset;
3725 }
3726
3727 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3728 static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
3729                                                           struct ecore_ptt *p_ptt,
3730                                                           u32 *dump_buf,
3731                                                           bool dump)
3732 {
3733         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3734         u32 offset = 0;
3735         u8 rss_mem_id;
3736
3737         for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3738                 u32 rss_addr, num_entries, entry_width, total_dwords, i;
3739                 struct rss_mem_defs *rss_defs;
3740                 bool packed;
3741
3742                 rss_defs = &s_rss_mem_defs[rss_mem_id];
3743                 rss_addr = rss_defs->addr;
3744                 num_entries = rss_defs->num_entries[dev_data->chip_id];
3745                 entry_width = rss_defs->entry_width[dev_data->chip_id];
3746                 total_dwords = (num_entries * entry_width) / 32;
3747                 packed = (entry_width == 16);
3748
3749                 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
3750                         entry_width, packed, rss_defs->type_name, false, 0);
3751
3752                 /* Dump RSS data */
3753                 if (!dump) {
3754                         offset += total_dwords;
3755                         continue;
3756                 }
3757
3758                 for (i = 0; i < total_dwords; i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
3759                         ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3760                         offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), RSS_REG_RSS_RAM_DATA_SIZE, false);
3761                 }
3762         }
3763
3764         return offset;
3765 }
3766
3767 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3768 static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
3769                                                                   struct ecore_ptt *p_ptt,
3770                                                                   u32 *dump_buf,
3771                                                                   bool dump,
3772                                                                   u8 big_ram_id)
3773 {
3774         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3775         u32 total_blocks, ram_size, offset = 0, i;
3776         char mem_name[12] = "???_BIG_RAM";
3777         char type_name[8] = "???_RAM";
3778         struct big_ram_defs *big_ram;
3779
3780         big_ram = &s_big_ram_defs[big_ram_id];
3781         total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3782         ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
3783
3784         OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3785         OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3786
3787         /* Dump memory header */
3788         offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, BIG_RAM_BLOCK_SIZE_BYTES * 8, false, type_name, false, 0);
3789
3790         /* Read and dump Big RAM data */
3791         if (!dump)
3792                 return offset + ram_size;
3793
3794         /* Dump Big RAM */
3795         for (i = 0; i < total_blocks / 2; i++) {
3796                 ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3797                 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), 2 * BIG_RAM_BLOCK_SIZE_DWORDS, false);
3798         }
3799
3800         return offset;
3801 }
3802
3803 static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
3804                                                           struct ecore_ptt *p_ptt,
3805                                                           u32 *dump_buf,
3806                                                           bool dump)
3807 {
3808         bool block_enable[MAX_BLOCK_ID] = { 0 };
3809         bool halted = false;
3810         u32 offset = 0;
3811
3812         /* Halt MCP */
3813         if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3814                 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
3815                 if (!halted)
3816                         DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
3817         }
3818
3819         /* Dump MCP scratchpad */
3820         offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, false, 0, false, "MCP", false, 0);
3821
3822         /* Dump MCP cpu_reg_file */
3823         offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);
3824
3825         /* Dump MCP registers */
3826         block_enable[BLOCK_MCP] = true;
3827         offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");
3828
3829         /* Dump required non-MCP registers */
3830         offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
3831         offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);
3832
3833         /* Release MCP */
3834         if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
3835                 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
3836
3837         return offset;
3838 }
3839
3840 /* Dumps the tbus indirect memory for all PHYs. */
3841 static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
3842                                                           struct ecore_ptt *p_ptt,
3843                                                           u32 *dump_buf,
3844                                                           bool dump)
3845 {
3846         u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3847         char mem_name[32];
3848         u8 phy_id;
3849
3850         for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
3851                 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3852                 struct phy_defs *phy_defs;
3853                 u8 *bytes_buf;
3854
3855                 phy_defs = &s_phy_defs[phy_id];
3856                 addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
3857                 addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
3858                 data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
3859                 data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
3860                 bytes_buf = (u8*)(dump_buf + offset);
3861
3862                 if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
3863                         DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");
3864
3865                 offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);
3866
3867                 if (!dump) {
3868                         offset += PHY_DUMP_SIZE_DWORDS;
3869                         continue;
3870                 }
3871
3872                 for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
3873                         ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3874                         for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
3875                                 ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
3876                                 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
3877                                 *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
3878                         }
3879                 }
3880
3881                 offset += PHY_DUMP_SIZE_DWORDS;
3882         }
3883
3884         return offset;
3885 }
3886
/* Configures a single debug line of the specified block by programming the
 * block's debug-bus registers in order: line select, enable mask, right
 * shift, force-valid mask and force-frame mask.
 *
 * @param block_id	   - block whose debug line is configured
 * @param line_id	   - debug line to select
 * @param enable_mask	   - written to the block's dbg_enable register
 * @param right_shift	   - written to the block's dbg_shift register
 * @param force_valid_mask - written to the block's dbg_force_valid register
 * @param force_frame_mask - written to the block's dbg_force_frame register
 */
static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  enum block_id block_id,
				  u8 line_id,
				  u8 enable_mask,
				  u8 right_shift,
				  u8 force_valid_mask,
				  u8 force_frame_mask)
{
	struct block_defs *block = s_block_defs[block_id];

	ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
	ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
	ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
	ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
	ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
}
3904
/* Dumps Static Debug data. Returns the dumped size in dwords.
 *
 * For each block with a debug bus on this chip, selects every debug line in
 * turn and records its output. The debug block is reconfigured for the dump
 * and disabled again at the end.
 */
static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u32 *dump_buf,
				       bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0;

	/* Skip static debug if a debug bus recording is in progress */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping static debug data...\n");

		/* Disable all blocks debug output */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct block_defs *block = s_block_defs[block_id];

			if (block->has_dbg_bus[dev_data->chip_id])
				ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
		}

		/* Reset and configure the debug block: 8 HW dwords / 0 Storm
		 * dwords framing, internal buffer target, full mode.
		 */
		ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
		ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct block_defs *block = s_block_defs[block_id];
		struct dbg_bus_block *block_desc;
		u32 block_dwords;

		/* Skip blocks with no debug bus on this chip */
		if (!block->has_dbg_bus[dev_data->chip_id])
			continue;

		block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
		block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);

		/* When only sizing, account for the data and continue */
		if (!dump) {
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid (block in reset) - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {

			/* Configure debug line ID (all lanes enabled, no
			 * shift, no forcing)
			 */
			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info from the calendar output */
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
		}

		/* Disable block's client and debug output */
		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
		ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
	}

	/* Disable the debug block and all clients when done */
	if (dump) {
		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
3986
3987 /* Performs GRC Dump to the specified buffer.
3988  * Returns the dumped size in dwords.
3989  */
3990 static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
3991                                                                           struct ecore_ptt *p_ptt,
3992                                                                           u32 *dump_buf,
3993                                                                           bool dump,
3994                                                                           u32 *num_dumped_dwords)
3995 {
3996         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3997         bool is_emul, parities_masked = false;
3998         u8 i, port_mode = 0;
3999         u32 offset = 0;
4000
4001         is_emul = dev_data->platform_id == PLATFORM_EMUL_FULL || dev_data->platform_id == PLATFORM_EMUL_REDUCED;
4002
4003         *num_dumped_dwords = 0;
4004
4005         ;
4006
4007         if (dump) {
4008
4009                 /* Find port mode */
4010                 switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
4011                 case 0: port_mode = 1; break;
4012                 case 1: port_mode = 2; break;
4013                 case 2: port_mode = 4; break;
4014                 }
4015
4016                 /* Update reset state */
4017                 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4018         }
4019
4020         /* Dump global params */
4021         offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
4022         offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
4023         offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
4024         offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
4025         offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);
4026
4027         /* Dump reset registers (dumped before taking blocks out of reset ) */
4028         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4029                 offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4030
4031         /* Take all blocks out of reset (using reset registers) */
4032         if (dump) {
4033                 ecore_grc_unreset_blocks(p_hwfn, p_ptt);
4034                 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4035         }
4036
4037         /* Disable all parities using MFW command */
4038         if (dump && !is_emul && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4039                 parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
4040                 if (!parities_masked) {
4041                         DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
4042                         if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4043                                 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4044                 }
4045         }
4046
4047         /* Dump modified registers (dumped before modifying them) */
4048         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4049                 offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4050
4051         /* Stall storms */
4052         if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4053                 ecore_grc_stall_storms(p_hwfn, p_ptt, true);
4054
4055         /* Dump all regs  */
4056         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4057                 bool block_enable[MAX_BLOCK_ID];
4058
4059                 /* Dump all blocks except MCP */
4060                 for (i = 0; i < MAX_BLOCK_ID; i++)
4061                         block_enable[i] = true;
4062                 block_enable[BLOCK_MCP] = false;
4063                 offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);
4064
4065                 /* Dump special registers */
4066                 offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4067         }
4068
4069         /* Dump memories */
4070         offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4071
4072         /* Dump MCP */
4073         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4074                 offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);
4075
4076         /* Dump context */
4077         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4078                 offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);
4079
4080         /* Dump RSS memories */
4081         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4082                 offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);
4083
4084         /* Dump Big RAM */
4085         for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4086                 if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4087                         offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);
4088
4089         /* Dump IORs */
4090         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4091                 offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);
4092
4093         /* Dump VFC */
4094         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4095                 offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);
4096
4097         /* Dump PHY tbus */
4098         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4099                 offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);
4100
4101         /* Dump static debug data  */
4102         if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
4103                 offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);
4104
4105         /* Dump last section */
4106         offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4107
4108         if (dump) {
4109
4110                 /* Unstall storms */
4111                 if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4112                         ecore_grc_stall_storms(p_hwfn, p_ptt, false);
4113
4114                 /* Clear parity status */
4115                 if (!is_emul)
4116                         ecore_grc_clear_all_prty(p_hwfn, p_ptt);
4117
4118                 /* Enable all parities using MFW command */
4119                 if (parities_masked)
4120                         ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
4121         }
4122
4123         *num_dumped_dwords = offset;
4124
4125         ;
4126
4127         return DBG_STATUS_OK;
4128 }
4129
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords, or 0 on error (invalid block ID).
 * When dump is false, only the required buffer size is calculated and
 * nothing is written.
 */
static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u32 *dump_buf,
				       bool dump,
				       u16 rule_id,
				       const struct dbg_idle_chk_rule *rule,
				       u16 fail_entry_id,
				       u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	hdr = (struct dbg_idle_chk_result_hdr*)dump_buf;

	/* The rule's registers are laid out contiguously in the binary
	 * registers array: num_cond_regs condition registers first, then the
	 * info registers.
	 */
	regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		OSAL_MEMSET(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values (already read by the caller into
	 * cond_reg_values).
	 */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);

		/* Write register header */
		if (!dump) {
			/* Size-only pass: account for header + values */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* A register is marked as a memory if it has multiple entries
		 * or a non-zero start entry.
		 */
		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values (read from HW here, unlike the condition
	 * registers which were pre-read).
	 */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* Size-only pass: assume header + full register size */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
			return 0;
		}

		/* Skip registers whose block is in reset */
		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr*)(dump_buf + offset);

			/* Check mode: the register may only be valid in
			 * specific chip/configuration modes.
			 */
			eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
			}

			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			/* Info register IDs follow the condition register IDs */
			SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);

			/* Write register values */
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
		}
	}

	return offset;
}
4243
/* Dumps idle check rule entries. Returns the dumped size in dwords, or 0 on
 * error (invalid block ID or oversized register entry). num_failing_rules is
 * set to the number of rules dumped as failures.
 */
static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 *dump_buf,
					    bool dump,
					    const struct dbg_idle_chk_rule *input_rules,
					    u32 num_input_rules,
					    u32 *num_failing_rules)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
	u32 i, offset = 0;
	u16 entry_id;
	u8 reg_id;

	*num_failing_rules = 0;

	for (i = 0; i < num_input_rules; i++) {
		const struct dbg_idle_chk_cond_reg *cond_regs;
		const struct dbg_idle_chk_rule *rule;
		const union dbg_idle_chk_reg *regs;
		u16 num_reg_entries = 1;
		bool check_rule = true;
		const u32 *imm_values;

		rule = &input_rules[i];
		regs = &((const union dbg_idle_chk_reg*)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
		cond_regs = &regs[0].cond_reg;
		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];

		/* Check if all condition register blocks are out of reset, and
		 * find maximal number of entries (all condition registers that
		 * are memories must have the same size, which is > 1).
		 */
		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
			u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);

			if (block_id >= MAX_BLOCK_ID) {
				DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
				return 0;
			}

			check_rule = !dev_data->block_in_reset[block_id];
			if (cond_regs[reg_id].num_entries > num_reg_entries)
				num_reg_entries = cond_regs[reg_id].num_entries;
		}

		/* Rules whose blocks are in reset can't be evaluated; skip
		 * them in a real dump (a size-only pass still counts them for
		 * worst-case buffer sizing).
		 */
		if (!check_rule && dump)
			continue;

		/* Go over all register entries (number of entries is the same for all
		 * condition registers).
		 */
		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
			u32 next_reg_offset = 0;

			if (!dump) {
				/* Size-only pass: one failure per rule is
				 * assumed (hence the break).
				 */
				offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, entry_id, OSAL_NULL);
				(*num_failing_rules)++;
				break;
			}

			/* Read current entry of all condition registers */
			for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
				const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
				u32 padded_entry_size, addr;
				bool wide_bus;

				/* Find GRC address (if it's a memory, the address of the
				 * specific entry is calculated).
				 */
				addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
				wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
				if (reg->num_entries > 1 || reg->start_entry > 0) {
					/* Memory entries are padded to a power-of-2 size */
					padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
					addr += (reg->start_entry + entry_id) * padded_entry_size;
				}

				/* Read registers (bounds-checked against the
				 * local cond_reg_values array).
				 */
				if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
					DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
					return 0;
				}

				next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
			}

			/* Call rule condition function. if returns true, it's a failure.*/
			if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
				offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
				(*num_failing_rules)++;
				break;
			}
		}
	}

	return offset;
}
4342
/* Performs Idle Check Dump to the specified buffer.
 * Returns the dumped size in dwords. When dump is false, only the required
 * buffer size is calculated.
 */
static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u32 *dump_buf,
			       bool dump)
{
	u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter. num_rules
	 * is written as 0 here and patched at the recorded offset once the
	 * real failure count is known.
	 */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
	num_failing_rules_offset = offset;
	offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	/* Walk the binary rules array: each condition header is followed by
	 * data_size dwords of rule entries.
	 */
	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
		const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode: rule groups may be conditioned on chip /
		 * configuration modes; non-matching groups are skipped.
		 */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		if (mode_match) {
			offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule*)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter with the actual failure count */
	if (dump)
		ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);

	return offset;
}
4392
4393 /* Finds the meta data image in NVRAM */
4394 static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
4395                                                                                           struct ecore_ptt *p_ptt,
4396                                                                                           u32 image_type,
4397                                                                                           u32 *nvram_offset_bytes,
4398                                                                                           u32 *nvram_size_bytes)
4399 {
4400         u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4401         struct mcp_file_att file_att;
4402         int nvm_result;
4403
4404         /* Call NVRAM get file command */
4405         nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32*)&file_att);
4406
4407         /* Check response */
4408         if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4409                 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4410
4411         /* Update return values */
4412         *nvram_offset_bytes = file_att.nvm_start_addr;
4413         *nvram_size_bytes = file_att.len;
4414
4415         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);
4416
4417         /* Check alignment */
4418         if (*nvram_size_bytes & 0x3)
4419                 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4420
4421         return DBG_STATUS_OK;
4422 }
4423
4424 /* Reads data from NVRAM */
4425 static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4426                                                                                 struct ecore_ptt *p_ptt,
4427                                                                                 u32 nvram_offset_bytes,
4428                                                                                 u32 nvram_size_bytes,
4429                                                                                 u32 *ret_buf)
4430 {
4431         u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4432         s32 bytes_left = nvram_size_bytes;
4433         u32 read_offset = 0;
4434
4435         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4436
4437         do {
4438                 bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4439
4440                 /* Call NVRAM read command */
4441                 if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32*)((u8*)ret_buf + read_offset)))
4442                         return DBG_STATUS_NVRAM_READ_FAILED;
4443
4444                 /* Check response */
4445                 if ((ret_mcp_resp  & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4446                         return DBG_STATUS_NVRAM_READ_FAILED;
4447
4448                 /* Update read offset */
4449                 read_offset += ret_read_size;
4450                 bytes_left -= ret_read_size;
4451         } while (bytes_left > 0);
4452
4453         return DBG_STATUS_OK;
4454 }
4455
4456 /* Get info on the MCP Trace data in the scratchpad:
4457  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4458  * - trace_data_size (OUT): trace data size in bytes (without the header)
4459  */
4460 static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4461                                                                                                          struct ecore_ptt *p_ptt,
4462                                                                                                          u32 *trace_data_grc_addr,
4463                                                                                                          u32 *trace_data_size)
4464 {
4465         u32 spad_trace_offsize, signature;
4466
4467         /* Read trace section offsize structure from MCP scratchpad */
4468         spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4469
4470         /* Extract trace section address from offsize (in scratchpad) */
4471         *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4472
4473         /* Read signature from MCP trace section */
4474         signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4475
4476         if (signature != MFW_TRACE_SIGNATURE)
4477                 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4478
4479         /* Read trace size from MCP trace section */
4480         *trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4481
4482         return DBG_STATUS_OK;
4483 }
4484
4485 /* Reads MCP trace meta data image from NVRAM
4486  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4487  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4488  *                            loaded from file).
4489  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4490  */
4491 static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4492                                                                                                          struct ecore_ptt *p_ptt,
4493                                                                                                          u32 trace_data_size_bytes,
4494                                                                                                          u32 *running_bundle_id,
4495                                                                                                          u32 *trace_meta_offset,
4496                                                                                                          u32 *trace_meta_size)
4497 {
4498         u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4499
4500         /* Read MCP trace section offsize structure from MCP scratchpad */
4501         spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4502
4503         /* Find running bundle ID */
4504         running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4505         *running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4506         if (*running_bundle_id > 1)
4507                 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4508
4509         /* Find image in NVRAM */
4510         nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4511         return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4512 }
4513
4514 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4515 static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4516                                                                                                  struct ecore_ptt *p_ptt,
4517                                                                                                  u32 nvram_offset_in_bytes,
4518                                                                                                  u32 size_in_bytes,
4519                                                                                                  u32 *buf)
4520 {
4521         u8 modules_num, module_len, i, *byte_buf = (u8*)buf;
4522         enum dbg_status status;
4523         u32 signature;
4524
4525         /* Read meta data from NVRAM */
4526         status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4527         if (status != DBG_STATUS_OK)
4528                 return status;
4529
4530         /* Extract and check first signature */
4531         signature = ecore_read_unaligned_dword(byte_buf);
4532         byte_buf += sizeof(signature);
4533         if (signature != NVM_MAGIC_VALUE)
4534                 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4535
4536         /* Extract number of modules */
4537         modules_num = *(byte_buf++);
4538
4539         /* Skip all modules */
4540         for (i = 0; i < modules_num; i++) {
4541                 module_len = *(byte_buf++);
4542                 byte_buf += module_len;
4543         }
4544
4545         /* Extract and check second signature */
4546         signature = ecore_read_unaligned_dword(byte_buf);
4547         byte_buf += sizeof(signature);
4548         if (signature != NVM_MAGIC_VALUE)
4549                 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4550
4551         return DBG_STATUS_OK;
4552 }
4553
/* Dump MCP Trace. When dump is false, only the required buffer size is
 * calculated. If MCP access is disabled (DBG_GRC_PARAM_NO_MCP), the trace
 * data is still dumped but DBG_STATUS_NVRAM_GET_IMAGE_FAILED is returned to
 * indicate the NVRAM meta data is missing.
 */
static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 *dump_buf,
					    bool dump,
					    u32 *num_dumped_dwords)
{
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 running_bundle_id, offset = 0;
	enum dbg_status status;
	bool mcp_access;
	int halted = 0;

	*num_dumped_dwords = 0;

	mcp_access = !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && mcp_access) {
		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
	}

	/* Find trace data size (header included, rounded up to whole dwords) */
	trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);

	/* Resume MCP (only if halt succeeded) */
	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);

	/* Read trace meta only if NVRAM access is enabled
	 * (trace_meta_size_bytes is dword-aligned).
	 */
	if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
		status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
		if (status == DBG_STATUS_OK)
			trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
	}

	/* Dump trace meta size param (stays 0 if the meta lookup failed or
	 * NVRAM access is disabled).
	 */
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer. The meta section is counted
	 * in offset only while status is still OK after the read.
	 */
	if (dump && trace_meta_size_dwords)
		status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4636
4637 /* Dump GRC FIFO */
4638 static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
4639                                                                                    struct ecore_ptt *p_ptt,
4640                                                                                    u32 *dump_buf,
4641                                                                                    bool dump,
4642                                                                                    u32 *num_dumped_dwords)
4643 {
4644         u32 dwords_read, size_param_offset, offset = 0;
4645         bool fifo_has_data;
4646
4647         *num_dumped_dwords = 0;
4648
4649         /* Dump global params */
4650         offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4651         offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");
4652
4653         /* Dump fifo data section header and param. The size param is 0 for
4654          * now, and is overwritten after reading the FIFO.
4655          */
4656         offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
4657         size_param_offset = offset;
4658         offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4659
4660         if (dump) {
4661                 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4662
4663                 /* Pull available data from fifo. Use DMAE since this is
4664                  * widebus memory and must be accessed atomically. Test for
4665                  * dwords_read not passing buffer size since more entries could
4666                  * be added to the buffer as we
4667                  * are emptying it.
4668                  */
4669                 for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += REG_FIFO_ELEMENT_DWORDS) {
4670                         if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, (u64)(osal_uintptr_t)(&dump_buf[offset]), REG_FIFO_ELEMENT_DWORDS, 0))
4671                                 return DBG_STATUS_DMAE_FAILED;
4672                         fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4673                 }
4674
4675                 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4676         }
4677         else {
4678
4679                 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4680                  * test how much data is available, except for reading it.
4681                  */
4682                 offset += REG_FIFO_DEPTH_DWORDS;
4683         }
4684
4685         /* Dump last section */
4686         offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4687
4688         *num_dumped_dwords = offset;
4689
4690         return DBG_STATUS_OK;
4691 }
4692
/* Dump IGU FIFO. Drains the IGU error-handling memory into the dump buffer;
 * when dump is false, only the worst-case buffer size is calculated.
 */
static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u32 *dump_buf,
					   bool dump,
					   u32 *num_dumped_dwords)
{
	u32 dwords_read, size_param_offset, offset = 0;
	bool fifo_has_data;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
	size_param_offset = offset;
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (dump) {
		fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;

		/* Pull available data from fifo. Use DMAE since this is
		 * widebus memory and must be accessed atomically. Test for
		 * dwords_read not passing buffer size since more entries could
		 * be added to the buffer as we are emptying it.
		 */
		for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += IGU_FIFO_ELEMENT_DWORDS) {
			if (ecore_dmae_grc2host(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_MEMORY, (u64)(osal_uintptr_t)(&dump_buf[offset]), IGU_FIFO_ELEMENT_DWORDS, 0))
				return DBG_STATUS_DMAE_FAILED;
			fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
		}

		/* Patch the size param with the number of dwords drained */
		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
	}
	else {

		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
		 * test how much data is available, except for reading it.
		 */
		offset += IGU_FIFO_DEPTH_DWORDS;
	}

	/* Dump last section */
	offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4747
/* Protection Override dump.
 * Dumps the GRC protection-override window via DMAE. When 'dump' is false,
 * only the worst-case size (PROTECTION_OVERRIDE_DEPTH_DWORDS) is counted.
 * Returns DBG_STATUS_OK or DBG_STATUS_DMAE_FAILED; the dumped (or required)
 * dword count is returned via num_dumped_dwords.
 */
static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt,
						      u32 *dump_buf,
						      bool dump,
						      u32 *num_dumped_dwords)
{
	u32 size_param_offset, override_window_dwords, offset = 0;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");

	/* Dump data section header and param. The size param is 0 for now,
	 * and is overwritten after reading the data.
	 */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
	size_param_offset = offset;
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (dump) {
		/* Add override window info to buffer. The number of valid
		 * window entries is read from GRC and converted to dwords.
		 */
		override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
		if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_PROTECTION_OVERRIDE_WINDOW, (u64)(osal_uintptr_t)(dump_buf + offset), override_window_dwords, 0))
			return DBG_STATUS_DMAE_FAILED;
		offset += override_window_dwords;
		/* Overwrite the size param with the actual dword count */
		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
	}
	else {
		/* Size query: assume the full window depth */
		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
	}

	/* Dump last section */
	offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4789
/* Performs FW Asserts Dump to the specified buffer.
 * For each Storm that is not in reset, reads the FW asserts section from the
 * Storm's fast-memory RAM and dumps the last assert list element.
 * When 'dump' is false, only counts the dwords that would be dumped.
 * Returns the dumped size in dwords.
 */
static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u32 *dump_buf,
				 bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");

	/* Find Storm dump size */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
		struct storm_defs *storm = &s_storm_defs[storm_id];

		/* Skip Storms whose block is held in reset */
		if (dev_data->block_in_reset[storm->block_id])
			continue;

		/* Read FW info for the current Storm  */
		/* NOTE(review): the result of ecore_read_fw_info() is not
		 * checked here; fw_info is assumed valid afterwards — confirm.
		 */
		ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);

		/* Read and dump FW Asserts data */
		if (!dump) {
			/* Size query: just account for one list element */
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* Compute the byte address of the asserts section inside the
		 * Storm's fast-memory internal RAM.
		 */
		fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
		next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
		/* The last written element precedes the next-write index,
		 * wrapping to the end of the list when the index is 0.
		 */
		last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
					last_list_idx * asserts->list_element_dword_size;
		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
	}

	/* Dump last section */
	offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);

	return offset;
}
4849
4850 /***************************** Public Functions *******************************/
4851
4852 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4853 {
4854         struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr*)bin_ptr;
4855         u8 buf_id;
4856
4857         /* convert binary data to debug arrays */
4858         for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4859                 s_dbg_arrays[buf_id].ptr = (u32*)(bin_ptr + buf_array[buf_id].offset);
4860                 s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4861         }
4862
4863         return DBG_STATUS_OK;
4864 }
4865
4866 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4867 {
4868         if (ver < TOOLS_VERSION)
4869                 return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4870
4871         s_app_ver = ver;
4872
4873         return DBG_STATUS_OK;
4874 }
4875
/* Returns the version of the debug FW-functions module, which is simply the
 * tools version constant this file was built with.
 */
u32 ecore_dbg_get_fw_func_ver(void)
{
	return TOOLS_VERSION;
}
4880
4881 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4882 {
4883         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4884
4885         return (enum chip_ids)dev_data->chip_id;
4886 }
4887
/* Resets the debug bus and initializes its recording configuration.
 * one_shot_en    - record until the buffer fills (true) or wrap around (false).
 * force_hw_dwords - force HW recording width; must be 0, 4 or 8.
 * unify_inputs   - required for filtering (see ecore_dbg_bus_enable_filter).
 * grc_input_en   - pre-enable the DBG block's own (GRC) input.
 * Fails if the debug bus is already in use (DBG_REG_DBG_BLOCK_ON set).
 */
enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									bool one_shot_en,
									u8 force_hw_dwords,
									bool unify_inputs,
									bool grc_input_en)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status status;

	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	if (status != DBG_STATUS_OK)
		return status;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);

	/* Only 0 (no forcing), 4 or 8 dwords are valid HW recording widths */
	if (force_hw_dwords &&
		force_hw_dwords != 4 &&
		force_hw_dwords != 8)
		return DBG_STATUS_INVALID_ARGS;

	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return DBG_STATUS_DBG_BUS_IN_USE;

	/* Update reset state of all blocks */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Disable all debug inputs */
	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
	if (status != DBG_STATUS_OK)
		return status;

	/* Reset DBG block */
	ecore_bus_reset_dbg_block(p_hwfn, p_ptt);

	/* Set one-shot / wrap-around (FULL_MODE=1 means wrap-around) */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);

	/* Init state params */
	OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
	dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
	dev_data->bus.state = DBG_BUS_STATE_READY;
	dev_data->bus.one_shot_en = one_shot_en;
	dev_data->bus.hw_dwords = force_hw_dwords;
	dev_data->bus.grc_input_en = grc_input_en;
	dev_data->bus.unify_inputs = unify_inputs;
	/* The GRC input counts as one enabled block (the DBG block itself) */
	dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;

	/* Init special DBG block */
	if (grc_input_en)
		SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);

	return DBG_STATUS_OK;
}
4942
4943 enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
4944                                                                                          struct ecore_ptt *p_ptt,
4945                                                                                          u16 buf_size_kb)
4946 {
4947         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4948         dma_addr_t pci_buf_phys_addr;
4949         void *pci_buf;
4950
4951         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
4952
4953         if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
4954                 return DBG_STATUS_OUTPUT_ALREADY_SET;
4955         if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
4956                 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
4957
4958         dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
4959         dev_data->bus.pci_buf.size = buf_size_kb * 1024;
4960         if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
4961                 return DBG_STATUS_INVALID_ARGS;
4962
4963         pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
4964         if (!pci_buf)
4965                 return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
4966
4967         OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
4968
4969         dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
4970         dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
4971
4972         ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
4973         ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
4974         ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
4975         ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
4976         ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
4977         ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
4978         ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
4979         ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
4980         ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
4981
4982         return DBG_STATUS_OK;
4983 }
4984
/* Configures the debug bus to send recorded data to the network (NIG) on the
 * given port, as Ethernet packets addressed to dest_addr_hi16:dest_addr_lo32.
 * Optionally forwards to / receives from the other engine (mutually
 * exclusive). data_limit_size_kb of 0 means no limit.
 */
enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											u8 port_id,
											u32 dest_addr_lo32,
											u16 dest_addr_hi16,
											u16 data_limit_size_kb,
											bool send_to_other_engine,
											bool rcv_from_other_engine)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);

	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
		return DBG_STATUS_OUTPUT_ALREADY_SET;
	if (dev_data->bus.state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	/* Port must exist on this chip/platform; cross-engine TX and RX are
	 * mutually exclusive.
	 */
	if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
		return DBG_STATUS_INVALID_ARGS;

	dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
	dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;

	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);

	if (send_to_other_engine)
		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
	else
		ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);

	if (rcv_from_other_engine) {
		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
	}
	else {

		/* Configure ethernet header of 14 bytes */
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
		/* Limit is programmed in chunks; 0 leaves it unlimited */
		if (data_limit_size_kb)
			ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
	}

	return DBG_STATUS_OK;
}
5034
5035 static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5036                                                                           u8 enable_mask,
5037                                                                           u8 right_shift)
5038 {
5039         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5040         u8 curr_shifted_enable_mask, shifted_enable_mask;
5041         u32 block_id;
5042
5043         shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5044
5045         if (dev_data->bus.num_enabled_blocks) {
5046                 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5047                         struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5048
5049                         if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5050                                 continue;
5051
5052                         curr_shifted_enable_mask =
5053                                 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5054                                         VALUES_PER_CYCLE,
5055                                         GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5056                         if (shifted_enable_mask & curr_shifted_enable_mask)
5057                                 return true;
5058                 }
5059         }
5060
5061         return false;
5062 }
5063
5064 enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5065                                                                                    struct ecore_ptt *p_ptt,
5066                                                                                    enum block_id block_id,
5067                                                                                    u8 line_num,
5068                                                                                    u8 enable_mask,
5069                                                                                    u8 right_shift,
5070                                                                                    u8 force_valid_mask,
5071                                                                                    u8 force_frame_mask)
5072 {
5073         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5074         struct block_defs *block = s_block_defs[block_id];
5075         struct dbg_bus_block_data *block_bus;
5076         struct dbg_bus_block *block_desc;
5077
5078         block_bus = &dev_data->bus.blocks[block_id];
5079         block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5080
5081         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5082
5083         if (dev_data->bus.state != DBG_BUS_STATE_READY)
5084                 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5085         if (block_id >= MAX_BLOCK_ID)
5086                 return DBG_STATUS_INVALID_ARGS;
5087         if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5088                 return DBG_STATUS_BLOCK_ALREADY_ENABLED;
5089         if (!block->has_dbg_bus[dev_data->chip_id] ||
5090                 line_num >= NUM_DBG_LINES(block_desc) ||
5091                 !enable_mask ||
5092                 enable_mask > MAX_CYCLE_VALUES_MASK ||
5093                 force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5094                 force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5095                 right_shift > VALUES_PER_CYCLE - 1)
5096                 return DBG_STATUS_INVALID_ARGS;
5097         if (dev_data->block_in_reset[block_id])
5098                 return DBG_STATUS_BLOCK_IN_RESET;
5099         if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5100                 return DBG_STATUS_INPUT_OVERLAP;
5101
5102         dev_data->bus.blocks[block_id].line_num = line_num;
5103         SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5104         SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5105         SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5106         SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5107
5108         dev_data->bus.num_enabled_blocks++;
5109
5110         return DBG_STATUS_OK;
5111 }
5112
5113 enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5114                                                                                    enum dbg_storms storm,
5115                                                                                    enum dbg_bus_storm_modes storm_mode)
5116 {
5117         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5118
5119         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm, storm_mode);
5120
5121         if (dev_data->bus.state != DBG_BUS_STATE_READY)
5122                 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5123         if (dev_data->bus.hw_dwords >= 4)
5124                 return DBG_STATUS_HW_ONLY_RECORDING;
5125         if (storm >= MAX_DBG_STORMS)
5126                 return DBG_STATUS_INVALID_ARGS;
5127         if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5128                 return DBG_STATUS_INVALID_ARGS;
5129         if (dev_data->bus.unify_inputs)
5130                 return DBG_STATUS_INVALID_ARGS;
5131
5132         if (dev_data->bus.storms[storm].enabled)
5133                 return DBG_STATUS_STORM_ALREADY_ENABLED;
5134
5135         dev_data->bus.storms[storm].enabled = true;
5136         dev_data->bus.storms[storm].mode = (u8)storm_mode;
5137         dev_data->bus.storms[storm].hw_id = dev_data->bus.num_enabled_storms;
5138
5139         dev_data->bus.num_enabled_storms++;
5140
5141         return DBG_STATUS_OK;
5142 }
5143
/* Enables timestamp recording on the debug bus. valid_mask / frame_mask
 * select which of the 3 timestamp qualifier inputs participate (hence the
 * 0x7 limit); tick_len sets the timestamp tick length. The timestamp input
 * occupies cycle value 0 of the DBG block, so it may not overlap an already
 * enabled input (unless inputs are unified).
 */
enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
											   struct ecore_ptt *p_ptt,
											   u8 valid_mask,
											   u8 frame_mask,
											   u32 tick_len)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);

	if (dev_data->bus.state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (valid_mask > 0x7 || frame_mask > 0x7)
		return DBG_STATUS_INVALID_ARGS;
	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
		return DBG_STATUS_INPUT_OVERLAP;

	dev_data->bus.timestamp_input_en = true;
	dev_data->bus.num_enabled_blocks++;

	/* The timestamp is produced by the DBG block itself */
	SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);

	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);

	return DBG_STATUS_OK;
}
5172
5173 enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5174                                                                                                            enum dbg_storms storm_id,
5175                                                                                                            u8 min_eid,
5176                                                                                                            u8 max_eid)
5177 {
5178         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5179         struct dbg_bus_storm_data *storm_bus;
5180
5181         storm_bus = &dev_data->bus.storms[storm_id];
5182
5183         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5184
5185         if (storm_id >= MAX_DBG_STORMS)
5186                 return DBG_STATUS_INVALID_ARGS;
5187         if (min_eid > max_eid)
5188                 return DBG_STATUS_INVALID_ARGS;
5189         if (!storm_bus->enabled)
5190                 return DBG_STATUS_STORM_NOT_ENABLED;
5191
5192         storm_bus->eid_filter_en = 1;
5193         storm_bus->eid_range_not_mask = 1;
5194         storm_bus->eid_filter_params.range.min = min_eid;
5195         storm_bus->eid_filter_params.range.max = max_eid;
5196
5197         return DBG_STATUS_OK;
5198 }
5199
5200 enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5201                                                                                                           enum dbg_storms storm_id,
5202                                                                                                           u8 eid_val,
5203                                                                                                           u8 eid_mask)
5204 {
5205         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5206         struct dbg_bus_storm_data *storm_bus;
5207
5208         storm_bus = &dev_data->bus.storms[storm_id];
5209
5210         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5211
5212         if (storm_id >= MAX_DBG_STORMS)
5213                 return DBG_STATUS_INVALID_ARGS;
5214         if (!storm_bus->enabled)
5215                 return DBG_STATUS_STORM_NOT_ENABLED;
5216
5217         storm_bus->eid_filter_en = 1;
5218         storm_bus->eid_range_not_mask = 0;
5219         storm_bus->eid_filter_params.mask.val = eid_val;
5220         storm_bus->eid_filter_params.mask.mask = eid_mask;
5221
5222         return DBG_STATUS_OK;
5223 }
5224
5225 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5226                                                                                                  enum dbg_storms storm_id,
5227                                                                                                  u32 cid)
5228 {
5229         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5230         struct dbg_bus_storm_data *storm_bus;
5231
5232         storm_bus = &dev_data->bus.storms[storm_id];
5233
5234         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5235
5236         if (storm_id >= MAX_DBG_STORMS)
5237                 return DBG_STATUS_INVALID_ARGS;
5238         if (!storm_bus->enabled)
5239                 return DBG_STATUS_STORM_NOT_ENABLED;
5240
5241         storm_bus->cid_filter_en = 1;
5242         storm_bus->cid = cid;
5243
5244         return DBG_STATUS_OK;
5245 }
5246
5247 enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
5248                                                                                         struct ecore_ptt *p_ptt,
5249                                                                                         enum block_id block_id,
5250                                                                                         u8 const_msg_len)
5251 {
5252         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5253
5254         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);
5255
5256         if (dev_data->bus.state != DBG_BUS_STATE_READY)
5257                 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5258         if (dev_data->bus.filter_en)
5259                 return DBG_STATUS_FILTER_ALREADY_ENABLED;
5260         if (block_id >= MAX_BLOCK_ID)
5261                 return DBG_STATUS_INVALID_ARGS;
5262         if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5263                 return DBG_STATUS_BLOCK_NOT_ENABLED;
5264         if (!dev_data->bus.unify_inputs)
5265                 return DBG_STATUS_FILTER_BUG;
5266
5267         dev_data->bus.filter_en = true;
5268         dev_data->bus.next_constraint_id = 0;
5269         dev_data->bus.adding_filter = true;
5270
5271         /* HW ID is set to 0 due to required unifyInputs */
5272         ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0); 
5273         ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
5274         if (const_msg_len > 0)
5275                 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);
5276
5277         return DBG_STATUS_OK;
5278 }
5279
5280 enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
5281                                                                                          struct ecore_ptt *p_ptt,
5282                                                                                          bool rec_pre_trigger,
5283                                                                                          u8 pre_chunks,
5284                                                                                          bool rec_post_trigger,
5285                                                                                          u32 post_cycles,
5286                                                                                          bool filter_pre_trigger,
5287                                                                                          bool filter_post_trigger)
5288 {
5289         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5290         enum dbg_bus_post_trigger_types post_trigger_type;
5291         enum dbg_bus_pre_trigger_types pre_trigger_type;
5292         struct dbg_bus_data *bus = &dev_data->bus;
5293
5294         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);
5295
5296         if (bus->state != DBG_BUS_STATE_READY)
5297                 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5298         if (bus->trigger_en)
5299                 return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
5300         if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
5301                 return DBG_STATUS_INVALID_ARGS;
5302
5303         bus->trigger_en = true;
5304         bus->filter_pre_trigger = filter_pre_trigger;
5305         bus->filter_post_trigger = filter_post_trigger;
5306
5307         if (rec_pre_trigger) {
5308                 pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
5309                 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
5310         }
5311         else {
5312                 pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
5313         }
5314
5315         if (rec_post_trigger) {
5316                 post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
5317                 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
5318         }
5319         else {
5320                 post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
5321         }
5322
5323         ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
5324         ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
5325         ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);
5326
5327         return DBG_STATUS_OK;
5328 }
5329
5330 enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5331                                                                                                 struct ecore_ptt *p_ptt,
5332                                                                                                 enum block_id block_id,
5333                                                                                                 u8 const_msg_len,
5334                                                                                                 u16 count_to_next)
5335 {
5336         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5337         struct dbg_bus_data *bus = &dev_data->bus;
5338         struct dbg_bus_block_data *block_bus;
5339         u8 reg_offset;
5340
5341         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5342
5343         block_bus = &bus->blocks[block_id];
5344
5345         if (!bus->trigger_en)
5346                 return DBG_STATUS_TRIGGER_NOT_ENABLED;
5347         if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5348                 return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5349         if (block_id >= MAX_BLOCK_ID)
5350                 return DBG_STATUS_INVALID_ARGS;
5351         if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5352                 return DBG_STATUS_BLOCK_NOT_ENABLED;
5353         if (!count_to_next)
5354                 return DBG_STATUS_INVALID_ARGS;
5355
5356         bus->next_constraint_id = 0;
5357         bus->adding_filter = false;
5358
5359         /* Store block's shifted enable mask */
5360         SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5361                                            VALUES_PER_CYCLE,
5362                                            GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5363
5364         /* Set trigger state registers */
5365         reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5366         ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5367         if (const_msg_len > 0)
5368                 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5369
5370         /* Set trigger set registers */
5371         reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5372         ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5373
5374         /* Set next state to final state, and overwrite previous next state
5375          * (if any).
5376          */
5377         ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5378         if (bus->next_trigger_state > 0) {
5379                 reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5380                 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5381         }
5382
5383         bus->next_trigger_state++;
5384
5385         return DBG_STATUS_OK;
5386 }
5387
5388 enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
5389                                                                                          struct ecore_ptt *p_ptt,
5390                                                                                          enum dbg_bus_constraint_ops constraint_op,
5391                                                                                          u32 data_val,
5392                                                                                          u32 data_mask,
5393                                                                                          bool compare_frame,
5394                                                                                          u8 frame_bit,
5395                                                                                          u8 cycle_offset,
5396                                                                                          u8 dword_offset_in_cycle,
5397                                                                                          bool is_mandatory)
5398 {
5399         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5400         struct dbg_bus_data *bus = &dev_data->bus;
5401         u16 dword_offset, range = 0;
5402
5403         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);
5404
5405         if (!bus->filter_en && !dev_data->bus.trigger_en)
5406                 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5407         if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
5408                 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5409         if (bus->next_constraint_id >= MAX_CONSTRAINTS)
5410                 return DBG_STATUS_TOO_MANY_CONSTRAINTS;
5411         if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
5412                 return DBG_STATUS_INVALID_ARGS;
5413         if (compare_frame &&
5414                 constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
5415                 constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
5416                 return DBG_STATUS_INVALID_ARGS;
5417
5418         dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;
5419
5420         if (!bus->adding_filter) {
5421                 u8 curr_trigger_state_id = bus->next_trigger_state - 1;
5422                 struct dbg_bus_trigger_state_data *trigger_state;
5423
5424                 trigger_state = &bus->trigger_states[curr_trigger_state_id];
5425
5426                 /* Check if the selected dword is enabled in the block */
5427                 if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
5428                         return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;
5429
5430                 /* Add selected dword to trigger state's dword mask */
5431                 SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
5432         }
5433
5434         /* Prepare data mask and range */
5435         if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
5436                 constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
5437                 data_mask = ~data_mask;
5438         }
5439         else {
5440                 u8 lsb, width;
5441                         
5442                 /* Extract lsb and width from mask */
5443                 if (!data_mask)
5444                         return DBG_STATUS_INVALID_ARGS;
5445
5446                 for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
5447                 for (width = 0;
5448                 width < 32 - lsb && (data_mask & 1);
5449                         width++, data_mask >>= 1) {}
5450                         if (data_mask)
5451                                 return DBG_STATUS_INVALID_ARGS;
5452                 range = (lsb << 5) | (width - 1);
5453         }
5454
5455         /* Add constraint */
5456         ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
5457                 dev_data->bus.next_constraint_id,
5458                 s_constraint_op_defs[constraint_op].hw_op_val,
5459                 data_val, data_mask, frame_bit,
5460                 compare_frame ? 0 : 1, dword_offset, range,
5461                 s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
5462                 is_mandatory ? 1 : 0);
5463
5464         /* If first constraint, fill other 3 constraints with dummy constraints
5465          * that always match (using the same offset).
5466          */
5467         if (!dev_data->bus.next_constraint_id) {
5468                 u8 i;
5469
5470                 for (i = 1; i < MAX_CONSTRAINTS; i++)
5471                         ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
5472                                 i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
5473                                 0, 1, dword_offset, 0, 0, 1);
5474         }
5475
5476         bus->next_constraint_id++;
5477
5478         return DBG_STATUS_OK;
5479 }
5480
5481 /* Configure the DBG block client mask */
5482 static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5483                                                                                 struct ecore_ptt *p_ptt)
5484 {
5485         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5486         struct dbg_bus_data *bus = &dev_data->bus;
5487         u32 block_id, client_mask = 0;
5488         u8 storm_id;
5489
5490         /* Update client mask for Storm inputs */
5491         if (bus->num_enabled_storms)
5492                 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5493                         struct storm_defs *storm = &s_storm_defs[storm_id];
5494
5495                         if (bus->storms[storm_id].enabled)
5496                                 client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5497                 }
5498
5499         /* Update client mask for block inputs */
5500         if (bus->num_enabled_blocks) {
5501                 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5502                         struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5503                         struct block_defs *block = s_block_defs[block_id];
5504
5505                         if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5506                                 client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5507                 }
5508         }
5509
5510         /* Update client mask for GRC input */
5511         if (bus->grc_input_en)
5512                 client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5513
5514         /* Update client mask for timestamp input */
5515         if (bus->timestamp_input_en)
5516                 client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5517
5518         ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5519 }
5520
5521 /* Configure the DBG block framing mode */
5522 static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
5523                                                                                                         struct ecore_ptt *p_ptt)
5524 {
5525         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5526         struct dbg_bus_data *bus = &dev_data->bus;
5527         enum dbg_bus_frame_modes dbg_framing_mode;
5528         u32 block_id;
5529
5530         if (!bus->hw_dwords && bus->num_enabled_blocks) {
5531                 struct dbg_bus_line *line_desc;
5532                 u8 hw_dwords;
5533
5534                 /* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
5535                  * (256-bit mode).
5536                  */
5537                 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5538                         struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5539
5540                         if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5541                                 continue;
5542
5543                         line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
5544                         hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;
5545
5546                         if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
5547                                 return DBG_STATUS_NON_MATCHING_LINES;
5548
5549                         /* The DBG block doesn't support triggers and
5550                          * filters on 256b debug lines.
5551                          */
5552                         if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
5553                                 return DBG_STATUS_NO_FILTER_TRIGGER_64B;
5554
5555                         bus->hw_dwords = hw_dwords;
5556                 }
5557         }
5558
5559         switch (bus->hw_dwords) {
5560         case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5561         case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
5562         case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
5563         default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5564         }
5565         ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);
5566
5567         return DBG_STATUS_OK;
5568 }
5569
/* Configure the DBG block Storm data: verifies the SEMI sync FIFOs are
 * empty, programs the per-Storm HW IDs, the stall-on-full behavior, and
 * the round-robin calendar of enabled Storms.
 *
 * NOTE(review): the calendar loop below spins forever if no Storm is
 * enabled; callers appear to guard this with bus->num_enabled_storms
 * (see ecore_dbg_bus_start) - confirm before calling from elsewhere.
 */
static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
										  struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u8 storm_id, i, next_storm_id = 0;
	u32 storm_id_mask = 0;

	/* Check if SEMI sync FIFO is empty - recording cannot start while a
	 * Storm still has data in flight.
	 */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
			return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
	}

	/* Build the Storm HW-ID mask: HW_ID_BITS bits per Storm slot */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];

		if (storm_bus->enabled)
			storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
	}

	ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);

	/* Disable storm stall if recording to internal buffer in one-shot */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);

	/* Configure calendar: distribute the enabled Storms round-robin over
	 * all calendar slots.
	 */
	for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {

		/* Find next enabled Storm (wraps around; see NOTE above) */
		for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);

		/* Configure calendar slot */
		ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
	}

	return DBG_STATUS_OK;
}
5612
/* Assign HW ID to each dword/qword:
 * if the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
 * Otherwise, we would like to assign a different HW ID to each dword, to avoid
 * data synchronization issues. however, we need to check if there is a trigger
 * state for which more than one dword has a constraint. if there is, we cannot
 * assign a different HW ID to each dword (since a trigger state has a single
 * HW ID), so we assign a different HW ID to each block.
 */
static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
						 u8 hw_ids[VALUES_PER_CYCLE])
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	bool hw_id_per_dword = true;
	u8 val_id, state_id;
	u32 block_id;

	/* Default: all dwords get HW ID 0 (also the final answer for
	 * unified inputs).
	 */
	OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);

	if (bus->unify_inputs)
		return;

	/* Per-dword HW IDs are possible only if no trigger state constrains
	 * more than one dword (a trigger state has a single HW ID).
	 */
	if (bus->trigger_en) {
		for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
			u8 num_dwords = 0;

			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
				if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
					num_dwords++;

			if (num_dwords > 1)
				hw_id_per_dword = false;
		}
	}

	if (hw_id_per_dword) {

		/* Assign a different HW ID for each dword */
		for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
			hw_ids[val_id] = val_id;
	}
	else {
		u8 shifted_enable_mask, next_hw_id = 0;

		/* Assign a different HW ID to each enabled block, and map each
		 * dword the block drives to that block's HW ID.
		 */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];

			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
				continue;

			block_bus->hw_id = next_hw_id++;

			/* The first block gets HW ID 0 - its dwords are already
			 * zeroed by the memset above, so skip the mapping.
			 */
			if (!block_bus->hw_id)
				continue;

			shifted_enable_mask =
				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
					VALUES_PER_CYCLE,
					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));

			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
				if (shifted_enable_mask & (1 << val_id))
					hw_ids[val_id] = block_bus->hw_id;
		}
	}
}
5679
5680 /* Configure the DBG block HW blocks data */
5681 static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
5682                                                            struct ecore_ptt *p_ptt)
5683 {
5684         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5685         struct dbg_bus_data *bus = &dev_data->bus;
5686         u8 hw_ids[VALUES_PER_CYCLE];
5687         u8 val_id, state_id;
5688
5689         ecore_assign_hw_ids(p_hwfn, hw_ids);
5690
5691         /* Assign a HW ID to each trigger state */
5692         if (dev_data->bus.trigger_en) {
5693                 for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
5694                         for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
5695                                 u8 state_data = bus->trigger_states[state_id].data;
5696
5697                                 if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
5698                                         ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
5699                                         break;
5700                                 }
5701                         }
5702                 }
5703         }
5704
5705         /* Configure HW ID mask */
5706         dev_data->bus.hw_id_mask = 0;
5707         for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5708                 bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
5709         ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);
5710
5711         /* Configure additional K2 PCIE registers */
5712         if (dev_data->chip_id == CHIP_K2 &&
5713                 (GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
5714                         GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
5715                 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
5716                 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
5717         }
5718 }
5719
/* Start debug-bus recording: validates the configured inputs, programs
 * framing, Storm/block inputs, filter type and client mask, then moves the
 * bus to RECORDING state. The bus must be in READY state on entry.
 */
enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_bus_filter_types filter_type;
	enum dbg_status status;
	u32 block_id;
	u8 storm_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");

	if (bus->state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;

	/* Check if any input was enabled */
	if (!bus->num_enabled_storms &&
		!bus->num_enabled_blocks &&
		!bus->rcv_from_other_engine)
		return DBG_STATUS_NO_INPUT_ENABLED;

	/* Check if too many input types were enabled (storm+dbgmux) */
	if (bus->num_enabled_storms && bus->num_enabled_blocks)
		return DBG_STATUS_TOO_MANY_INPUTS;

	/* Configure framing mode */
	if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
		return status;

	/* Configure DBG block for Storm inputs */
	if (bus->num_enabled_storms)
		if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
			return status;

	/* Configure DBG block for block inputs */
	if (bus->num_enabled_blocks)
		ecore_config_block_inputs(p_hwfn, p_ptt);

	/* Configure filter type from the four pre/post-trigger flag
	 * combinations (OFF when no filter is enabled at all).
	 */
	if (bus->filter_en) {
		if (bus->trigger_en) {
			if (bus->filter_pre_trigger)
				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
			else
				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
		}
		else {
			filter_type = DBG_BUS_FILTER_TYPE_ON;
		}
	}
	else {
		filter_type = DBG_BUS_FILTER_TYPE_OFF;
	}
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);

	/* Restart timestamp */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);

	/* Enable debug block */
	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);

	/* Configure enabled blocks - must be done before the DBG block is
	 * enabled.
	 *
	 * NOTE(review): this comment conflicts with the call ordering -
	 * ecore_bus_enable_dbg_block(..., 1) is invoked just above. Confirm
	 * the intended sequence against the HW programming guide.
	 */
	if (dev_data->bus.num_enabled_blocks) {
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
				continue;

			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
				dev_data->bus.blocks[block_id].line_num,
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
		}
	}

	/* Configure client mask */
	ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);

	/* Configure enabled Storms - must be done after the DBG block is
	 * enabled.
	 */
	if (dev_data->bus.num_enabled_storms)
		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
			if (dev_data->bus.storms[storm_id].enabled)
				ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id, filter_type);

	dev_data->bus.state = DBG_BUS_STATE_RECORDING;

	return DBG_STATUS_OK;
}
5813
5814 enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
5815                                                                    struct ecore_ptt *p_ptt)
5816 {
5817         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5818         struct dbg_bus_data *bus = &dev_data->bus;
5819         enum dbg_status status = DBG_STATUS_OK;
5820
5821         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");
5822
5823         if (bus->state != DBG_BUS_STATE_RECORDING)
5824                 return DBG_STATUS_RECORDING_NOT_STARTED;
5825
5826         status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
5827         if (status != DBG_STATUS_OK)
5828                 return status;
5829
5830         ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);
5831
5832         OSAL_MSLEEP(FLUSH_DELAY_MS);
5833
5834         ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
5835
5836         /* Check if trigger worked */
5837         if (bus->trigger_en) {
5838                 u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);
5839
5840                 if (trigger_state != MAX_TRIGGER_STATES)
5841                         return DBG_STATUS_DATA_DIDNT_TRIGGER;
5842         }
5843
5844         bus->state = DBG_BUS_STATE_STOPPED;
5845
5846         return status;
5847 }
5848
5849 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5850                                                                                                 struct ecore_ptt *p_ptt,
5851                                                                                                 u32 *buf_size)
5852 {
5853         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5854         struct dbg_bus_data *bus = &dev_data->bus;
5855         enum dbg_status status;
5856
5857         status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5858
5859         *buf_size = 0;
5860
5861         if (status != DBG_STATUS_OK)
5862                 return status;
5863
5864         /* Add dump header */
5865         *buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5866
5867         switch (bus->target) {
5868         case DBG_BUS_TARGET_ID_INT_BUF:
5869                 *buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5870         case DBG_BUS_TARGET_ID_PCI:
5871                 *buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5872         default:
5873                 break;
5874         }
5875
5876         /* Dump last section */
5877         *buf_size += ecore_dump_last_section(p_hwfn, OSAL_NULL, 0, false);
5878
5879         return DBG_STATUS_OK;
5880 }
5881
5882 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5883                                                                    struct ecore_ptt *p_ptt,
5884                                                                    u32 *dump_buf,
5885                                                                    u32 buf_size_in_dwords,
5886                                                                    u32 *num_dumped_dwords)
5887 {
5888         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5889         u32 min_buf_size_in_dwords, block_id, offset = 0;
5890         struct dbg_bus_data *bus = &dev_data->bus;
5891         enum dbg_status status;
5892         u8 storm_id;
5893
5894         *num_dumped_dwords = 0;
5895
5896         status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5897         if (status != DBG_STATUS_OK)
5898                 return status;
5899
5900         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5901
5902         if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5903                 return DBG_STATUS_RECORDING_NOT_STARTED;
5904
5905         if (bus->state == DBG_BUS_STATE_RECORDING) {
5906                 enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5907                 if (stop_state != DBG_STATUS_OK)
5908                         return stop_state;
5909         }
5910
5911         if (buf_size_in_dwords < min_buf_size_in_dwords)
5912                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5913
5914         if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5915                 return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5916
5917         /* Dump header */
5918         offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5919
5920         /* Dump recorded data */
5921         if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5922                 u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5923
5924                 if (!recorded_dwords)
5925                         return DBG_STATUS_NO_DATA_RECORDED;
5926                 if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5927                         return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
5928                 offset += recorded_dwords;
5929         }
5930
5931         /* Dump last section */
5932         offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, true);
5933
5934         /* If recorded to PCI buffer - free the buffer */
5935         ecore_bus_free_pci_buf(p_hwfn);
5936
5937         /* Clear debug bus parameters */
5938         bus->state = DBG_BUS_STATE_IDLE;
5939         bus->num_enabled_blocks = 0;
5940         bus->num_enabled_storms = 0;
5941         bus->filter_en = bus->trigger_en = 0;
5942
5943         for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
5944                 SET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
5945
5946         for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5947                 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5948
5949                 storm_bus->enabled = false;
5950                 storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
5951         }
5952
5953         *num_dumped_dwords = offset;
5954
5955         return DBG_STATUS_OK;
5956 }
5957
5958 enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
5959                                                                          enum dbg_grc_params grc_param,
5960                                                                          u32 val)
5961 {
5962         int i;
5963
5964         DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5965
5966         /* Initializes the GRC parameters (if not initialized). Needed in order
5967          * to set the default parameter values for the first time.
5968          */
5969         ecore_dbg_grc_init_params(p_hwfn);
5970
5971         if (grc_param >= MAX_DBG_GRC_PARAMS)
5972                 return DBG_STATUS_INVALID_ARGS;
5973         if (val < s_grc_param_defs[grc_param].min ||
5974                 val > s_grc_param_defs[grc_param].max)
5975                 return DBG_STATUS_INVALID_ARGS;
5976
5977         if (s_grc_param_defs[grc_param].is_preset) {
5978
5979                 /* Preset param */
5980
5981                 /* Disabling a preset is not allowed. Call
5982                  * dbg_grc_set_params_default instead.
5983                  */
5984                 if (!val)
5985                         return DBG_STATUS_INVALID_ARGS;
5986
5987                 /* Update all params with the preset values */
5988                 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5989                         u32 preset_val;
5990
5991                         if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5992                                 preset_val = s_grc_param_defs[i].exclude_all_preset_val;
5993                         else if (grc_param == DBG_GRC_PARAM_CRASH)
5994                                 preset_val = s_grc_param_defs[i].crash_preset_val;
5995                         else
5996                                 return DBG_STATUS_INVALID_ARGS;
5997
5998                         ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
5999                 }
6000         }
6001         else {
6002
6003                 /* Regular param - set its value */
6004                 ecore_grc_set_param(p_hwfn, grc_param, val);
6005         }
6006
6007         return DBG_STATUS_OK;
6008 }
6009
6010 /* Assign default GRC param values */
6011 void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6012 {
6013         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6014         u32 i;
6015
6016         for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6017                 dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
6018 }
6019
6020 enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6021                                                                                                 struct ecore_ptt *p_ptt,
6022                                                                                                 u32 *buf_size)
6023 {
6024         enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6025
6026         *buf_size = 0;
6027
6028         if (status != DBG_STATUS_OK)
6029                 return status;
6030
6031         if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6032                 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6033                 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6034
6035         return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6036 }
6037
6038 enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6039                                                                    struct ecore_ptt *p_ptt,
6040                                                                    u32 *dump_buf,
6041                                                                    u32 buf_size_in_dwords,
6042                                                                    u32 *num_dumped_dwords)
6043 {
6044         u32 needed_buf_size_in_dwords;
6045         enum dbg_status status;
6046
6047         *num_dumped_dwords = 0;
6048
6049         status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6050         if (status != DBG_STATUS_OK)
6051                 return status;
6052
6053         if (buf_size_in_dwords < needed_buf_size_in_dwords)
6054                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6055
6056         /* Doesn't do anything, needed for compile time asserts */
6057         ecore_static_asserts();
6058
6059         /* GRC Dump */
6060         status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6061
6062         /* Reveret GRC params to their default */
6063         ecore_dbg_grc_set_params_default(p_hwfn);
6064
6065         return status;
6066 }
6067
6068 enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6069                                                                                                          struct ecore_ptt *p_ptt,
6070                                                                                                          u32 *buf_size)
6071 {
6072         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6073         struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6074         enum dbg_status status;
6075
6076         *buf_size = 0;
6077                 
6078         status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6079         if (status != DBG_STATUS_OK)
6080                 return status;
6081
6082         if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6083                 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6084                 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6085
6086         if (!idle_chk->buf_size_set) {
6087                 idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6088                 idle_chk->buf_size_set = true;
6089         }
6090
6091         *buf_size = idle_chk->buf_size;
6092
6093         return DBG_STATUS_OK;
6094 }
6095
6096 enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6097                                                                                 struct ecore_ptt *p_ptt,
6098                                                                                 u32 *dump_buf,
6099                                                                                 u32 buf_size_in_dwords,
6100                                                                                 u32 *num_dumped_dwords)
6101 {
6102         u32 needed_buf_size_in_dwords;
6103         enum dbg_status status;
6104
6105         *num_dumped_dwords = 0;
6106
6107         status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6108         if (status != DBG_STATUS_OK)
6109                 return status;
6110
6111         if (buf_size_in_dwords < needed_buf_size_in_dwords)
6112                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6113
6114         /* Update reset state */
6115         ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6116
6117         /* Idle Check Dump */
6118         *num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6119
6120         /* Reveret GRC params to their default */
6121         ecore_dbg_grc_set_params_default(p_hwfn);
6122
6123         return DBG_STATUS_OK;
6124 }
6125
6126 enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6127                                                                                                           struct ecore_ptt *p_ptt,
6128                                                                                                           u32 *buf_size)
6129 {
6130         enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6131
6132         *buf_size = 0;
6133
6134         if (status != DBG_STATUS_OK)
6135                 return status;
6136
6137         return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6138 }
6139
6140 enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6141                                                                                  struct ecore_ptt *p_ptt,
6142                                                                                  u32 *dump_buf,
6143                                                                                  u32 buf_size_in_dwords,
6144                                                                                  u32 *num_dumped_dwords)
6145 {
6146         u32 needed_buf_size_in_dwords;
6147         enum dbg_status status;
6148
6149         status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6150         if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6151                 return status;
6152
6153         if (buf_size_in_dwords < needed_buf_size_in_dwords)
6154                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6155
6156         /* Update reset state */
6157         ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6158
6159         /* Perform dump */
6160         status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6161
6162         /* Reveret GRC params to their default */
6163         ecore_dbg_grc_set_params_default(p_hwfn);
6164
6165         return status;
6166 }
6167
6168 enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6169                                                                                                          struct ecore_ptt *p_ptt,
6170                                                                                                          u32 *buf_size)
6171 {
6172         enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6173
6174         *buf_size = 0;
6175
6176         if (status != DBG_STATUS_OK)
6177                 return status;
6178
6179         return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6180 }
6181
6182 enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6183                                                                                 struct ecore_ptt *p_ptt,
6184                                                                                 u32 *dump_buf,
6185                                                                                 u32 buf_size_in_dwords,
6186                                                                                 u32 *num_dumped_dwords)
6187 {
6188         u32 needed_buf_size_in_dwords;
6189         enum dbg_status status;
6190
6191         *num_dumped_dwords = 0;
6192
6193         status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6194         if (status != DBG_STATUS_OK)
6195                 return status;
6196
6197         if (buf_size_in_dwords < needed_buf_size_in_dwords)
6198                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6199
6200         /* Update reset state */
6201         ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6202
6203         status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6204
6205         /* Reveret GRC params to their default */
6206         ecore_dbg_grc_set_params_default(p_hwfn);
6207
6208         return status;
6209 }
6210
6211 enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6212                                                                                                          struct ecore_ptt *p_ptt,
6213                                                                                                          u32 *buf_size)
6214 {
6215         enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6216
6217         *buf_size = 0;
6218
6219         if (status != DBG_STATUS_OK)
6220                 return status;
6221
6222         return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6223 }
6224
6225 enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6226                                                                                 struct ecore_ptt *p_ptt,
6227                                                                                 u32 *dump_buf,
6228                                                                                 u32 buf_size_in_dwords,
6229                                                                                 u32 *num_dumped_dwords)
6230 {
6231         u32 needed_buf_size_in_dwords;
6232         enum dbg_status status;
6233
6234         *num_dumped_dwords = 0;
6235
6236         status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6237         if (status != DBG_STATUS_OK)
6238                 return status;
6239
6240         if (buf_size_in_dwords < needed_buf_size_in_dwords)
6241                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6242
6243         /* Update reset state */
6244         ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6245
6246         status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6247
6248         /* Reveret GRC params to their default */
6249         ecore_dbg_grc_set_params_default(p_hwfn);
6250
6251         return status;
6252 }
6253
6254 enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6255                                                                                                                                 struct ecore_ptt *p_ptt,
6256                                                                                                                                 u32 *buf_size)
6257 {
6258         enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6259
6260         *buf_size = 0;
6261
6262         if (status != DBG_STATUS_OK)
6263                 return status;
6264
6265         return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6266 }
6267
6268 enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6269                                                                                                    struct ecore_ptt *p_ptt,
6270                                                                                                    u32 *dump_buf,
6271                                                                                                    u32 buf_size_in_dwords,
6272                                                                                                    u32 *num_dumped_dwords)
6273 {
6274         u32 needed_buf_size_in_dwords;
6275         enum dbg_status status;
6276
6277         *num_dumped_dwords = 0;
6278
6279         status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6280         if (status != DBG_STATUS_OK)
6281                 return status;
6282
6283         if (buf_size_in_dwords < needed_buf_size_in_dwords)
6284                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6285
6286         /* Update reset state */
6287         ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6288
6289         status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6290
6291         /* Reveret GRC params to their default */
6292         ecore_dbg_grc_set_params_default(p_hwfn);
6293
6294         return status;
6295 }
6296
6297 enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6298                                                                                                            struct ecore_ptt *p_ptt,
6299                                                                                                            u32 *buf_size)
6300 {
6301         enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6302
6303         *buf_size = 0;
6304
6305         if (status != DBG_STATUS_OK)
6306                 return status;
6307
6308         /* Update reset state */
6309         ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6310
6311         *buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6312
6313         return DBG_STATUS_OK;
6314 }
6315
6316 enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6317                                                                                   struct ecore_ptt *p_ptt,
6318                                                                                   u32 *dump_buf,
6319                                                                                   u32 buf_size_in_dwords,
6320                                                                                   u32 *num_dumped_dwords)
6321 {
6322         u32 needed_buf_size_in_dwords;
6323         enum dbg_status status;
6324
6325         *num_dumped_dwords = 0;
6326
6327         status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6328         if (status != DBG_STATUS_OK)
6329                 return status;
6330
6331         if (buf_size_in_dwords < needed_buf_size_in_dwords)
6332                 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6333
6334         *num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6335
6336         /* Reveret GRC params to their default */
6337         ecore_dbg_grc_set_params_default(p_hwfn);
6338
6339         return DBG_STATUS_OK;
6340 }
6341
/* Reads the attention (interrupt/parity) status registers of the given
 * block into *results, recording only registers whose status is non-zero.
 * When clear_status is set, the status is read through the register's
 * "clear" address instead of its plain status address — NOTE(review):
 * presumably a read-to-clear register, so the read itself acknowledges
 * the attention; confirm against the HW spec. The read order (status
 * first, then mask) is therefore significant; do not reorder.
 */
enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									enum block_id block_id,
									enum dbg_attn_type attn_type,
									bool clear_status,
									struct dbg_attn_block_result *results)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	/* The attention description arrays must be registered */
	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode: skip registers whose mode expression doesn't
		 * match the current device configuration.
		 */
		eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register. Addresses in
		 * the description arrays are in dwords; convert to bytes.
		 */
		sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
		sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results, including the
		 * current mask register value for context.
		 */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
		num_result_regs++;
	}

	/* Fill in the per-block summary fields of the result */
	results->block_id = (u8)block_id;
	results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
6397
6398 enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6399                                                                          struct dbg_attn_block_result *results)
6400 {
6401         enum dbg_attn_type attn_type;
6402         u8 num_regs, i;
6403
6404         num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6405         attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6406
6407         for (i = 0; i < num_regs; i++) {
6408                 struct dbg_attn_reg_result *reg_result;
6409                 const char *attn_type_str;
6410                 u32 sts_addr;
6411
6412                 reg_result = &results->reg_results[i];
6413                 attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6414                 sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6415                 DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6416         }
6417
6418         return DBG_STATUS_OK;
6419 }
6420
6421 bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6422                                                          struct ecore_ptt *p_ptt,
6423                                                          enum block_id block_id)
6424 {
6425         struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6426         struct block_defs *block = s_block_defs[block_id];
6427         u32 reset_reg;
6428         
6429         if (!block->has_reset_bit)
6430                 return false;
6431
6432         reset_reg = block->reset_reg;
6433
6434         return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
6435                 !(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) :        true;
6436 }
6437