/*
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"

#define BUFFER_WARN_LIMIT 10000000
struct large_entity large_entity_list[] = {
	{CUDBG_EDC0, 0, 0},
	{CUDBG_EDC1, 0, 0},
	{CUDBG_MC0, 0, 0},
	{CUDBG_MC1, 0, 0}
};
static int is_fw_attached(struct cudbg_init *pdbg_init)
{

	return (pdbg_init->adap->flags & FW_OK);
}
/* This function will add additional padding bytes into debug_buffer to make it
 * 4 byte aligned.
 */
static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
			       struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}

	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}
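
/*
 * Worked example (illustrative only, not from the original source): if an
 * entity's data ends at offset 0x1003 and its start_offset is 0x1000, then
 * remain = 3 % 4 = 3 and padding = 1, so one zero byte is appended and the
 * recorded entity size becomes 4.  When remain is 0 no padding is needed,
 * which is why the memcpy is guarded by "if (remain)".
 */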
static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
			  enum ctxt_type ctype, u32 *data)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	if (is_fw_attached(pdbg_init)) {
		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
		    "t4cudl");
		if (rc == 0) {
			rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
					    data);
			end_synchronized_op(padap, 0);
		}
	}

	if (rc != 0)
		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
				   struct cudbg_buffer *dbg_buff,
				   struct cudbg_entity_hdr **entity_hdr)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
	int rc = 0;
	u32 ext_offset = cudbg_hdr->data_len;

	*ext_size = 0;
	if (dbg_buff->size - dbg_buff->offset <=
	    sizeof(struct cudbg_entity_hdr)) {
		rc = CUDBG_STATUS_BUFFER_SHORT;
		goto err;
	}

	*entity_hdr = (struct cudbg_entity_hdr *)
		      ((char *)outbuf + cudbg_hdr->data_len);

	/* Find the last extended entity header */
	while ((*entity_hdr)->size) {
		ext_offset += sizeof(struct cudbg_entity_hdr) +
			      (*entity_hdr)->size;

		*ext_size += (*entity_hdr)->size +
			     sizeof(struct cudbg_entity_hdr);

		if (dbg_buff->size - dbg_buff->offset + *ext_size <=
		    sizeof(struct cudbg_entity_hdr)) {
			rc = CUDBG_STATUS_BUFFER_SHORT;
			goto err;
		}

		if (ext_offset != (*entity_hdr)->next_ext_offset) {
			ext_offset -= sizeof(struct cudbg_entity_hdr) +
				      (*entity_hdr)->size;
			break;
		}

		(*entity_hdr)->next_ext_offset = *ext_size;

		*entity_hdr = (struct cudbg_entity_hdr *)
			      ((char *)outbuf + ext_offset);
	}

	/* update the data offset */
	dbg_buff->offset = ext_offset;
err:
	return rc;
}
static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
			      u32 cur_entity_data_offset,
			      u32 cur_entity_size,
			      int entity_nu, u32 ext_size)
{
	struct cudbg_private *priv = handle;
	struct cudbg_init *cudbg_init = &priv->dbg_init;
	struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
	u64 timestamp;
	u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
	u32 remain_flash_size;
	u32 flash_data_offset;
	u32 data_hdr_size;
	int rc = -1;

	data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
			sizeof(struct cudbg_hdr);

	flash_data_offset = (FLASH_CUDBG_NSECS *
			     (sizeof(struct cudbg_flash_hdr) +
			      data_hdr_size)) +
			    (cur_entity_data_offset - data_hdr_size);

	if (flash_data_offset > CUDBG_FLASH_SIZE) {
		update_skip_size(sec_info, cur_entity_size);
		if (cudbg_init->verbose)
			cudbg_init->print("Large entity skipping...\n");
		return rc;
	}

	remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;

	if (cur_entity_size > remain_flash_size) {
		update_skip_size(sec_info, cur_entity_size);
		if (cudbg_init->verbose)
			cudbg_init->print("Large entity skipping...\n");
	} else {
		timestamp = cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time;

		cur_entity_hdr_offset +=
			(sizeof(struct cudbg_entity_hdr) *
			 (entity_nu - 1));

		rc = cudbg_write_flash(handle, timestamp, dbg_buff,
				       cur_entity_data_offset,
				       cur_entity_hdr_offset,
				       cur_entity_size,
				       ext_size);
		if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
			cudbg_init->print("\n\tFLASH is full... "
					  "cannot write more data to flash\n\n");
	}

	return rc;
}
int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
{
	struct cudbg_entity_hdr *entity_hdr = NULL;
	struct cudbg_entity_hdr *ext_entity_hdr = NULL;
	struct cudbg_hdr *cudbg_hdr;
	struct cudbg_buffer dbg_buff;
	struct cudbg_error cudbg_err = {0};
	int large_entity_code;

	u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
	struct cudbg_init *cudbg_init =
		&(((struct cudbg_private *)handle)->dbg_init);
	struct adapter *padap = cudbg_init->adap;
	u32 total_size, remaining_buf_size;
	u32 ext_size = 0;
	int index, bit, i, rc = -1;
	int all;
	int flag_ext = 0;

	reset_skip_entity();

	dbg_buff.data = outbuf;
	dbg_buff.size = *outbuf_size;
	dbg_buff.offset = 0;

	cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = padap->params.chipid;

	if (cudbg_hdr->data_len)
		flag_ext = 1;

	if (cudbg_init->use_flash) {
		rc = t4_get_flash_params(padap);
		if (rc) {
			if (cudbg_init->verbose)
				cudbg_init->print("\nGet flash params failed.\n\n");
			cudbg_init->use_flash = 0;
		}

		/* Timestamp is mandatory. If it is not passed then disable
		 * flash support.
		 */
		if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
			if (cudbg_init->verbose)
				cudbg_init->print("\nTimestamp param missing, "
						  "so ignoring flash write request\n\n");
			cudbg_init->use_flash = 0;
		}
	}

	if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
	    dbg_buff.size) {
		rc = CUDBG_STATUS_SMALL_BUFF;
		total_size = cudbg_hdr->hdr_len;
		goto err;
	}

	/* If ext flag is set then move the offset to the end of the buf
	 * so that we can add ext entities.
	 */
	if (flag_ext) {
		ext_entity_hdr = (struct cudbg_entity_hdr *)
				 ((char *)outbuf + cudbg_hdr->hdr_len +
				  (sizeof(struct cudbg_entity_hdr) *
				   (CUDBG_EXT_ENTITY - 1)));
		ext_entity_hdr->start_offset = cudbg_hdr->data_len;
		ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
		ext_entity_hdr->size = 0;
		dbg_buff.offset = cudbg_hdr->data_len;
	} else {
		dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes*/
		dbg_buff.offset += CUDBG_MAX_ENTITY *
				   sizeof(struct cudbg_entity_hdr);
	}

	total_size = dbg_buff.offset;
	all = dbg_bitmap[0] & (1 << CUDBG_ALL);

	/*sort(large_entity_list);*/

	for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
		index = i / 8;
		bit = i % 8;

		if (entity_list[i].bit == CUDBG_EXT_ENTITY)
			continue;

		if (all || (dbg_bitmap[index] & (1 << bit))) {

			if (!flag_ext) {
				rc = get_entity_hdr(outbuf, i, dbg_buff.size,
						    &entity_hdr);
				if (rc)
					cudbg_hdr->hdr_flags = rc;
			} else {
				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
							     &dbg_buff,
							     &entity_hdr);
				if (rc)
					goto err;

				/* move the offset after the ext header */
				dbg_buff.offset +=
					sizeof(struct cudbg_entity_hdr);
			}

			entity_hdr->entity_type = i;
			entity_hdr->start_offset = dbg_buff.offset;
			/* process each entity by calling process_entity fp */
			remaining_buf_size = dbg_buff.size - dbg_buff.offset;

			if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
			    is_large_entity(i)) {
				if (cudbg_init->verbose)
					cudbg_init->print("Skipping %s\n",
							  entity_list[i].name);
				skip_entity(i);
				continue;
			} else {
				/* If fw_attach is 0, then skip entities which
				 * communicate with firmware.
				 */
				if (!is_fw_attached(cudbg_init) &&
				    (entity_list[i].flag &
				     (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
					if (cudbg_init->verbose)
						cudbg_init->print("Skipping %s entity "
								  "because fw_attach "
								  "is 0\n",
								  entity_list[i].name);
					continue;
				}

				if (cudbg_init->verbose)
					cudbg_init->print("collecting debug entity: "
							  "%s\n", entity_list[i].name);
				memset(&cudbg_err, 0,
				       sizeof(struct cudbg_error));
				rc = process_entity[i - 1](cudbg_init, &dbg_buff,
							   &cudbg_err);
			}

			if (rc) {
				entity_hdr->size = 0;
				dbg_buff.offset = entity_hdr->start_offset;
			} else
				align_debug_buffer(&dbg_buff, entity_hdr);

			if (cudbg_err.sys_err)
				rc = CUDBG_SYSTEM_ERROR;

			entity_hdr->hdr_flags = rc;
			entity_hdr->sys_err = cudbg_err.sys_err;
			entity_hdr->sys_warn = cudbg_err.sys_warn;

			/* We don't want to include ext entity size in global
			 * header.
			 */
			if (!flag_ext)
				total_size += entity_hdr->size;

			cudbg_hdr->data_len = total_size;
			*outbuf_size = total_size;

			/* consider the size of the ext entity header and data
			 * also
			 */
			if (flag_ext) {
				ext_size += (sizeof(struct cudbg_entity_hdr) +
					     entity_hdr->size);
				entity_hdr->start_offset -= cudbg_hdr->data_len;
				ext_entity_hdr->size = ext_size;
				entity_hdr->next_ext_offset = ext_size;
				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
			}

			if (cudbg_init->use_flash) {
				if (flag_ext)
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   ext_entity_hdr->start_offset,
							   entity_hdr->size,
							   CUDBG_EXT_ENTITY,
							   ext_size);
				else
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   entity_hdr->start_offset,
							   entity_hdr->size,
							   i, ext_size);
			}
		}
	}

	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
	     i++) {
		large_entity_code = large_entity_list[i].entity_code;
		if (large_entity_list[i].skip_flag) {
			if (!flag_ext) {
				rc = get_entity_hdr(outbuf, large_entity_code,
						    dbg_buff.size, &entity_hdr);
				if (rc)
					cudbg_hdr->hdr_flags = rc;
			} else {
				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
							     &dbg_buff,
							     &entity_hdr);
				if (rc)
					goto err;

				dbg_buff.offset +=
					sizeof(struct cudbg_entity_hdr);
			}

			/* If fw_attach is 0, then skip entities which
			 * communicate with firmware.
			 */
			if (!is_fw_attached(cudbg_init) &&
			    (entity_list[large_entity_code].flag &
			     (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
				if (cudbg_init->verbose)
					cudbg_init->print("Skipping %s entity "
							  "because fw_attach "
							  "is 0\n",
							  entity_list[large_entity_code]
							  .name);
				continue;
			}

			entity_hdr->entity_type = large_entity_code;
			entity_hdr->start_offset = dbg_buff.offset;
			if (cudbg_init->verbose)
				cudbg_init->print("Re-trying debug entity: %s\n",
						  entity_list[large_entity_code].name);

			memset(&cudbg_err, 0, sizeof(struct cudbg_error));
			rc = process_entity[large_entity_code - 1](cudbg_init,
								   &dbg_buff,
								   &cudbg_err);
			if (rc) {
				entity_hdr->size = 0;
				dbg_buff.offset = entity_hdr->start_offset;
			} else
				align_debug_buffer(&dbg_buff, entity_hdr);

			if (cudbg_err.sys_err)
				rc = CUDBG_SYSTEM_ERROR;

			entity_hdr->hdr_flags = rc;
			entity_hdr->sys_err = cudbg_err.sys_err;
			entity_hdr->sys_warn = cudbg_err.sys_warn;

			/* We don't want to include ext entity size in global
			 * header.
			 */
			if (!flag_ext)
				total_size += entity_hdr->size;

			cudbg_hdr->data_len = total_size;
			*outbuf_size = total_size;

			/* consider the size of the ext entity header and
			 * data also
			 */
			if (flag_ext) {
				ext_size += (sizeof(struct cudbg_entity_hdr) +
					     entity_hdr->size);
				entity_hdr->start_offset -=
					cudbg_hdr->data_len;
				ext_entity_hdr->size = ext_size;
				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
			}

			if (cudbg_init->use_flash) {
				if (flag_ext)
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   ext_entity_hdr->start_offset,
							   entity_hdr->size,
							   CUDBG_EXT_ENTITY,
							   ext_size);
				else
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   entity_hdr->start_offset,
							   entity_hdr->size,
							   large_entity_list[i].entity_code,
							   ext_size);
			}
		}
	}

	cudbg_hdr->data_len = total_size;
	*outbuf_size = total_size;

	if (flag_ext)
		*outbuf_size += ext_size;

	return 0;

err:
	return rc;
}
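
/*
 * Minimal usage sketch (illustrative only; "my_print" and the buffer sizing
 * are assumptions made for the example, not part of this driver).  A caller
 * fills in a cudbg_private handle, selects entities via dbg_bitmap, and
 * hands cudbg_collect() a flat output buffer:
 *
 *	struct cudbg_private priv = {0};
 *	static u8 buf[4 * 1024 * 1024];
 *	u32 len = sizeof(buf);
 *
 *	priv.dbg_init.adap = padap;
 *	priv.dbg_init.print = my_print;
 *	priv.dbg_init.dbg_bitmap[0] |= 1 << CUDBG_ALL;
 *	if (cudbg_collect(&priv, buf, &len) == 0)
 *		;	 len now holds the number of valid bytes in buf
 */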
void reset_skip_entity(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
		large_entity_list[i].skip_flag = 0;
}
void skip_entity(int entity_code)
{
	int i;

	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
	     i++) {
		if (large_entity_list[i].entity_code == entity_code)
			large_entity_list[i].skip_flag = 1;
	}
}
int is_large_entity(int entity_code)
{
	int i;

	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
	     i++) {
		if (large_entity_list[i].entity_code == entity_code)
			return 1;
	}

	return 0;
}
int get_entity_hdr(void *outbuf, int i, u32 size,
		   struct cudbg_entity_hdr **entity_hdr)
{
	int rc = 0;
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr) * i) > size)
		return CUDBG_STATUS_SMALL_BUFF;

	*entity_hdr = (struct cudbg_entity_hdr *)
		      ((char *)outbuf + cudbg_hdr->hdr_len +
		       (sizeof(struct cudbg_entity_hdr) * (i - 1)));

	return rc;
}
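
/*
 * Buffer layout assumed by get_entity_hdr() (summary added by the editor,
 * not from the original comments): the output buffer starts with one
 * struct cudbg_hdr, followed by CUDBG_MAX_ENTITY fixed slots of
 * struct cudbg_entity_hdr (entity i uses slot i - 1), followed by the
 * entity payloads themselves at the offsets recorded in those headers.
 */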
static int collect_rss(struct cudbg_init *pdbg_init,
		       struct cudbg_buffer *dbg_buff,
		       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	u32 size;
	int rc = 0;

	size = RSS_NENTRIES * sizeof(u16);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
	if (rc) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s(), t4_read_rss failed!, rc: %d\n",
					 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
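
/*
 * Note added by the editor: every collect_*() routine below follows the
 * same shape as collect_rss() above -- carve a scratch area out of the
 * output buffer with get_scratch_buff(), fill it from hardware, emit a
 * compression header with write_compression_hdr(), compress the scratch
 * data into the output with compress_buff(), and release the scratch space
 * on the way out (the err/err1 labels unwind in reverse order of
 * acquisition).
 */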
static int collect_sw_state(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct sw_state *swstate;
	u32 size;
	int rc = 0;

	size = sizeof(struct sw_state);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	swstate = (struct sw_state *) scratch_buff.data;

	swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
	snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
		 "FreeBSD");
	swstate->os_type = 0;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_ddp_stats(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct tp_usm_stats *tp_usm_stats_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct tp_usm_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;

	/* spin_lock(&padap->stats_lock);	TODO*/
	t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
	/* spin_unlock(&padap->stats_lock);	TODO*/

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_ulptx_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_ulptx_la *ulptx_la_buff;
	u32 size, i, j;
	int rc = 0;

	size = sizeof(struct struct_ulptx_la);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;

	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      A_ULP_TX_LA_RDPTR_0 +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      A_ULP_TX_LA_WRPTR_0 +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       A_ULP_TX_LA_RDDATA_0 +
						       0x10 * i);
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
		}
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_ulprx_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_ulprx_la *ulprx_la_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_ulprx_la);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_cpl_stats(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_tp_cpl_stats *tp_cpl_stats_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_tp_cpl_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
	tp_cpl_stats_buff->nchan = padap->chip_params->nchan;

	/* spin_lock(&padap->stats_lock);	TODO*/
	t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
	/* spin_unlock(&padap->stats_lock);	TODO*/

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_wc_stats(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_wc_stats *wc_stats_buff;
	u32 val1, val2, size;
	int rc = 0;

	size = sizeof(struct struct_wc_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;

	if (!is_t4(padap)) {
		val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
		val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
		wc_stats_buff->wr_cl_success = val1 - val2;
		wc_stats_buff->wr_cl_fail = val2;
	} else {
		wc_stats_buff->wr_cl_success = 0;
		wc_stats_buff->wr_cl_fail = 0;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct struct_mem_desc *)a)->base -
	       ((const struct struct_mem_desc *)b)->base;
}
static int fill_meminfo(struct adapter *padap,
			struct struct_meminfo *meminfo_buff)
{
	struct struct_mem_desc *md;
	u32 size, lo, hi;
	u32 used, alloc;
	int n, i, rc = 0;

	size = sizeof(struct struct_meminfo);

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct struct_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
	md = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

	i = 0;

	lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);

	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
		meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
					       (G_EDRAM0_SIZE(hi) << 20);
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(padap, A_MA_EDRAM1_BAR);
		meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
					       (G_EDRAM1_SIZE(hi) << 20);
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap)) {
		if (lo & F_EXT_MEM0_ENABLE) {
			hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				(G_EXT_MEM_SIZE(hi) << 20);
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & F_EXT_MEM1_ENABLE) {
			hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
			meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				(G_EXT_MEM1_SIZE(hi) << 20);
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else if (is_t6(padap)) {
		if (lo & F_EXT_MEM_ENABLE) {
			hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				(G_EXT_MEM_SIZE(hi) << 20);
			meminfo_buff->avail[i].idx = 2;
			i++;
		}
	}

	if (!i) {				/* no memory available */
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}

	meminfo_buff->avail_c = i;
	qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
	      mem_desc_cmp);
	(md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
	(md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
	(md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
	(md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
	(md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE) *
		    G_PMTXMAXPAGE(t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE));
	md++;

	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) *
		    G_PMRXMAXPAGE(t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE));
	md++;

	if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
		if (chip_id(padap) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
			md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
		} else {
			hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
			md->base = t4_read_reg(padap,
					       A_LE_DB_HASH_TBL_BASE_ADDR);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(region);	/* hide it */
	}
	md++;

#define ulp_region(reg) \
	{\
		md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
		(md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
	}

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);

	md->base = 0;
	md->idx = ARRAY_SIZE(region);
	if (!is_t4(padap)) {
		u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
		u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);

		if (sge_ctrl & F_VFIFO_ENABLE)
			size = G_DBVFIFO_SIZE(fifo_size);
		else
			size = G_T6_DBVFIFO_SIZE(fifo_size);

		if (size) {
			md->base = G_BASEADDR(t4_read_reg(padap,
							  A_SGE_DBVFIFO_BADDR));
			md->limit = md->base + (size << 2) - 1;
		}
	}
	md++;

	md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
	md->limit = 0;
	md++;

#ifndef __NO_DRIVER_OCQ_SUPPORT__
	/*md->base = padap->vres.ocq.start;*/
	/*if (adap->vres.ocq.size)*/
	/*	md->limit = md->base + adap->vres.ocq.size - 1;*/
	/*else*/
	md->idx = ARRAY_SIZE(region);	/* hide it */
	md++;
#endif

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = (int) (md - meminfo_buff->mem);
	meminfo_buff->mem_c = n;

	qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
	      mem_desc_cmp);

	lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
	hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
	hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
	meminfo_buff->rx_pages_data[0] = G_PMRXMAXPAGE(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;

	lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
	hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
	meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);

	for (i = 0; i < 4; i++) {
		if (chip_id(padap) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
		if (chip_id(padap) > CHELSIO_T4) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->chip_params->nchan; i++) {
		if (chip_id(padap) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
		if (chip_id(padap) > CHELSIO_T4) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

err:
	return rc;
}
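
/*
 * Summary added by the editor (not from the original comments):
 * fill_meminfo() produces two sorted tables -- avail[] with the physical
 * memories the MA arbiter exposes (EDC0/EDC1, MC/MC0/MC1) and mem[] with
 * the hardware regions (DBQ/IMSG contexts, TCBs, payload, ULP windows, ...)
 * laid out inside them, plus entries for any address-space holes.  Entries
 * whose idx equals ARRAY_SIZE(region) are markers hidden from the
 * region-name table.
 */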
static int collect_meminfo(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_meminfo *meminfo_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_meminfo);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	meminfo_buff = (struct struct_meminfo *)scratch_buff.data;

	rc = fill_meminfo(padap, meminfo_buff);
	if (rc)
		goto err;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_lb_stats(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct lb_port_stats *tmp_stats;
	struct struct_lb_stats *lb_stats_buff;
	u32 i, n, size;
	int rc = 0;

	rc = padap->params.nports;
	if (rc < 0)
		goto err;

	n = rc;
	size = sizeof(struct struct_lb_stats) +
	       n * sizeof(struct lb_port_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;

	lb_stats_buff->nchan = n;
	tmp_stats = lb_stats_buff->s;

	for (i = 0; i < n; i += 2, tmp_stats += 2) {
		t4_get_lb_stats(padap, i, tmp_stats);
		t4_get_lb_stats(padap, i + 1, tmp_stats + 1);
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_rdma_stats(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct tp_rdma_stats *rdma_stats_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct tp_rdma_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;

	/* spin_lock(&padap->stats_lock);	TODO*/
	t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
	/* spin_unlock(&padap->stats_lock);	TODO*/

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_clk_info(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct struct_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int size;
	int rc = 0;

	if (!padap->params.vpd.cclk) {
		rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
		goto err;
	}

	size = sizeof(struct struct_clk_info);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	clk_info_buff = (struct struct_clk_info *) scratch_buff.data;

	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */
	clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
	clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
	clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
	/* in us */
	clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
				      clk_info_buff->dack_re) / 1000000) *
				    t4_read_reg(padap, A_TP_DACK_TIMER);

	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);

	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);

	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);

	clk_info_buff->initial_srtt =
		tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
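
/*
 * Worked example for the conversions above (illustrative numbers, not from
 * the original source): vpd.cclk is in kHz, so a 500 MHz core clock gives
 * cclk_ps = 1000000000 / 500000 = 2000 ps per clock tick.  With a timer
 * resolution field (tre) of 9, one TP timer tick is
 * (2000 << 9) / 1000000 = 1 us, and a raw A_TP_RXT_MIN value of 1000 then
 * decodes to roughly a 1 ms minimum retransmit timer.
 */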
static int collect_macstats(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_mac_stats_rev1 *mac_stats_buff;
	u32 i, n, size;
	int rc = 0;

	rc = padap->params.nports;
	if (rc < 0)
		goto err;

	n = rc;
	size = sizeof(struct struct_mac_stats_rev1);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;

	mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
	mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
				       sizeof(struct cudbg_ver_hdr);

	mac_stats_buff->port_count = n;
	for (i = 0; i < mac_stats_buff->port_count; i++)
		t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct cim_pif_la *cim_pif_la_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;

	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_tp_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_tp_la *tp_la_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_tp_la) + TPLA_SIZE * sizeof(u64);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	tp_la_buff = (struct struct_tp_la *) scratch_buff.data;

	tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_tp_fcoe_stats *tp_fcoe_stats_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_tp_fcoe_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;

	/* spin_lock(&padap->stats_lock);	TODO*/
	t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
	t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
	if (padap->chip_params->nchan == NCHAN) {
		t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
		t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
	}
	/* spin_unlock(&padap->stats_lock);	TODO*/

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_tp_err_stats *tp_err_stats_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_tp_err_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;

	/* spin_lock(&padap->stats_lock);	TODO*/
	t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
	/* spin_unlock(&padap->stats_lock);	TODO*/
	tp_err_stats_buff->nchan = padap->chip_params->nchan;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_tcp_stats(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_tcp_stats *tcp_stats_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_tcp_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;

	/* spin_lock(&padap->stats_lock);	TODO*/
	t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
	/* spin_unlock(&padap->stats_lock);	TODO*/

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_hw_sched(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_hw_sched *hw_sched_buff;
	u32 size;
	int i, rc = 0;

	if (!padap->params.vpd.cclk) {
		rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
		goto err;
	}

	size = sizeof(struct struct_hw_sched);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;

	hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
	hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);

	for (i = 0; i < NTX_SCHED; ++i) {
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], 1);
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_pm_stats(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_pm_stats *pm_stats_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct struct_pm_stats);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;

	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_path_mtu(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	u32 size;
	int rc = 0;

	size = NMTUS * sizeof(u16);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_rss_key(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	u32 size;
	int rc = 0;

	size = 10 * sizeof(u32);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_rss_config(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct rss_config *rss_conf;
	u32 size;
	int rc = 0;

	size = sizeof(struct rss_config);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	rss_conf = (struct rss_config *)scratch_buff.data;

	rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
	rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
	rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
	rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
	rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
	rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
	rss_conf->chip = padap->params.chipid;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct rss_vf_conf *vfconf;
	int vf, rc, vf_count;
	u32 size;

	vf_count = padap->chip_params->vfcount;
	size = vf_count * sizeof(*vfconf);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	vfconf = (struct rss_vf_conf *)scratch_buff.data;

	for (vf = 0; vf < vf_count; vf++) {
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, 1);
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct rss_pf_conf *pfconf;
	struct adapter *padap = pdbg_init->adap;
	u32 rss_pf_map, rss_pf_mask, size;
	int pf, rc = 0;

	size = 8 * sizeof(*pfconf);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	pfconf = (struct rss_pf_conf *)scratch_buff.data;

	rss_pf_map = t4_read_rss_pf_map(padap, 1);
	rss_pf_mask = t4_read_rss_pf_mask(padap, 1);

	for (pf = 0; pf < 8; pf++) {
		pfconf[pf].rss_pf_map = rss_pf_map;
		pfconf[pf].rss_pf_mask = rss_pf_mask;
		/* no return value */
		t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int check_valid(u32 *buf, int type)
{
	int index;
	int bit;
	int bit_pos = 0;

	switch (type) {
	case CTXT_EGRESS:
		bit_pos = 176;
		break;
	case CTXT_INGRESS:
		bit_pos = 141;
		break;
	case CTXT_FLM:
		bit_pos = 89;
		break;
	}
	index = bit_pos / 32;
	bit_pos -= index * 32;
	bit = 31 - bit_pos;
	return buf[index] & (1U << bit);
}
/*
 * Get EGRESS, INGRESS, FLM, and CNM max qid.
 *
 * For EGRESS and INGRESS, do the following calculation.
 * max_qid = (DBQ/IMSG context region size in bytes) /
 *	     (size of context in bytes).
 *
 * For FLM, do the following calculation.
 * max_qid = (FLM cache region size in bytes) /
 *	     ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
 *
 * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting
 * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
 * splitting is enabled, then max CNM qid is half of max FLM qid.
 */
static int get_max_ctxt_qid(struct adapter *padap,
			    struct struct_meminfo *meminfo,
			    u32 *max_ctx_qid, u8 nelem)
{
	u32 i, idx, found = 0;

	if (nelem != (CTXT_CNM + 1))
		return -EINVAL;

	for (i = 0; i < meminfo->mem_c; i++) {
		if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
			continue;	/* skip holes */

		idx = meminfo->mem[i].idx;
		/* Get DBQ, IMSG, and FLM context region size */
		if (idx <= CTXT_FLM) {
			if (!(meminfo->mem[i].limit))
				meminfo->mem[i].limit =
					i < meminfo->mem_c - 1 ?
					meminfo->mem[i + 1].base - 1 : ~0;

			if (idx < CTXT_FLM) {
				/* Get EGRESS and INGRESS max qid. */
				max_ctx_qid[idx] = (meminfo->mem[i].limit -
						    meminfo->mem[i].base + 1) /
						   CUDBG_CTXT_SIZE_BYTES;
				found++;
			} else {
				/* Get FLM and CNM max qid. */
				u32 value, edram_ptr_count;
				u8 bytes_per_ptr = 8;
				u8 nohdr;

				value = t4_read_reg(padap, A_SGE_FLM_CFG);

				/* Check if header splitting is enabled. */
				nohdr = (value >> S_NOHDR) & 1U;

				/* Get the number of pointers in EDRAM per
				 * qid in units of 32.
				 */
				edram_ptr_count = 32 *
						  (1U << G_EDRAMPTRCNT(value));

				/* EDRAMPTRCNT value of 3 is reserved.
				 * So don't exceed 128.
				 */
				if (edram_ptr_count > 128)
					edram_ptr_count = 128;

				max_ctx_qid[idx] = (meminfo->mem[i].limit -
						    meminfo->mem[i].base + 1) /
						   (edram_ptr_count *
						    bytes_per_ptr);
				found++;

				/* CNM has 1-to-1 mapping with FLM.
				 * However, if header splitting is enabled,
				 * then max CNM qid is half of max FLM qid.
				 */
				max_ctx_qid[CTXT_CNM] = nohdr ?
							max_ctx_qid[idx] :
							max_ctx_qid[idx] >> 1;

				/* One more increment for CNM */
				found++;
			}
		}
		if (found == nelem)
			break;
	}

	/* Sanity check. Ensure the values are within known max. */
	max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
					 CUDBG_MAX_EGRESS_QIDS);
	max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
					  CUDBG_MAX_INGRESS_QIDS);
	max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
				      CUDBG_MAX_FLM_QIDS);
	max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
				      CUDBG_MAX_CNM_QIDS);
	return 0;
}
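
/*
 * Worked example (illustrative numbers, not from the original source): a
 * 1 MB DBQ context region with CUDBG_CTXT_SIZE_BYTES of 24 yields
 * 1048576 / 24 = 43690 egress qids before the CUDBG_MAX_EGRESS_QIDS clamp.
 * For FLM, a 512 KB cache with G_EDRAMPTRCNT(value) == 1 caches
 * 32 * 2 = 64 pointers of 8 bytes per qid, so max_qid =
 * 524288 / (64 * 8) = 1024; with header splitting enabled (NOHDR clear),
 * the CNM max qid would then be 512.
 */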
static int collect_dump_context(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct cudbg_buffer temp_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 size = 0, next_offset = 0, total_size = 0;
	struct cudbg_ch_cntxt *buff = NULL;
	struct struct_meminfo meminfo;
	int bytes = 0;
	int rc = 0;
	u32 i, j;
	u32 max_ctx_qid[CTXT_CNM + 1];
	bool limit_qid = false;
	u32 qid_count = 0;

	rc = fill_meminfo(padap, &meminfo);
	if (rc)
		goto err;

	/* Get max valid qid for each type of queue */
	rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
	if (rc)
		goto err;

	/* There are four types of queues. Collect context upto max
	 * qid of each type of queue.
	 */
	for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
		size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];

	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
		/* Not enough scratch Memory available.
		 * Collect context of at least CUDBG_LOWMEM_MAX_CTXT_QIDS
		 * for each queue type.
		 */
		size = 0;
		for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
			size += sizeof(struct cudbg_ch_cntxt) *
				CUDBG_LOWMEM_MAX_CTXT_QIDS;

		limit_qid = true;
		rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
		if (rc)
			goto err;
	}

	buff = (struct cudbg_ch_cntxt *)scratch_buff.data;

	/* Collect context data */
	for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
		qid_count = 0;
		for (j = 0; j < max_ctx_qid[i]; j++) {
			read_sge_ctxt(pdbg_init, j, i, buff->data);

			rc = check_valid(buff->data, i);
			if (rc) {
				buff->cntxt_type = i;
				buff->cntxt_id = j;
				buff++;
				total_size += sizeof(struct cudbg_ch_cntxt);

				if (i == CTXT_FLM) {
					read_sge_ctxt(pdbg_init, j, CTXT_CNM,
						      buff->data);
					buff->cntxt_type = CTXT_CNM;
					buff->cntxt_id = j;
					buff++;
					total_size +=
						sizeof(struct cudbg_ch_cntxt);
				}
				qid_count++;
			}

			/* If there's not enough space to collect more qids,
			 * then bail and move on to next queue type.
			 */
			if (limit_qid &&
			    qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
				break;
		}
	}

	scratch_buff.size = total_size;
	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	/* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
	while (total_size > 0) {
		bytes = min_t(unsigned long, (unsigned long)total_size,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_buff.size = bytes;
		temp_buff.data = (void *)((char *)scratch_buff.data +
					  next_offset);

		rc = compress_buff(&temp_buff, dbg_buff);
		if (rc)
			goto err1;

		total_size -= bytes;
		next_offset += bytes;
	}

err1:
	scratch_buff.size = size;
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
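
/*
 * Illustrative walk-through of the chunked compression above (numbers are
 * made up): with total_size equal to 2.5 * CUDBG_CHUNK_SIZE, the loop
 * compresses three pieces -- two full chunks and one half-chunk tail --
 * advancing next_offset by "bytes" each pass, so every context record is
 * compressed exactly once regardless of how total_size relates to the
 * chunk size.
 */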
static int collect_fw_devlog(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct devlog_params *dparams = &padap->params.devlog;
	struct cudbg_param *params = NULL;
	struct cudbg_buffer scratch_buff;
	u32 offset;
	int rc = 0, i;

	rc = t4_init_devlog_params(padap, 1);
	if (rc < 0) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "
					 "%d\n", __func__, rc);
		for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
			if (pdbg_init->dbg_params[i].param_type ==
			    CUDBG_DEVLOG_PARAM) {
				params = &pdbg_init->dbg_params[i];
				break;
			}
		}
		if (params) {
			dparams->memtype = params->u.devlog_param.memtype;
			dparams->start = params->u.devlog_param.start;
			dparams->size = params->u.devlog_param.size;
		} else {
			cudbg_err->sys_err = rc;
			goto err;
		}
	}

	rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
	if (rc)
		goto err;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		offset = scratch_buff.offset;
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)((char *)scratch_buff.data +
					     offset), 1);
		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("%s(), t4_memory_rw failed!, rc: "
						 "%d\n", __func__, rc);
			cudbg_err->sys_err = rc;
			goto err1;
		}
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 0;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 1;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 2;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 3;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 4;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 5;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 6;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 7;

	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}
static int read_cim_obq(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err, int qid)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 qsize;
	int rc;
	int no_of_read_words;

	/* collect CIM OBQ */
	qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
	if (rc)
		goto err;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)((u32 *)scratch_buff.data +
						   scratch_buff.offset), qsize);
	/* no_of_read_words less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (no_of_read_words == 0)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
					 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	scratch_buff.size = no_of_read_words * 4;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
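
/*
 * Sizing note added by the editor: the scratch buffer above is allocated
 * generously (six queue-depths of four-word entries) because OBQ sizes
 * vary with configuration; t4_read_cim_obq() reports how many 32-bit
 * words were actually valid, and scratch_buff.size is trimmed to
 * no_of_read_words * 4 bytes before compression so the bundle does not
 * carry unused space.
 */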
static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 0;

	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 1;

	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 2;

	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 3;

	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 4;

	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}

static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	int rc = 0, qid = 5;

	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
	return rc;
}
static int read_cim_ibq(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	u32 qsize;
	int rc;
	int no_of_read_words;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
	if (rc)
		goto err;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)((u32 *)scratch_buff.data +
						   scratch_buff.offset), qsize);
	/* no_of_read_words less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (no_of_read_words == 0)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
					 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 rc = 0;

	/* collect CIM MA LA */
	scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	t4_cim_read_ma_la(padap,
			  (u32 *) ((char *)scratch_buff.data +
				   scratch_buff.offset),
			  (u32 *) ((char *)scratch_buff.data +
				   scratch_buff.offset + 5 * CIM_MALA_SIZE));

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_cim_la(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 rc = 0;
	u32 cfg = 0;
	int size;

	/* collect CIM LA */
	if (is_t6(padap)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (rc) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read failed (%d)\n",
					 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
	       sizeof(cfg));

	rc = t4_cim_read_la(padap,
			    (u32 *) ((char *)scratch_buff.data +
				     scratch_buff.offset + sizeof(cfg)), NULL);
	if (rc < 0) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
					 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 offset;
	int cim_num_obq, rc = 0;
	struct struct_cim_qcfg *cim_qcfg_data = NULL;

	rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
			      &scratch_buff);
	if (rc)
		goto err;

	offset = scratch_buff.offset;

	cim_num_obq = is_t4(padap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	cim_qcfg_data =
		(struct struct_cim_qcfg *)((u8 *)((char *)scratch_buff.data +
						  offset));

	rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
					 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
					 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	/* no return value */
	t4_read_cimq_cfg(padap,
			 cim_qcfg_data->base,
			 cim_qcfg_data->size,
			 cim_qcfg_data->thres);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
/**
 * Fetch the TX/RX payload regions start and end.
 *
 * @padap (IN): adapter handle.
 * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
 * @mem_tot_len (IN): total length of @mem_type memory region to read.
 * @payload_type (IN): TX or RX Payload.
 * @reg_info (OUT): store the payload region info.
 *
 * Fetch the TX/RX payload region information from meminfo.
 * However, reading from the @mem_type region starts at 0 and not
 * from whatever base info is stored in meminfo. Hence, if the
 * payload region exists, then calculate the payload region
 * start and end wrt 0 and @mem_tot_len, respectively, and set
 * @reg_info->exist to true. Otherwise, set @reg_info->exist to false.
 */
2467 static int get_payload_range(struct adapter *padap, u8 mem_type,
2468 unsigned long mem_tot_len, u8 payload_type,
2469 struct struct_region_info *reg_info)
	struct struct_meminfo meminfo;
	struct struct_mem_desc mem_region;
	struct struct_mem_desc payload;
	u32 i, idx, found = 0;
	u8 mc_type;
	int rc;

	/* Get meminfo of all regions */
	rc = fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	/* Extract the specified TX or RX Payload region range */
	memset(&payload, 0, sizeof(struct struct_mem_desc));
	for (i = 0; i < meminfo.mem_c; i++) {
		if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
			continue;	/* skip holes */

		idx = meminfo.mem[i].idx;
		/* Get TX or RX Payload region start and end */
		if (idx == payload_type) {
			if (!(meminfo.mem[i].limit))
				meminfo.mem[i].limit =
				    i < meminfo.mem_c - 1 ?
				    meminfo.mem[i + 1].base - 1 : ~0;

			memcpy(&payload, &meminfo.mem[i], sizeof(payload));
			found = 1;
			break;
		}
	}

	/* If the TX or RX Payload region is not found, return an error. */
	if (!found)
		return -EINVAL;

	if (mem_type < MEM_MC) {
		memcpy(&mem_region, &meminfo.avail[mem_type],
		       sizeof(mem_region));
	} else {
		/* Check if both MC0 and MC1 exist by checking if a
		 * base address for the specified @mem_type exists.
		 * If a base address exists, then there is MC1 and
		 * hence use the base address stored at index 3.
		 * Otherwise, use the base address stored at index 2.
		 */
		mc_type = meminfo.avail[mem_type].base ?
			  mem_type : mem_type - 1;
		memcpy(&mem_region, &meminfo.avail[mc_type],
		       sizeof(mem_region));
	}

	/* Check if the payload region exists in current memory */
	if (payload.base < mem_region.base && payload.limit < mem_region.base) {
		reg_info->exist = false;
		return 0;
	}

	/* Get the payload region start and end with respect to 0 and
	 * mem_tot_len, respectively.  This is because reading from the
	 * memory region starts at 0 and not at the base stored in meminfo.
	 */
	if (payload.base < mem_region.limit) {
		reg_info->exist = true;
		if (payload.base >= mem_region.base)
			reg_info->start = payload.base - mem_region.base;
		else
			reg_info->start = 0;

		if (payload.limit < mem_region.limit)
			reg_info->end = payload.limit - mem_region.base;
		else
			reg_info->end = mem_tot_len;
	}

	return 0;
}

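/*
 * Worked example of the rebasing above (illustrative values only, not
 * taken from real hardware): if meminfo reports the target memory at
 * [0x0, 0x4000000) and the "Tx payload:" region at
 * [0x1000000, 0x2000000), then the region is rebased into the 0-based
 * read window as reg_info->start = 0x1000000 - 0x0 and
 * reg_info->end = 0x1ffffff - 0x0, with reg_info->exist = true.  A
 * payload region lying entirely below mem_region.base leaves
 * reg_info->exist = false instead.
 */
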
static int read_fw_mem(struct cudbg_init *pdbg_init,
		       struct cudbg_buffer *dbg_buff, u8 mem_type,
		       unsigned long tot_len, struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	unsigned long bytes_read = 0;
	unsigned long bytes_left;
	unsigned long bytes;
	struct struct_region_info payload[2]; /* TX and RX Payload Region */
	u16 get_payload_flag;
	u8 i;
	int rc;

	get_payload_flag =
	    pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;

	/* If explicitly asked to get TX/RX Payload data,
	 * then don't zero out the payload data. Otherwise,
	 * zero out the payload data.
	 */
	if (!get_payload_flag) {
		u8 region_index[2];
		u8 j = 0;

		/* Find the index of TX and RX Payload regions in meminfo */
		for (i = 0; i < ARRAY_SIZE(region); i++) {
			if (!strcmp(region[i], "Tx payload:") ||
			    !strcmp(region[i], "Rx payload:")) {
				region_index[j] = i;
				j++;
			}
		}

		/* Get TX/RX Payload region range if they exist */
		memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
		for (i = 0; i < ARRAY_SIZE(payload); i++) {
			rc = get_payload_range(padap, mem_type, tot_len,
					       region_index[i], &payload[i]);
			if (rc)
				goto err;

			if (payload[i].exist) {
				/* Align start and end to avoid wrap around */
				payload[i].start =
				    roundup(payload[i].start,
					    CUDBG_CHUNK_SIZE);
				payload[i].end =
				    rounddown(payload[i].end,
					      CUDBG_CHUNK_SIZE);
			}
		}
	}

	bytes_left = tot_len;
	scratch_buff.size = tot_len;
	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err;

	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
		if (rc) {
			rc = CUDBG_STATUS_NO_SCRATCH_MEM;
			goto err;
		}

		if (!get_payload_flag) {
			for (i = 0; i < ARRAY_SIZE(payload); i++) {
				if (payload[i].exist &&
				    bytes_read >= payload[i].start &&
				    (bytes_read + bytes) <= payload[i].end) {
					memset(scratch_buff.data, 0, bytes);
					/* TX and RX Payload regions
					 * can't overlap.
					 */
					goto skip_read;
				}
			}
		}

		/* Read the next chunk from the memory region */
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
				  bytes, (__be32 *)(scratch_buff.data), 1);
		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("%s: t4_memory_rw failed (%d)",
				    __func__, rc);
			cudbg_err->sys_err = rc;
			goto err1;
		}

skip_read:
		rc = compress_buff(&scratch_buff, dbg_buff);
		if (rc)
			goto err1;

		bytes_left -= bytes;
		bytes_read += bytes;
		release_scratch_buff(&scratch_buff, dbg_buff);
	}
	return rc;

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

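/*
 * A minimal sketch of the chunking above, assuming for illustration
 * that CUDBG_CHUNK_SIZE is 64KB: a 1MB EDC0 read is broken into 16
 * chunks, and any chunk whose [bytes_read, bytes_read + bytes) window
 * falls entirely inside a chunk-aligned payload region is zeroed
 * instead of read, so payload bytes never land in the dump unless
 * CUDBG_GET_PAYLOAD_PARAM was requested.
 */
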
static void collect_mem_info(struct cudbg_init *pdbg_init,
			     struct card_mem *mem_info)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;

	if (is_t4(padap)) {
		value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
		value = G_EXT_MEM_SIZE(value);
		mem_info->size_mc0 = (u16)value;	/* size in MB */

		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
		if (value & F_EXT_MEM_ENABLE)
			mem_info->mem_flag |= (1 << MC0_FLAG);
	} else {
		value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
		value = G_EXT_MEM0_SIZE(value);
		mem_info->size_mc0 = (u16)value;

		value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
		value = G_EXT_MEM1_SIZE(value);
		mem_info->size_mc1 = (u16)value;

		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
		if (value & F_EXT_MEM0_ENABLE)
			mem_info->mem_flag |= (1 << MC0_FLAG);
		if (value & F_EXT_MEM1_ENABLE)
			mem_info->mem_flag |= (1 << MC1_FLAG);
	}

	value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
	value = G_EDRAM0_SIZE(value);
	mem_info->size_edc0 = (u16)value;

	value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
	value = G_EDRAM1_SIZE(value);
	mem_info->size_edc1 = (u16)value;

	value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
	if (value & F_EDRAM0_ENABLE)
		mem_info->mem_flag |= (1 << EDC0_FLAG);
	if (value & F_EDRAM1_ENABLE)
		mem_info->mem_flag |= (1 << EDC1_FLAG);
}

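/*
 * The G_*_SIZE() fields extracted above are in megabytes.  The
 * per-entity collectors below convert them to a byte count before
 * handing them to read_fw_mem(), e.g.:
 *
 *	edc0_size = ((unsigned long)mem_info.size_edc0) * 1024 * 1024;
 */
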
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
		    "t4cudf");	/* wmsg tag assumed; elided in original */
		if (rc == 0) {
			rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
			end_synchronized_op(padap, 0);
		}

		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("%s: t4_fwcache failed (%d)\n",
				    __func__, rc);
			cudbg_err->sys_warn = rc;
		}
	}
}

static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct card_mem mem_info = {0};
	unsigned long edc0_size;
	int rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);

	collect_mem_info(pdbg_init, &mem_info);

	if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
		edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
				 edc0_size, cudbg_err);
		if (rc)
			goto err;
	} else {
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		if (pdbg_init->verbose)
			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
			    __func__, err_msg[-rc]);
		goto err;
	}
err:
	return rc;
}

static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct card_mem mem_info = {0};
	unsigned long edc1_size;
	int rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);

	collect_mem_info(pdbg_init, &mem_info);

	if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
		edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
				 edc1_size, cudbg_err);
		if (rc)
			goto err;
	} else {
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		if (pdbg_init->verbose)
			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
			    __func__, err_msg[-rc]);
		goto err;
	}
err:
	return rc;
}

static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct card_mem mem_info = {0};
	unsigned long mc0_size;
	int rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);

	collect_mem_info(pdbg_init, &mem_info);

	if (mem_info.mem_flag & (1 << MC0_FLAG)) {
		mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
				 mc0_size, cudbg_err);
		if (rc)
			goto err;
	} else {
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		if (pdbg_init->verbose)
			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
			    __func__, err_msg[-rc]);
		goto err;
	}
err:
	return rc;
}

static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct card_mem mem_info = {0};
	unsigned long mc1_size;
	int rc;

	cudbg_t4_fwcache(pdbg_init, cudbg_err);

	collect_mem_info(pdbg_init, &mem_info);

	if (mem_info.mem_flag & (1 << MC1_FLAG)) {
		mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
				 mc1_size, cudbg_err);
		if (rc)
			goto err;
	} else {
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		if (pdbg_init->verbose)
			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
			    __func__, err_msg[-rc]);
		goto err;
	}
err:
	return rc;
}

static int collect_reg_dump(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct cudbg_buffer tmp_scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	unsigned long bytes_read = 0;
	unsigned long bytes_left;
	u32 buf_size = 0, bytes = 0;
	int rc = 0;

	if (is_t4(padap))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap) || is_t6(padap))
		buf_size = T5_REGMAP_SIZE;

	scratch_buff.size = buf_size;

	tmp_scratch_buff = scratch_buff;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	/* no return value */
	t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
	bytes_left = scratch_buff.size;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	while (bytes_left > 0) {
		tmp_scratch_buff.data =
		    ((char *)scratch_buff.data) + bytes_read;
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		tmp_scratch_buff.size = bytes;
		compress_buff(&tmp_scratch_buff, dbg_buff);
		bytes_left -= bytes;
		bytes_read += bytes;
	}

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_cctrl(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 size;
	int rc;

	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	t4_read_cong_tbl(padap, (void *)scratch_buff.data);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int check_busy_bit(struct adapter *padap)
{
	u32 val;
	u32 busy = 1;
	int status = 0;
	int i = 0;
	int retry = 10;

	while (busy && (i < retry)) {
		val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
		busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
		i++;
	}

	if (busy)
		status = -1;

	return status;
}

static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
{
	int rc = 0;

	/* write the register address into A_CIM_HOST_ACC_CTRL */
	t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);

	/* Poll HOSTBUSY */
	rc = check_busy_bit(padap);
	if (rc)
		goto err;

	/* Read the value from A_CIM_HOST_ACC_DATA */
	*val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);

err:
	return rc;
}

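/*
 * Usage sketch (the 0x200 offset is hypothetical, for illustration
 * only):
 *
 *	u32 v;
 *
 *	if (cim_ha_rreg(padap, 0x200, &v) == 0)
 *		v now holds the word at that uP-local offset;
 *
 * Each call is one write of the address to A_CIM_HOST_ACC_CTRL, a
 * bounded poll of the busy bit, then one read of A_CIM_HOST_ACC_DATA.
 * dump_up_cim() below simply repeats this at 4-byte strides.
 */
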
static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
		       struct ireg_field *up_cim_reg, u32 *buff)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
		rc = cim_ha_rreg(padap,
				 up_cim_reg->ireg_local_offset + (i * 4),
				 buff);
		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("BUSY timeout reading "
						 "CIM_HOST_ACC_CTRL\n");
			goto err;
		}

		buff++;
	}

err:
	return rc;
}

static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				   struct cudbg_buffer *dbg_buff,
				   struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *up_cim;
	u32 size;
	int i, rc, n;

	n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	up_cim = (struct ireg_buf *)scratch_buff.data;

	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
			    t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
			    t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
			    t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
			    t6_up_cim_reg_array[i][3];
		}

		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
		if (rc)
			goto err1;

		up_cim++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_mbox_log(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	u64 flit;
	u32 size;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;

	if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
		log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
		      mboxlog_param.log;
		mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
			    mboxlog_param.mbox_cmds;
	} else {
		if (pdbg_init->verbose)
			pdbg_init->print("Mbox log is not requested\n");
		return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
	}

	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	scratch_buff.size = size;
	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;

	for (k = 0; k < mbox_cmds; k++) {
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;
		entry = mbox_cmd_log_entry(log, entry_idx);

		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));

		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}

		mboxlog++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_pbt_tables(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_pbt_tables *pbt = NULL;
	u32 size, addr;
	int i, rc;

	size = sizeof(struct cudbg_pbt_tables);
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	pbt = (struct cudbg_pbt_tables *)scratch_buff.data;

	/* PBT dynamic entries */
	addr = CUDBG_CHAC_PBT_ADDR;
	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("BUSY timeout reading "
						 "CIM_HOST_ACC_CTRL\n");
			goto err1;
		}
	}

	/* PBT static entries; static entries start when bit 6 is set */
	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("BUSY timeout reading "
						 "CIM_HOST_ACC_CTRL\n");
			goto err1;
		}
	}

	/* LRF entries */
	addr = CUDBG_CHAC_PBT_LRF;
	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("BUSY timeout reading "
						 "CIM_HOST_ACC_CTRL\n");
			goto err1;
		}
	}

	/* PBT data entries */
	addr = CUDBG_CHAC_PBT_DATA;
	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("BUSY timeout reading "
						 "CIM_HOST_ACC_CTRL\n");
			goto err1;
		}
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_pm_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *ch_pm;
	u32 size;
	int i, rc, n;

	n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	ch_pm = (struct ireg_buf *)scratch_buff.data;

	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];

		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);

		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];

		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);

		ch_pm++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

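/*
 * All the *_indirect collectors in this file follow the same pattern:
 * each row of a t5_/t6_* table is a 4-tuple of
 * { address register, data register, first local offset, word count }
 * that is copied into an ireg_field and replayed through
 * t4_read_indirect().  For example, a row of (values illustrative
 * only):
 *
 *	{ 0x8FD0, 0x8FD4, 0x10000, 16 }
 *
 * would read 16 words starting at indirect offset 0x10000 by writing
 * each offset to register 0x8FD0 and reading the word back from
 * 0x8FD4.
 */
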
static int collect_tid(struct cudbg_init *pdbg_init,
		       struct cudbg_buffer *dbg_buff,
		       struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct tid_info_region *tid;
	struct tid_info_region_rev1 *tid1;
	u32 para[7], val[7];
	u32 mbox, pf;
	int rc;

	scratch_buff.size = sizeof(struct tid_info_region_rev1);

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

#define FW_PARAM_DEV_A(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF_A(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
	 V_FW_PARAMS_PARAM_Y(0) | \
	 V_FW_PARAMS_PARAM_Z(0))
#define MAX_ATIDS_A 8192U

	tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

	if (is_t5(padap)) {
		tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
		tid1->tid_start = 0;
	} else if (is_t6(padap)) {
		tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
		tid1->tid_start = t4_read_reg(padap,
					      A_LE_DB_ACTIVE_TABLE_START_INDEX);
	}

	tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);

	para[0] = FW_PARAM_PFVF_A(FILTER_START);
	para[1] = FW_PARAM_PFVF_A(FILTER_END);
	para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
	para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
	para[4] = FW_PARAM_DEV_A(NTID);
	para[5] = FW_PARAM_PFVF_A(SERVER_START);
	para[6] = FW_PARAM_PFVF_A(SERVER_END);

	rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, "t4cudq");
	if (rc) {
		release_scratch_buff(&scratch_buff, dbg_buff);
		goto err;
	}
	mbox = padap->mbox;
	pf = padap->pf;
	rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
	if (rc < 0) {
		if (rc == -FW_EPERM) {
			/* It looks like we don't have permission to use
			 * padap->mbox.
			 *
			 * Try mbox 4.  If it works, we'll continue to
			 * collect the rest of tid info from mbox 4.
			 * Else, quit trying to collect tid info.
			 */
			mbox = 4;
			pf = 4;
			rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
			if (rc < 0) {
				cudbg_err->sys_err = rc;
				goto err1;
			}
		} else {
			cudbg_err->sys_err = rc;
			goto err1;
		}
	}

	tid->ftid_base = val[0];
	tid->nftids = val[1] - val[0] + 1;
	/* active filter region */
	if (val[2] != val[3]) {
#ifdef notyet
		tid->flags |= FW_OFLD_CONN;
#endif
		tid->aftid_base = val[2];
		tid->aftid_end = val[3];
	}
	tid->ntids = val[4];
	tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
	tid->stid_base = val[5];
	tid->nstids = val[6] - val[5] + 1;

	if (chip_id(padap) >= CHELSIO_T6) {
		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			goto err1;
		}

		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

	if (chip_id(padap) <= CHELSIO_T5) {
		tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
		tid->hash_base /= 4;
	} else
		tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);

	/* UO context range */
	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);

	rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		goto err1;
	}

	if (val[0] != val[1]) {
		tid->uotid_base = val[0];
		tid->nuotids = val[1] - val[0] + 1;
	}

	tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
	tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);

#undef FW_PARAM_PFVF_A
#undef FW_PARAM_DEV_A

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	end_synchronized_op(padap, 0);
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

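/*
 * For reference, FW_PARAM_PFVF_A(FILTER_START) above expands to a
 * firmware parameter id built from mnemonic FW_PARAMS_MNEM_PFVF and
 * X = FW_PARAMS_PARAM_PFVF_FILTER_START (with Y = Z = 0);
 * t4_query_params() then returns one u32 per requested id in val[].
 */
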
static int collect_tx_rate(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct tx_rate *tx_rate;
	u32 size;
	int rc;

	size = sizeof(struct tx_rate);
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	tx_rate = (struct tx_rate *)scratch_buff.data;
	t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
	tx_rate->nchan = padap->chip_params->nchan;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
	*mask = x | y;
	y = (__force u64)cpu_to_be64(y);
	memcpy(addr, (char *)&y + 2, ETH_ALEN);
}

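/*
 * TCAM (x, y) bit pairs encode a ternary match: y carries the value
 * bits and (x | y) the care mask, so mask bits left at 0 are
 * wildcards.  A bit set in both x and y marks an invalid entry, which
 * is why the MPS TCAM walk below skips any index where
 * (tcamx & tcamy) != 0.  The memcpy() pulls the low 48 bits of the
 * big-endian value out as a 6-byte Ethernet address.
 */
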
static void mps_rpl_backdoor(struct adapter *padap,
			     struct fw_ldst_mps_rplc *mps_rplc)
{
	if (is_t5(padap)) {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP3));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP2));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP1));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP0));
	} else {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP7));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP6));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP5));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  A_MPS_VF_RPLCT_MAP4));
	}

	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
}

static int collect_mps_tcam(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mps_tcam *tcam = NULL;
	u32 size = 0, i, n, total_size = 0;
	u32 ctl, data2;
	u64 tcamy, tcamx, val;
	int rc;

	n = padap->chip_params->mps_tcam_size;
	size = sizeof(struct cudbg_mps_tcam) * n;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;
	memset(scratch_buff.data, 0, size);

	tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
	for (i = 0; i < n; i++) {
		if (chip_id(padap) >= CHELSIO_T6) {
			/* CtlReqID   - 1: use Host Driver Requester ID
			 * CtlCmdType - 0: Read, 1: Write
			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
			 * CtlXYBitSel- 0: Y bit, 1: X bit
			 */

			/* Read tcamy */
			ctl = (V_CTLREQID(1) |
			       V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
			if (i < 256)
				ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
			else
				ctl |= V_CTLTCAMINDEX(i - 256) |
				       V_CTLTCAMSEL(1);

			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
			tcamy = G_DMACH(val) << 32;
			tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
			tcam->lookup_type = G_DATALKPTYPE(data2);

			/* 0 - Outer header, 1 - Inner header
			 * [71:48] bit locations are overloaded for
			 * outer vs. inner lookup types.
			 */
			if (tcam->lookup_type &&
			    (tcam->lookup_type != M_DATALKPTYPE)) {
				/* Inner header VNI */
				tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
					     (G_DATAVIDH1(data2) << 16) |
					     G_VIDL(val);
				tcam->dip_hit = data2 & F_DATADIPHIT;
			} else {
				tcam->vlan_vld = data2 & F_DATAVIDH2;
				tcam->ivlan = G_VIDL(val);
			}

			tcam->port_num = G_DATAPORTNUM(data2);

			/* Read tcamx. Change the control param */
			ctl |= V_CTLXYBITSEL(1);
			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
			tcamx = G_DMACH(val) << 32;
			tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
			if (tcam->lookup_type &&
			    (tcam->lookup_type != M_DATALKPTYPE)) {
				/* Inner header VNI mask */
				tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
					     (G_DATAVIDH1(data2) << 16) |
					     G_VIDL(val);
			}
		} else {
			tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
			tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
		}

		if (tcamx & tcamy)
			continue;

		tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
		tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));

		if (is_t5(padap))
			tcam->repli = (tcam->cls_lo & F_REPLICATE);
		else if (is_t6(padap))
			tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);

		if (tcam->repli) {
			struct fw_ldst_cmd ldst_cmd;
			struct fw_ldst_mps_rplc mps_rplc;

			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htonl(V_FW_CMD_OP(FW_LDST_CMD) |
				  F_FW_CMD_REQUEST |
				  F_FW_CMD_READ |
				  V_FW_LDST_CMD_ADDRSPACE(
					  FW_LDST_ADDRSPC_MPS));

			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));

			ldst_cmd.u.mps.rplc.fid_idx =
			    htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				  V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(padap, NULL,
			    SLEEP_OK | INTR_OK, "t4cudm");
			if (rc == 0) {
				rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
						sizeof(ldst_cmd), &ldst_cmd);
				end_synchronized_op(padap, 0);
			}

			if (rc)
				mps_rpl_backdoor(padap, &mps_rplc);
			else
				mps_rplc = ldst_cmd.u.mps.rplc;

			tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
			tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
			tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
			tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
			if (padap->chip_params->mps_rplc_size >
			    CUDBG_MAX_RPLC_SIZE) {
				tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
				tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
				tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
				tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
			}
		}

		cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);

		tcam->idx = i;
		tcam->rplc_size = padap->chip_params->mps_rplc_size;

		total_size += sizeof(struct cudbg_mps_tcam);

		tcam++;
	}

	if (total_size == 0) {
		rc = CUDBG_SYSTEM_ERROR;
		goto err1;
	}

	scratch_buff.size = total_size;
	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	scratch_buff.size = size;
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_pcie_config(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 size, *value, j;
	int i, rc, n;

	size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	value = (u32 *)scratch_buff.data;
	for (i = 0; i < n; i++) {
		for (j = t5_pcie_config_array[i][0];
		     j <= t5_pcie_config_array[i][1]; j += 4) {
			*value++ = t4_hw_pci_read_cfg4(padap, j);
		}
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	int i, cmd_retry = 8;
	struct adapter *padap = pdbg_init->adap;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
		t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);

	/* Write DBGI command */
	val = (0x4 << S_DBGICMD) | tid;
	t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
	tid_data->dbig_cmd = val;

	val = 0;
	val |= 1 << S_DBGICMDSTRT;
	val |= 1;	/* LE mode */
	t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
	tid_data->dbig_conf = val;

	/* Poll the DBGICMDBUSY bit */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
		val = (val >> S_DBGICMDBUSY) & 1;
		cmd_retry--;
		if (!cmd_retry) {
			if (pdbg_init->verbose)
				pdbg_init->print("%s(): Timeout waiting for non-busy\n",
				    __func__);
			return CUDBG_SYSTEM_ERROR;
		}
	}

	/* Check RESP status */
	val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
	tid_data->dbig_rsp_stat = val;
	if (!(val & 1)) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s(): DBGI command failed\n",
			    __func__);
		return CUDBG_SYSTEM_ERROR;
	}

	/* Read RESP data */
	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
		tid_data->data[i] = t4_read_reg(padap,
						A_LE_DB_DBGI_RSP_DATA +
						(i << 2));

	tid_data->tid = tid;

	return 0;
}

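/*
 * The DBGI read sequence above, in short: clear the request-data
 * registers, issue command 0x4 for the tid, kick DBGICMDSTRT in LE
 * mode, poll DBGICMDBUSY for at most cmd_retry iterations, check bit 0
 * of the response status, and only then copy the CUDBG_NUM_REQ_REGS
 * response words.  A busy timeout or a clear status bit is surfaced as
 * CUDBG_SYSTEM_ERROR rather than partial data.
 */
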
static int collect_le_tcam(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tcam tcam_region = {0};
	struct cudbg_tid_data *tid_data = NULL;
	u32 value, bytes = 0, bytes_left = 0;
	u32 i, size;
	int rc;

	/* Get the LE regions */
	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE); /* hash base index */
	tcam_region.tid_hash_base = value;

	/* Get routing table index */
	value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
	tcam_region.routing_start = value;

	/* Get clip table index */
	value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
	tcam_region.clip_start = value;

	/* Get filter table index */
	value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
	tcam_region.filter_start = value;

	/* Get server table index */
	value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
	tcam_region.server_start = value;

	/* Check whether hash is enabled and calculate the max tids */
	value = t4_read_reg(padap, A_LE_DB_CONFIG);
	if ((value >> S_HASHEN) & 1) {
		value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
		if (chip_id(padap) > CHELSIO_T5)
			tcam_region.max_tid = (value & 0xFFFFF) +
					      tcam_region.tid_hash_base;
		else {		/* for T5 */
			value = G_HASHTIDSIZE(value);
			value = 1 << value;
			tcam_region.max_tid = value +
					      tcam_region.tid_hash_base;
		}
	} else	/* hash not enabled */
		tcam_region.max_tid = CUDBG_MAX_TCAM_TID;

	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
	size += sizeof(struct cudbg_tcam);
	scratch_buff.size = size;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err;

	rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
	if (rc)
		goto err;

	memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));

	tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
					      scratch_buff.data) + 1);
	bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
	bytes = sizeof(struct cudbg_tcam);

	/* read all tids */
	for (i = 0; i < tcam_region.max_tid; i++) {
		if (bytes_left < sizeof(struct cudbg_tid_data)) {
			scratch_buff.size = bytes;
			rc = compress_buff(&scratch_buff, dbg_buff);
			if (rc)
				goto err1;
			scratch_buff.size = CUDBG_CHUNK_SIZE;
			release_scratch_buff(&scratch_buff, dbg_buff);

			/* new alloc */
			rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
					      &scratch_buff);
			if (rc)
				goto err;

			tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
			bytes_left = CUDBG_CHUNK_SIZE;
			bytes = 0;
		}

		rc = cudbg_read_tid(pdbg_init, i, tid_data);
		if (rc) {
			cudbg_err->sys_err = rc;
			goto err1;
		}

		tid_data++;
		bytes_left -= sizeof(struct cudbg_tid_data);
		bytes += sizeof(struct cudbg_tid_data);
	}

	if (bytes) {
		scratch_buff.size = bytes;
		rc = compress_buff(&scratch_buff, dbg_buff);
	}

err1:
	scratch_buff.size = CUDBG_CHUNK_SIZE;
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

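/*
 * Unlike the fixed-size collectors, collect_le_tcam() streams: the
 * full dump (max_tid entries plus the region header) can exceed any
 * single scratch buffer, so it compresses and releases one
 * CUDBG_CHUNK_SIZE scratch buffer at a time, carrying bytes/bytes_left
 * across refills, and only flushes the final partial chunk after the
 * loop.
 */
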
static int collect_ma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *ma_indr = NULL;
	u32 size, j;
	int i, rc, n;

	if (chip_id(padap) < CHELSIO_T6) {
		if (pdbg_init->verbose)
			pdbg_init->print("MA indirect available only in T6\n");
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}

	n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	ma_indr = (struct ireg_buf *)scratch_buff.data;

	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];

		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);

		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];

		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}

		ma_indr++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_hma_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *hma_indr = NULL;
	u32 size;
	int i, rc, n;

	if (chip_id(padap) < CHELSIO_T6) {
		if (pdbg_init->verbose)
			pdbg_init->print("HMA indirect available only in T6\n");
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}

	n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	hma_indr = (struct ireg_buf *)scratch_buff.data;

	for (i = 0; i < n; i++) {
		struct ireg_field *hma_fli = &hma_indr->tp_pio;
		u32 *buff = hma_indr->outbuf;

		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];

		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);

		hma_indr++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *ch_pcie;
	u32 size;
	int i, rc, n;

	n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	ch_pcie = (struct ireg_buf *)scratch_buff.data;

	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];

		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);

		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];

		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);

		ch_pcie++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_tp_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *ch_tp_pio;
	u32 size;
	int i, rc, n = 0;

	if (is_t5(padap))
		n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
	else if (is_t6(padap))
		n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));

	size = sizeof(struct ireg_buf) * n * 3;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	ch_tp_pio = (struct ireg_buf *)scratch_buff.data;

	/* TP_PIO */
	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}

		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);

		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap))
		n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
	else if (is_t6(padap))
		n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}

		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);

		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap))
		n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
	else if (is_t6(padap))
		n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
			    t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
			    t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
			    t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
			    t6_tp_mib_index_array[i][3];
		}

		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);

		ch_tp_pio++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_sge_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *ch_sge_dbg;
	u32 size;
	int i, rc;

	size = sizeof(struct ireg_buf) * 2;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;

	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];

		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);

		ch_sge_dbg++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_full(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
	u32 *sp;
	int rc;
	int nreg = 0;

	/* Collect Registers:
	 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
	 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
	 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
	 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
	 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3), T6 only,
	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
	 */
	if (is_t5(padap))
		nreg = 6;
	else if (is_t6(padap))
		nreg = 7;

	scratch_buff.size = nreg * sizeof(u32);

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	sp = (u32 *)scratch_buff.data;

	/* TP_DBG_SCHED_TX */
	reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
	reg_offset_range = 1;

	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

	sp++;

	/* TP_DBG_SCHED_RX */
	reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
	reg_offset_range = 1;

	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

	sp++;

	/* TP_DBG_CSIDE_INT */
	reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
	reg_offset_range = 1;

	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

	sp++;

	/* TP_DBG_ESIDE_INT */
	reg_local_offset = t5_tp_pio_array[8][2] + 3;
	reg_offset_range = 1;

	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

	sp++;

	/* PCIE_CDEBUG_INDEX[AppData0] */
	reg_addr = t5_pcie_cdbg_array[0][0];
	reg_data = t5_pcie_cdbg_array[0][1];
	reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
	reg_offset_range = 1;

	t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
			 reg_local_offset);

	sp++;

	if (is_t6(padap)) {
		/* PCIE_CDEBUG_INDEX[AppData1] */
		reg_addr = t5_pcie_cdbg_array[0][0];
		reg_data = t5_pcie_cdbg_array[0][1];
		reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
		reg_offset_range = 1;

		t4_read_indirect(padap, reg_addr, reg_data, sp,
				 reg_offset_range, reg_local_offset);

		sp++;
	}

	/* SGE_DEBUG_DATA_HIGH_INDEX_10 */
	*sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

static int collect_vpd_data(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct struct_vpd_data *vpd_data;
	char vpd_ver[4];
	u32 fw_vers;
	u32 size;
	int rc;

	size = sizeof(struct struct_vpd_data);
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	vpd_data = (struct struct_vpd_data *)scratch_buff.data;

	if (is_t5(padap)) {
		read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
		read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
		read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
		read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
	} else if (is_t6(padap)) {
		read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
		read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
		read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
		read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
	}

	if (is_fw_attached(pdbg_init))
		rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
	else
		rc = 1;

	if (rc) {
		/* Now try the backdoor mechanism */
		rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
				  (u8 *)&vpd_data->scfg_vers);
		if (rc)
			goto err1;
	}

	if (is_fw_attached(pdbg_init))
		rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
	else
		rc = 1;

	if (rc) {
		/* Now try the backdoor mechanism */
		rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
				  (u8 *)vpd_ver);
		if (rc)
			goto err1;
		/* read_vpd_reg returns the stored hex as a string;
		 * convert the hex string to a char string.
		 * The VPD version is 2 bytes only.
		 */
		sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
		vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
	}

	/* Get the FW version if it's not already filled in */
	fw_vers = padap->params.fw_vers;
	if (!fw_vers) {
		rc = t4_get_fw_version(padap, &fw_vers);
		if (rc)
			goto err1;
	}

	vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
	vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
	vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
	vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);