/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "cudbg.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"

#define BUFFER_WARN_LIMIT 10000000

struct large_entity large_entity_list[] = {
        {CUDBG_EDC0, 0, 0},
        {CUDBG_EDC1, 0, 0},
        {CUDBG_MC0, 0, 0},
        {CUDBG_MC1, 0, 0}
};

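/*
 * Returns non-zero when the adapter's firmware is up and healthy (FW_OK).
 */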
static int is_fw_attached(struct cudbg_init *pdbg_init)
{

        return (pdbg_init->adap->flags & FW_OK);
}

/*
 * Add padding bytes to debug_buffer so that its contents end on a 4-byte
 * boundary.
 */
static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
                        struct cudbg_entity_hdr *entity_hdr)
{
        u8 zero_buf[4] = {0};
        u8 padding, remain;

        remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
        padding = 4 - remain;
        if (remain) {
                memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
                       padding);
                dbg_buff->offset += padding;
                entity_hdr->num_pad = padding;
        }

        entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

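/*
 * Read an SGE context: go through the firmware mailbox when the firmware
 * is attached, and fall back to a backdoor register read otherwise.
 */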
static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
                          enum ctxt_type ctype, u32 *data)
{
        struct adapter *padap = pdbg_init->adap;
        int rc = -1;

        if (is_fw_attached(pdbg_init)) {
                rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
                    "t4cudf");
                if (rc != 0)
                        goto out;
                rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
                                    data);
                end_synchronized_op(padap, 0);
        }

out:
        if (rc)
                t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}

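/*
 * Walk the chain of extended entity headers that trails the regular
 * payload, returning a pointer to the first unused header and the total
 * size of the extended region accumulated so far in *ext_size.
 */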
static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_entity_hdr **entity_hdr)
{
        struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
        int rc = 0;
        u32 ext_offset = cudbg_hdr->data_len;

        *ext_size = 0;

        if (dbg_buff->size - dbg_buff->offset <=
                 sizeof(struct cudbg_entity_hdr)) {
                rc = CUDBG_STATUS_BUFFER_SHORT;
                goto err;
        }

        *entity_hdr = (struct cudbg_entity_hdr *)
                       ((char *)outbuf + cudbg_hdr->data_len);

        /* Find the last extended entity header */
        while ((*entity_hdr)->size) {
                ext_offset += sizeof(struct cudbg_entity_hdr) +
                                     (*entity_hdr)->size;

                *ext_size += (*entity_hdr)->size +
                              sizeof(struct cudbg_entity_hdr);

                if (dbg_buff->size - dbg_buff->offset + *ext_size <=
                        sizeof(struct cudbg_entity_hdr)) {
                        rc = CUDBG_STATUS_BUFFER_SHORT;
                        goto err;
                }

                if (ext_offset != (*entity_hdr)->next_ext_offset) {
                        ext_offset -= sizeof(struct cudbg_entity_hdr) +
                                     (*entity_hdr)->size;
                        break;
                }

                (*entity_hdr)->next_ext_offset = *ext_size;

                *entity_hdr = (struct cudbg_entity_hdr *)
                                           ((char *)outbuf +
                                           ext_offset);
        }

        /* update the data offset */
        dbg_buff->offset = ext_offset;
err:
        return rc;
}

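/*
 * Write one collected entity into the adapter's flash debug region.  An
 * entity that does not fit in the space remaining is recorded as skipped.
 */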
static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
                       u32 cur_entity_data_offset,
                       u32 cur_entity_size,
                       int entity_nu, u32 ext_size)
{
        struct cudbg_private *priv = handle;
        struct cudbg_init *cudbg_init = &priv->dbg_init;
        struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
        u64 timestamp;
        u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
        u32 remain_flash_size;
        u32 flash_data_offset;
        u32 data_hdr_size;
        int rc = -1;

        data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
                        sizeof(struct cudbg_hdr);

        flash_data_offset = (FLASH_CUDBG_NSECS *
                             (sizeof(struct cudbg_flash_hdr) +
                              data_hdr_size)) +
                            (cur_entity_data_offset - data_hdr_size);

        if (flash_data_offset > CUDBG_FLASH_SIZE) {
                update_skip_size(sec_info, cur_entity_size);
                if (cudbg_init->verbose)
                        cudbg_init->print("Skipping large entity...\n");
                return rc;
        }

        remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;

        if (cur_entity_size > remain_flash_size) {
                update_skip_size(sec_info, cur_entity_size);
                if (cudbg_init->verbose)
                        cudbg_init->print("Skipping large entity...\n");
        } else {
                timestamp = 0;

                cur_entity_hdr_offset +=
                        (sizeof(struct cudbg_entity_hdr) *
                        (entity_nu - 1));

                rc = cudbg_write_flash(handle, timestamp, dbg_buff,
                                       cur_entity_data_offset,
                                       cur_entity_hdr_offset,
                                       cur_entity_size,
                                       ext_size);
                if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
                        cudbg_init->print("\n\tFLASH is full... "
                                "cannot write any more data to flash\n\n");
        }

        return rc;
}

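/*
 * Main entry point.  Walks the entity bitmap, collects each requested
 * debug entity into outbuf (compressed, with one header per entity), and
 * optionally mirrors the data to flash.  Large entities skipped for lack
 * of buffer space are retried in a second pass.  On return *outbuf_size
 * holds the number of bytes actually used.
 */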
int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
{
        struct cudbg_entity_hdr *entity_hdr = NULL;
        struct cudbg_entity_hdr *ext_entity_hdr = NULL;
        struct cudbg_hdr *cudbg_hdr;
        struct cudbg_buffer dbg_buff;
        struct cudbg_error cudbg_err = {0};
        int large_entity_code;

        u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
        struct cudbg_init *cudbg_init =
                &(((struct cudbg_private *)handle)->dbg_init);
        struct adapter *padap = cudbg_init->adap;
        u32 total_size, remaining_buf_size;
        u32 ext_size = 0;
        int index, bit, i, rc = -1;
        int all;
        bool flag_ext = 0;

        reset_skip_entity();

        dbg_buff.data = outbuf;
        dbg_buff.size = *outbuf_size;
        dbg_buff.offset = 0;

        cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
        cudbg_hdr->signature = CUDBG_SIGNATURE;
        cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
        cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
        cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
        cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
        cudbg_hdr->chip_ver = padap->params.chipid;

        if (cudbg_hdr->data_len)
                flag_ext = 1;

        if (cudbg_init->use_flash) {
#ifndef notyet
                rc = t4_get_flash_params(padap);
                if (rc) {
                        if (cudbg_init->verbose)
                                cudbg_init->print("\nGet flash params failed.\n\n");
                        cudbg_init->use_flash = 0;
                }
#endif

#ifdef notyet
                /* Timestamp is mandatory. If it is not passed then disable
                 * flash support
                 */
                if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
                        if (cudbg_init->verbose)
                                cudbg_init->print("\nTimestamp param missing, "
                                          "so ignoring flash write request\n\n");
                        cudbg_init->use_flash = 0;
                }
#endif
        }

        if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
            dbg_buff.size) {
                rc = CUDBG_STATUS_SMALL_BUFF;
                total_size = cudbg_hdr->hdr_len;
                goto err;
        }

        /* If ext flag is set then move the offset to the end of the buf
         * so that we can add ext entities
         */
        if (flag_ext) {
                ext_entity_hdr = (struct cudbg_entity_hdr *)
                              ((char *)outbuf + cudbg_hdr->hdr_len +
                              (sizeof(struct cudbg_entity_hdr) *
                              (CUDBG_EXT_ENTITY - 1)));
                ext_entity_hdr->start_offset = cudbg_hdr->data_len;
                ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
                ext_entity_hdr->size = 0;
                dbg_buff.offset = cudbg_hdr->data_len;
        } else {
                dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes */
                dbg_buff.offset += CUDBG_MAX_ENTITY *
                                        sizeof(struct cudbg_entity_hdr);
        }

        total_size = dbg_buff.offset;
        all = dbg_bitmap[0] & (1 << CUDBG_ALL);

        /*sort(large_entity_list);*/

        for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
                index = i / 8;
                bit = i % 8;

                if (entity_list[i].bit == CUDBG_EXT_ENTITY)
                        continue;

                if (all || (dbg_bitmap[index] & (1 << bit))) {
                        if (!flag_ext) {
                                rc = get_entity_hdr(outbuf, i, dbg_buff.size,
                                                    &entity_hdr);
                                if (rc)
                                        cudbg_hdr->hdr_flags = rc;
                        } else {
                                rc = get_next_ext_entity_hdr(outbuf, &ext_size,
                                                             &dbg_buff,
                                                             &entity_hdr);
                                if (rc)
                                        goto err;

                                /* move the offset after the ext header */
                                dbg_buff.offset +=
                                        sizeof(struct cudbg_entity_hdr);
                        }

                        entity_hdr->entity_type = i;
                        entity_hdr->start_offset = dbg_buff.offset;
                        /* process each entity by calling process_entity fp */
                        remaining_buf_size = dbg_buff.size - dbg_buff.offset;

                        if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
                            is_large_entity(i)) {
                                if (cudbg_init->verbose)
                                        cudbg_init->print("Skipping %s\n",
                                            entity_list[i].name);
                                skip_entity(i);
                                continue;
                        } else {
                                /* If fw_attach is 0, then skip entities which
                                 * communicate with firmware
                                 */
                                if (!is_fw_attached(cudbg_init) &&
                                    (entity_list[i].flag &
                                    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
                                        if (cudbg_init->verbose)
                                                cudbg_init->print("Skipping %s entity, "
                                                          "because fw_attach "
                                                          "is 0\n",
                                                          entity_list[i].name);
                                        continue;
                                }

                                if (cudbg_init->verbose)
                                        cudbg_init->print("collecting debug entity: "
                                                  "%s\n", entity_list[i].name);
                                memset(&cudbg_err, 0,
                                       sizeof(struct cudbg_error));
                                rc = process_entity[i - 1](cudbg_init, &dbg_buff,
                                                           &cudbg_err);
                        }

                        if (rc) {
                                entity_hdr->size = 0;
                                dbg_buff.offset = entity_hdr->start_offset;
                        } else
                                align_debug_buffer(&dbg_buff, entity_hdr);

                        if (cudbg_err.sys_err)
                                rc = CUDBG_SYSTEM_ERROR;

                        entity_hdr->hdr_flags = rc;
                        entity_hdr->sys_err = cudbg_err.sys_err;
                        entity_hdr->sys_warn = cudbg_err.sys_warn;

                        /* We don't want to include ext entity size in global
                         * header
                         */
                        if (!flag_ext)
                                total_size += entity_hdr->size;

                        cudbg_hdr->data_len = total_size;
                        *outbuf_size = total_size;

                        /* consider the size of the ext entity header and data
                         * also
                         */
                        if (flag_ext) {
                                ext_size += (sizeof(struct cudbg_entity_hdr) +
                                             entity_hdr->size);
                                entity_hdr->start_offset -= cudbg_hdr->data_len;
                                ext_entity_hdr->size = ext_size;
                                entity_hdr->next_ext_offset = ext_size;
                                entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
                        }

                        if (cudbg_init->use_flash) {
                                if (flag_ext)
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           ext_entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           CUDBG_EXT_ENTITY,
                                                           ext_size);
                                else
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           i, ext_size);
                        }
                }
        }

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
                large_entity_code = large_entity_list[i].entity_code;
                if (large_entity_list[i].skip_flag) {
                        if (!flag_ext) {
                                rc = get_entity_hdr(outbuf, large_entity_code,
                                                    dbg_buff.size, &entity_hdr);
                                if (rc)
                                        cudbg_hdr->hdr_flags = rc;
                        } else {
                                rc = get_next_ext_entity_hdr(outbuf, &ext_size,
                                                             &dbg_buff,
                                                             &entity_hdr);
                                if (rc)
                                        goto err;

                                dbg_buff.offset +=
                                        sizeof(struct cudbg_entity_hdr);
                        }

                        /* If fw_attach is 0, then skip entities which
                         * communicate with firmware
                         */
                        if (!is_fw_attached(cudbg_init) &&
                            (entity_list[large_entity_code].flag &
                            (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
                                if (cudbg_init->verbose)
                                        cudbg_init->print("Skipping %s entity, "
                                                  "because fw_attach "
                                                  "is 0\n",
                                                  entity_list[large_entity_code].name);
                                continue;
                        }

                        entity_hdr->entity_type = large_entity_code;
                        entity_hdr->start_offset = dbg_buff.offset;
                        if (cudbg_init->verbose)
                                cudbg_init->print("Re-trying debug entity: %s\n",
                                          entity_list[large_entity_code].name);

                        memset(&cudbg_err, 0, sizeof(struct cudbg_error));
                        rc = process_entity[large_entity_code - 1](cudbg_init,
                                                                   &dbg_buff,
                                                                   &cudbg_err);
                        if (rc) {
                                entity_hdr->size = 0;
                                dbg_buff.offset = entity_hdr->start_offset;
                        } else
                                align_debug_buffer(&dbg_buff, entity_hdr);

                        if (cudbg_err.sys_err)
                                rc = CUDBG_SYSTEM_ERROR;

                        entity_hdr->hdr_flags = rc;
                        entity_hdr->sys_err = cudbg_err.sys_err;
                        entity_hdr->sys_warn = cudbg_err.sys_warn;

                        /* We don't want to include ext entity size in global
                         * header
                         */
                        if (!flag_ext)
                                total_size += entity_hdr->size;

                        cudbg_hdr->data_len = total_size;
                        *outbuf_size = total_size;

                        /* consider the size of the ext entity header and
                         * data also
                         */
                        if (flag_ext) {
                                ext_size += (sizeof(struct cudbg_entity_hdr) +
                                                   entity_hdr->size);
                                entity_hdr->start_offset -=
                                                        cudbg_hdr->data_len;
                                ext_entity_hdr->size = ext_size;
                                entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
                        }

                        if (cudbg_init->use_flash) {
                                if (flag_ext)
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           ext_entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           CUDBG_EXT_ENTITY,
                                                           ext_size);
                                else
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           large_entity_list[i].entity_code,
                                                           ext_size);
                        }
                }
        }

        cudbg_hdr->data_len = total_size;
        *outbuf_size = total_size;

        if (flag_ext)
                *outbuf_size += ext_size;

        return 0;
err:
        return rc;
}

void reset_skip_entity(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
                large_entity_list[i].skip_flag = 0;
}

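/* Mark a large entity as skipped so cudbg_collect() can retry it later. */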
void skip_entity(int entity_code)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
                if (large_entity_list[i].entity_code == entity_code)
                        large_entity_list[i].skip_flag = 1;
        }
}

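/* Returns 1 if entity_code is in large_entity_list, 0 otherwise. */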
int is_large_entity(int entity_code)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
                if (large_entity_list[i].entity_code == entity_code)
                        return 1;
        }
        return 0;
}

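/*
 * Return a pointer to the header slot of entity i (1-based) in outbuf,
 * after checking that the slot lies within the buffer.
 */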
int get_entity_hdr(void *outbuf, int i, u32 size,
                   struct cudbg_entity_hdr **entity_hdr)
{
        int rc = 0;
        struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

        if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr) * i) > size)
                return CUDBG_STATUS_SMALL_BUFF;

        *entity_hdr = (struct cudbg_entity_hdr *)
                      ((char *)outbuf + cudbg_hdr->hdr_len +
                       (sizeof(struct cudbg_entity_hdr) * (i - 1)));
        return rc;
}

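/*
 * Each collect_*() routine below grabs a scratch buffer from the unused
 * tail of dbg_buff, fills it with one entity's data, and compresses the
 * result into the output buffer before releasing the scratch space.
 */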
static int collect_rss(struct cudbg_init *pdbg_init,
                       struct cudbg_buffer *dbg_buff,
                       struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        u32 size;
        int rc = 0;

        size = RSS_NENTRIES * sizeof(u16);
        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
        if (rc) {
                if (pdbg_init->verbose)
                        pdbg_init->print("%s(), t4_read_rss failed, rc: %d\n",
                                 __func__, rc);
                cudbg_err->sys_err = rc;
                goto err1;
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_sw_state(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct sw_state *swstate;
        u32 size;
        int rc = 0;

        size = sizeof(struct sw_state);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        swstate = (struct sw_state *) scratch_buff.data;

        swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
        snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
            "FreeBSD");
        swstate->os_type = 0;

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_ddp_stats(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct tp_usm_stats *tp_usm_stats_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct tp_usm_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;

        /* spin_lock(&padap->stats_lock);       TODO */
        t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
        /* spin_unlock(&padap->stats_lock);     TODO */

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_ulptx_la(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_ulptx_la *ulptx_la_buff;
        u32 size, i, j;
        int rc = 0;

        size = sizeof(struct struct_ulptx_la);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;

        for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
                ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
                                                      A_ULP_TX_LA_RDPTR_0 +
                                                      0x10 * i);
                ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
                                                      A_ULP_TX_LA_WRPTR_0 +
                                                      0x10 * i);
                ulptx_la_buff->rddata[i] = t4_read_reg(padap,
                                                       A_ULP_TX_LA_RDDATA_0 +
                                                       0x10 * i);
                for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
                        ulptx_la_buff->rd_data[i][j] =
                                t4_read_reg(padap,
                                            A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
                }
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_ulprx_la(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_ulprx_la *ulprx_la_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct struct_ulprx_la);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
        t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
        ulprx_la_buff->size = ULPRX_LA_SIZE;

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_cpl_stats(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_tp_cpl_stats *tp_cpl_stats_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct struct_tp_cpl_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
        tp_cpl_stats_buff->nchan = padap->chip_params->nchan;

        /* spin_lock(&padap->stats_lock);       TODO */
        t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
        /* spin_unlock(&padap->stats_lock);     TODO */

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_wc_stats(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_wc_stats *wc_stats_buff;
        u32 val1;
        u32 val2;
        u32 size;
        int rc = 0;

        size = sizeof(struct struct_wc_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;

        if (!is_t4(padap)) {
                val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
                val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
                wc_stats_buff->wr_cl_success = val1 - val2;
                wc_stats_buff->wr_cl_fail = val2;
        } else {
                wc_stats_buff->wr_cl_success = 0;
                wc_stats_buff->wr_cl_fail = 0;
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

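/* qsort() comparator: order memory region descriptors by base address. */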
static int mem_desc_cmp(const void *a, const void *b)
{
        return ((const struct struct_mem_desc *)a)->base -
                ((const struct struct_mem_desc *)b)->base;
}

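/*
 * Build a snapshot of the chip's memory layout: the available EDRAM and
 * external memory ranges, the hardware regions carved out of them, and
 * the PMTX/PMRX page-manager and MPS buffer-group usage counters.
 */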
static int fill_meminfo(struct adapter *padap,
                        struct struct_meminfo *meminfo_buff)
{
        struct struct_mem_desc *md;
        u32 size, lo, hi;
        u32 used, alloc;
        int n, i, rc = 0;

        memset(meminfo_buff->avail, 0,
               ARRAY_SIZE(meminfo_buff->avail) *
               sizeof(struct struct_mem_desc));
        memset(meminfo_buff->mem, 0,
               (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
        md = meminfo_buff->mem;

        for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
                meminfo_buff->mem[i].limit = 0;
                meminfo_buff->mem[i].idx = i;
        }

        i = 0;

        lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);

        if (lo & F_EDRAM0_ENABLE) {
                hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
                meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
                meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
                                               (G_EDRAM0_SIZE(hi) << 20);
                meminfo_buff->avail[i].idx = 0;
                i++;
        }

        if (lo & F_EDRAM1_ENABLE) {
                hi = t4_read_reg(padap, A_MA_EDRAM1_BAR);
                meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
                meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
                                               (G_EDRAM1_SIZE(hi) << 20);
                meminfo_buff->avail[i].idx = 1;
                i++;
        }

        if (is_t5(padap)) {
                if (lo & F_EXT_MEM0_ENABLE) {
                        hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
                        meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                (G_EXT_MEM_SIZE(hi) << 20);
                        meminfo_buff->avail[i].idx = 3;
                        i++;
                }

                if (lo & F_EXT_MEM1_ENABLE) {
                        hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
                        meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                (G_EXT_MEM1_SIZE(hi) << 20);
                        meminfo_buff->avail[i].idx = 4;
                        i++;
                }
        } else if (is_t6(padap)) {
                if (lo & F_EXT_MEM_ENABLE) {
                        hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
                        meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                (G_EXT_MEM_SIZE(hi) << 20);
                        meminfo_buff->avail[i].idx = 2;
                        i++;
                }
        }

        if (!i) {                                  /* no memory available */
                rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
                goto err;
        }

        meminfo_buff->avail_c = i;
        qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
            mem_desc_cmp);
        (md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
        (md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
        (md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);

        /* the next few have explicit upper bounds */
        md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE) *
                    G_PMTXMAXPAGE(t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE));
        md++;

        md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) *
                    G_PMRXMAXPAGE(t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE));
        md++;

        if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
                if (chip_id(padap) <= CHELSIO_T5) {
                        hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
                        md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
                } else {
                        hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
                        md->base = t4_read_reg(padap,
                                               A_LE_DB_HASH_TBL_BASE_ADDR);
                }
                md->limit = 0;
        } else {
                md->base = 0;
                md->idx = ARRAY_SIZE(region);  /* hide it */
        }
        md++;

#define ulp_region(reg) \
        {\
                md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
                (md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
        }

        ulp_region(RX_ISCSI);
        ulp_region(RX_TDDP);
        ulp_region(TX_TPT);
        ulp_region(RX_STAG);
        ulp_region(RX_RQ);
        ulp_region(RX_RQUDP);
        ulp_region(RX_PBL);
        ulp_region(TX_PBL);
#undef ulp_region

        md->base = 0;
        md->idx = ARRAY_SIZE(region);
        if (!is_t4(padap)) {
                u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
                u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);

                size = 0;
                if (is_t5(padap)) {
                        if (sge_ctrl & F_VFIFO_ENABLE)
                                size = G_DBVFIFO_SIZE(fifo_size);
                } else
                        size = G_T6_DBVFIFO_SIZE(fifo_size);

                if (size) {
                        md->base = G_BASEADDR(t4_read_reg(padap,
                                                          A_SGE_DBVFIFO_BADDR));
                        md->limit = md->base + (size << 2) - 1;
                }
        }

        md++;

        md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
        md->limit = 0;
        md++;
        md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
        md->limit = 0;
        md++;
#ifndef __NO_DRIVER_OCQ_SUPPORT__
        /*md->base = padap->vres.ocq.start;*/
        /*if (adap->vres.ocq.size)*/
        /*        md->limit = md->base + adap->vres.ocq.size - 1;*/
        /*else*/
        md->idx = ARRAY_SIZE(region);  /* hide it */
        md++;
#endif

        /* add any address-space holes, there can be up to 3 */
        for (n = 0; n < i - 1; n++)
                if (meminfo_buff->avail[n].limit <
                    meminfo_buff->avail[n + 1].base)
                        (md++)->base = meminfo_buff->avail[n].limit;

        if (meminfo_buff->avail[n].limit)
                (md++)->base = meminfo_buff->avail[n].limit;

        n = (int) (md - meminfo_buff->mem);
        meminfo_buff->mem_c = n;

        qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
            mem_desc_cmp);

        lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
        hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
        meminfo_buff->up_ram_lo = lo;
        meminfo_buff->up_ram_hi = hi;

        lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
        hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
        meminfo_buff->up_extmem2_lo = lo;
        meminfo_buff->up_extmem2_hi = hi;

        lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
        meminfo_buff->rx_pages_data[0] = G_PMRXMAXPAGE(lo);
        meminfo_buff->rx_pages_data[1] =
                t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
        meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;

        lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
        hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
        meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
        meminfo_buff->tx_pages_data[1] =
                hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
        meminfo_buff->tx_pages_data[2] =
                hi >= (1 << 20) ? 'M' : 'K';
        meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);

        for (i = 0; i < 4; i++) {
                if (chip_id(padap) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
                else
                        lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
                if (is_t5(padap)) {
                        used = G_T5_USED(lo);
                        alloc = G_T5_ALLOC(lo);
                } else {
                        used = G_USED(lo);
                        alloc = G_ALLOC(lo);
                }
                meminfo_buff->port_used[i] = used;
                meminfo_buff->port_alloc[i] = alloc;
        }

        for (i = 0; i < padap->chip_params->nchan; i++) {
                if (chip_id(padap) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
                else
                        lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
                if (is_t5(padap)) {
                        used = G_T5_USED(lo);
                        alloc = G_T5_ALLOC(lo);
                } else {
                        used = G_USED(lo);
                        alloc = G_ALLOC(lo);
                }
                meminfo_buff->loopback_used[i] = used;
                meminfo_buff->loopback_alloc[i] = alloc;
        }
err:
        return rc;
}

static int collect_meminfo(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_meminfo *meminfo_buff;
        int rc = 0;
        u32 size;

        size = sizeof(struct struct_meminfo);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        meminfo_buff = (struct struct_meminfo *)scratch_buff.data;

        rc = fill_meminfo(padap, meminfo_buff);
        if (rc)
                goto err1;

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_lb_stats(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct lb_port_stats *tmp_stats;
        struct struct_lb_stats *lb_stats_buff;
        u32 i, n, size;
        int rc = 0;

        rc = padap->params.nports;
        if (rc < 0)
                goto err;

        n = rc;
        size = sizeof(struct struct_lb_stats) +
               n * sizeof(struct lb_port_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;

        lb_stats_buff->nchan = n;
        tmp_stats = lb_stats_buff->s;

        for (i = 0; i < n; i += 2, tmp_stats += 2) {
                t4_get_lb_stats(padap, i, tmp_stats);
                t4_get_lb_stats(padap, i + 1, tmp_stats + 1);
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_rdma_stats(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct tp_rdma_stats *rdma_stats_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct tp_rdma_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;

        /* spin_lock(&padap->stats_lock);       TODO */
        t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
        /* spin_unlock(&padap->stats_lock);     TODO */

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

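/*
 * Report TP timer values.  The TP timers count in units of the core
 * clock scaled by the timer resolution, so the register values are
 * converted to microseconds before being stored.
 */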
static int collect_clk_info(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct cudbg_buffer scratch_buff;
        struct adapter *padap = pdbg_init->adap;
        struct struct_clk_info *clk_info_buff;
        u64 tp_tick_us;
        int size;
        int rc = 0;

        if (!padap->params.vpd.cclk) {
                rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
                goto err;
        }

        size = sizeof(struct struct_clk_info);
        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        clk_info_buff = (struct struct_clk_info *) scratch_buff.data;

        clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */
        clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
        clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
        clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
        /* in us */
        tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
        clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
                                      clk_info_buff->dack_re) / 1000000) *
                                     t4_read_reg(padap, A_TP_DACK_TIMER);

        clk_info_buff->retransmit_min =
                tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
        clk_info_buff->retransmit_max =
                tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);

        clk_info_buff->persist_timer_min =
                tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
        clk_info_buff->persist_timer_max =
                tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);

        clk_info_buff->keepalive_idle_timer =
                tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
        clk_info_buff->keepalive_interval =
                tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);

        clk_info_buff->initial_srtt =
                tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
        clk_info_buff->finwait2_timer =
                tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_macstats(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_mac_stats_rev1 *mac_stats_buff;
        u32 i, n, size;
        int rc = 0;

        rc = padap->params.nports;
        if (rc < 0)
                goto err;

        n = rc;
        size = sizeof(struct struct_mac_stats_rev1);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;

        mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
        mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
        mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
                                       sizeof(struct cudbg_ver_hdr);

        mac_stats_buff->port_count = n;
        for (i = 0; i < mac_stats_buff->port_count; i++)
                t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct cim_pif_la *cim_pif_la_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct cim_pif_la) +
               2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
        cim_pif_la_buff->size = CIM_PIFLA_SIZE;

        t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
                           (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
                           NULL, NULL);

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_tp_la(struct cudbg_init *pdbg_init,
                         struct cudbg_buffer *dbg_buff,
                         struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_tp_la *tp_la_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct struct_tp_la) + TPLA_SIZE * sizeof(u64);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        tp_la_buff = (struct struct_tp_la *) scratch_buff.data;

        tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
        t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

1378 static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
1379                               struct cudbg_buffer *dbg_buff,
1380                               struct cudbg_error *cudbg_err)
1381 {
1382         struct adapter *padap = pdbg_init->adap;
1383         struct cudbg_buffer scratch_buff;
1384         struct struct_tp_fcoe_stats *tp_fcoe_stats_buff;
1385         u32 size;
1386         int rc = 0;
1387
1388         size = sizeof(struct struct_tp_fcoe_stats);
1389
1390         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1391         if (rc)
1392                 goto err;
1393
1394         tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1395
1396         /* spin_lock(&padap->stats_lock);       TODO*/
1397         t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1398         t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1399         if (padap->chip_params->nchan == NCHAN) {
1400                 t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1401                 t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1402         }
1403         /* spin_unlock(&padap->stats_lock);     TODO*/
1404
1405         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1406         if (rc)
1407                 goto err1;
1408
1409         rc = compress_buff(&scratch_buff, dbg_buff);
1410 err1:
1411         release_scratch_buff(&scratch_buff, dbg_buff);
1412 err:
1413         return rc;
1414 }
1415
1416 static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
1417                                 struct cudbg_buffer *dbg_buff,
1418                                 struct cudbg_error *cudbg_err)
1419 {
1420         struct adapter *padap = pdbg_init->adap;
1421         struct cudbg_buffer scratch_buff;
1422         struct struct_tp_err_stats *tp_err_stats_buff;
1423         u32 size;
1424         int rc = 0;
1425
1426         size = sizeof(struct struct_tp_err_stats);
1427
1428         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1429         if (rc)
1430                 goto err;
1431
1432         tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1433
1434         /* spin_lock(&padap->stats_lock);       TODO*/
1435         t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1436         /* spin_unlock(&padap->stats_lock);     TODO*/
1437         tp_err_stats_buff->nchan = padap->chip_params->nchan;
1438
1439         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1440         if (rc)
1441                 goto err1;
1442
1443         rc = compress_buff(&scratch_buff, dbg_buff);
1444 err1:
1445         release_scratch_buff(&scratch_buff, dbg_buff);
1446 err:
1447         return rc;
1448 }
1449
1450 static int collect_tcp_stats(struct cudbg_init *pdbg_init,
1451                              struct cudbg_buffer *dbg_buff,
1452                              struct cudbg_error *cudbg_err)
1453 {
1454         struct adapter *padap = pdbg_init->adap;
1455         struct cudbg_buffer scratch_buff;
1456         struct struct_tcp_stats *tcp_stats_buff;
1457         u32 size;
1458         int rc = 0;
1459
1460         size = sizeof(struct struct_tcp_stats);
1461
1462         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1463         if (rc)
1464                 goto err;
1465
1466         tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1467
1468         /* spin_lock(&padap->stats_lock);       TODO*/
1469         t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1470         /* spin_unlock(&padap->stats_lock);     TODO*/
1471
1472         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1473         if (rc)
1474                 goto err1;
1475
1476         rc = compress_buff(&scratch_buff, dbg_buff);
1477 err1:
1478         release_scratch_buff(&scratch_buff, dbg_buff);
1479 err:
1480         return rc;
1481 }
1482
1483 static int collect_hw_sched(struct cudbg_init *pdbg_init,
1484                             struct cudbg_buffer *dbg_buff,
1485                             struct cudbg_error *cudbg_err)
1486 {
1487         struct adapter *padap = pdbg_init->adap;
1488         struct cudbg_buffer scratch_buff;
1489         struct struct_hw_sched *hw_sched_buff;
1490         u32 size;
1491         int i, rc = 0;
1492
1493         if (!padap->params.vpd.cclk) {
1494                 rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
1495                 goto err;
1496         }
1497
1498         size = sizeof(struct struct_hw_sched);
1499         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1500         if (rc)
1501                 goto err;
1502
1503         hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1504
1505         hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1506         hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1507         t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1508
1509         for (i = 0; i < NTX_SCHED; ++i) {
1510                 t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1511                     &hw_sched_buff->ipg[i], 1);
1512         }
1513
1514         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1515         if (rc)
1516                 goto err1;
1517
1518         rc = compress_buff(&scratch_buff, dbg_buff);
1519 err1:
1520         release_scratch_buff(&scratch_buff, dbg_buff);
1521 err:
1522         return rc;
1523 }
1524
1525 static int collect_pm_stats(struct cudbg_init *pdbg_init,
1526                             struct cudbg_buffer *dbg_buff,
1527                             struct cudbg_error *cudbg_err)
1528 {
1529         struct adapter *padap = pdbg_init->adap;
1530         struct cudbg_buffer scratch_buff;
1531         struct struct_pm_stats *pm_stats_buff;
1532         u32 size;
1533         int rc = 0;
1534
1535         size = sizeof(struct struct_pm_stats);
1536
1537         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1538         if (rc)
1539                 goto err;
1540
1541         pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1542
1543         t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1544         t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1545
1546         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1547         if (rc)
1548                 goto err1;
1549
1550         rc = compress_buff(&scratch_buff, dbg_buff);
1551 err1:
1552         release_scratch_buff(&scratch_buff, dbg_buff);
1553 err:
1554         return rc;
1555 }
1556
1557 static int collect_path_mtu(struct cudbg_init *pdbg_init,
1558                             struct cudbg_buffer *dbg_buff,
1559                             struct cudbg_error *cudbg_err)
1560 {
1561         struct adapter *padap = pdbg_init->adap;
1562         struct cudbg_buffer scratch_buff;
1563         u32 size;
1564         int rc = 0;
1565
1566         size = NMTUS * sizeof(u16);
1567
1568         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1569         if (rc)
1570                 goto err;
1571
1572         t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1573
1574         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1575         if (rc)
1576                 goto err1;
1577
1578         rc = compress_buff(&scratch_buff, dbg_buff);
1579 err1:
1580         release_scratch_buff(&scratch_buff, dbg_buff);
1581 err:
1582         return rc;
1583 }
1584
1585 static int collect_rss_key(struct cudbg_init *pdbg_init,
1586                            struct cudbg_buffer *dbg_buff,
1587                            struct cudbg_error *cudbg_err)
1588 {
1589         struct adapter *padap = pdbg_init->adap;
1590         struct cudbg_buffer scratch_buff;
1591         u32 size;
1592         int rc = 0;
1593
1594
1595         size = 10 * sizeof(u32);
1596         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1597         if (rc)
1598                 goto err;
1599
1600         t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1601
1602         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1603         if (rc)
1604                 goto err1;
1605
1606         rc = compress_buff(&scratch_buff, dbg_buff);
1607 err1:
1608         release_scratch_buff(&scratch_buff, dbg_buff);
1609 err:
1610         return rc;
1611 }
1612
1613 static int collect_rss_config(struct cudbg_init *pdbg_init,
1614                               struct cudbg_buffer *dbg_buff,
1615                               struct cudbg_error *cudbg_err)
1616 {
1617         struct adapter *padap = pdbg_init->adap;
1618         struct cudbg_buffer scratch_buff;
1619         struct rss_config *rss_conf;
1620         int rc;
1621         u32 size;
1622
1623         size = sizeof(struct rss_config);
1624
1625         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1626         if (rc)
1627                 goto err;
1628
1629         rss_conf = (struct rss_config *)scratch_buff.data;
1630
1631         rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1632         rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1633         rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1634         rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1635         rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1636         rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1637         rss_conf->chip = padap->params.chipid;
1638
1639         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1640         if (rc)
1641                 goto err1;
1642
1643         rc = compress_buff(&scratch_buff, dbg_buff);
1644
1645 err1:
1646         release_scratch_buff(&scratch_buff, dbg_buff);
1647 err:
1648         return rc;
1649 }
1650
1651 static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
1652                                  struct cudbg_buffer *dbg_buff,
1653                                  struct cudbg_error *cudbg_err)
1654 {
1655         struct adapter *padap = pdbg_init->adap;
1656         struct cudbg_buffer scratch_buff;
1657         struct rss_vf_conf *vfconf;
1658         int vf, rc, vf_count;
1659         u32 size;
1660
1661         vf_count = padap->chip_params->vfcount;
1662         size = vf_count * sizeof(*vfconf);
1663
1664         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1665         if (rc)
1666                 goto err;
1667
1668         vfconf = (struct rss_vf_conf *)scratch_buff.data;
1669
1670         for (vf = 0; vf < vf_count; vf++) {
1671                 t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1672                                       &vfconf[vf].rss_vf_vfh, 1);
1673         }
1674
1675         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1676         if (rc)
1677                 goto err1;
1678
1679         rc = compress_buff(&scratch_buff, dbg_buff);
1680
1681 err1:
1682         release_scratch_buff(&scratch_buff, dbg_buff);
1683 err:
1684         return rc;
1685 }
1686
1687 static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
1688                                  struct cudbg_buffer *dbg_buff,
1689                                  struct cudbg_error *cudbg_err)
1690 {
1691         struct cudbg_buffer scratch_buff;
1692         struct rss_pf_conf *pfconf;
1693         struct adapter *padap = pdbg_init->adap;
1694         u32 rss_pf_map, rss_pf_mask, size;
1695         int pf, rc;
1696
1697         size = 8 * sizeof(*pfconf);
1698
1699         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1700         if (rc)
1701                 goto err;
1702
1703         pfconf = (struct rss_pf_conf *)scratch_buff.data;
1704
1705         rss_pf_map = t4_read_rss_pf_map(padap, 1);
1706         rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1707
1708         for (pf = 0; pf < 8; pf++) {
1709                 pfconf[pf].rss_pf_map = rss_pf_map;
1710                 pfconf[pf].rss_pf_mask = rss_pf_mask;
1711                 /* no return val */
1712                 t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1713         }
1714
1715         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1716         if (rc)
1717                 goto err1;
1718
1719         rc = compress_buff(&scratch_buff, dbg_buff);
1720 err1:
1721         release_scratch_buff(&scratch_buff, dbg_buff);
1722 err:
1723         return rc;
1724 }
1725
1726 static int check_valid(u32 *buf, int type)
1727 {
1728         int index;
1729         int bit;
1730         int bit_pos = 0;
1731
1732         switch (type) {
1733         case CTXT_EGRESS:
1734                 bit_pos = 176;
1735                 break;
1736         case CTXT_INGRESS:
1737                 bit_pos = 141;
1738                 break;
1739         case CTXT_FLM:
1740                 bit_pos = 89;
1741                 break;
1742         }
1743         index = bit_pos / 32;
1744         bit = bit_pos % 32;
1745
1746         return buf[index] & (1U << bit);
1747 }
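
/*
 * For example, CTXT_EGRESS has its valid bit at position 176, which falls
 * in word 176 / 32 == 5 at bit 176 % 32 == 16, so the check above reduces
 * to buf[5] & (1U << 16).
 */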
1748
1749 /**
1750  * Get EGRESS, INGRESS, FLM, and CNM max qid.
1751  *
1752  * For EGRESS and INGRESS, do the following calculation.
1753  * max_qid = (DBQ/IMSG context region size in bytes) /
1754  *           (size of context in bytes).
1755  *
1756  * For FLM, do the following calculation.
1757  * max_qid = (FLM cache region size in bytes) /
1758  *           ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1759  *
1760  * There's a 1-to-1 mapping between FLM and CNM when header splitting is
1761  * disabled; i.e., max CNM qid equals max FLM qid. However, when header
1762  * splitting is enabled, max CNM qid is half of max FLM qid.
1763  */
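/*
 * Worked example with illustrative numbers (not from any particular
 * configuration): a 64 MB FLM cache region with G_EDRAMPTRCNT(value) == 1
 * caches 32 * (1 << 1) == 64 pointers per qid, i.e. 64 * 8 == 512 bytes
 * per qid, so max FLM qid == 64 MB / 512 == 131072. With header splitting
 * enabled (nohdr == 0), max CNM qid is then 131072 >> 1 == 65536.
 */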
1764 static int get_max_ctxt_qid(struct adapter *padap,
1765                             struct struct_meminfo *meminfo,
1766                             u32 *max_ctx_qid, u8 nelem)
1767 {
1768         u32 i, idx, found = 0;
1769
1770         if (nelem != (CTXT_CNM + 1))
1771                 return -EINVAL;
1772
1773         for (i = 0; i < meminfo->mem_c; i++) {
1774                 if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
1775                         continue;                        /* skip holes */
1776
1777                 idx = meminfo->mem[i].idx;
1778                 /* Get DBQ, IMSG, and FLM context region size */
1779                 if (idx <= CTXT_FLM) {
1780                         if (!(meminfo->mem[i].limit))
1781                                 meminfo->mem[i].limit =
1782                                         i < meminfo->mem_c - 1 ?
1783                                         meminfo->mem[i + 1].base - 1 : ~0;
1784
1785                         if (idx < CTXT_FLM) {
1786                                 /* Get EGRESS and INGRESS max qid. */
1787                                 max_ctx_qid[idx] = (meminfo->mem[i].limit -
1788                                                     meminfo->mem[i].base + 1) /
1789                                                    CUDBG_CTXT_SIZE_BYTES;
1790                                 found++;
1791                         } else {
1792                                 /* Get FLM and CNM max qid. */
1793                                 u32 value, edram_ptr_count;
1794                                 u8 bytes_per_ptr = 8;
1795                                 u8 nohdr;
1796
1797                                 value = t4_read_reg(padap, A_SGE_FLM_CFG);
1798
1799                                 /* Check if header splitting is enabled. */
1800                                 nohdr = (value >> S_NOHDR) & 1U;
1801
1802                                 /* Get the number of pointers cached in
1803                                  * EDRAM per qid; the field is encoded
1804                                  * in units of 32. */
1805                                 edram_ptr_count = 32 *
1806                                                   (1U << G_EDRAMPTRCNT(value));
1807
1808                                 /* EDRAMPTRCNT value of 3 is reserved.
1809                                  * So don't exceed 128.
1810                                  */
1811                                 if (edram_ptr_count > 128)
1812                                         edram_ptr_count = 128;
1813
1814                                 max_ctx_qid[idx] = (meminfo->mem[i].limit -
1815                                                     meminfo->mem[i].base + 1) /
1816                                                    (edram_ptr_count *
1817                                                     bytes_per_ptr);
1818                                 found++;
1819
1820                                 /* CNM has 1-to-1 mapping with FLM.
1821                                  * However, if header splitting is enabled,
1822                                  * then max CNM qid is half of max FLM qid.
1823                                  */
1824                                 max_ctx_qid[CTXT_CNM] = nohdr ?
1825                                                         max_ctx_qid[idx] :
1826                                                         max_ctx_qid[idx] >> 1;
1827
1828                                 /* One more increment for CNM */
1829                                 found++;
1830                         }
1831                 }
1832                 if (found == nelem)
1833                         break;
1834         }
1835
1836         /* Sanity check. Ensure the values are within known max. */
1837         max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
1838                                          M_CTXTQID);
1839         max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
1840                                           CUDBG_MAX_INGRESS_QIDS);
1841         max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
1842                                       CUDBG_MAX_FL_QIDS);
1843         max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
1844                                       CUDBG_MAX_CNM_QIDS);
1845         return 0;
1846 }
1847
1848 static int collect_dump_context(struct cudbg_init *pdbg_init,
1849                                 struct cudbg_buffer *dbg_buff,
1850                                 struct cudbg_error *cudbg_err)
1851 {
1852         struct cudbg_buffer scratch_buff;
1853         struct cudbg_buffer temp_buff;
1854         struct adapter *padap = pdbg_init->adap;
1855         u32 size = 0, next_offset = 0, total_size = 0;
1856         struct cudbg_ch_cntxt *buff = NULL;
1857         struct struct_meminfo meminfo;
1858         int bytes = 0;
1859         int rc = 0;
1860         u32 i, j;
1861         u32 max_ctx_qid[CTXT_CNM + 1];
1862         bool limit_qid = false;
1863         u32 qid_count = 0;
1864
1865         rc = fill_meminfo(padap, &meminfo);
1866         if (rc)
1867                 goto err;
1868
1869         /* Get max valid qid for each type of queue */
1870         rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
1871         if (rc)
1872                 goto err;
1873
1874         /* There are four types of queues. Collect context up to the max
1875          * qid of each type of queue.
1876          */
1877         for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1878                 size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
1879
1880         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1881         if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
1882                 /* Not enough scratch memory available.
1883                  * Collect context for at most CUDBG_LOWMEM_MAX_CTXT_QIDS
1884                  * per queue type instead.
1885                  */
1886                 size = 0;
1887                 for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1888                         size += sizeof(struct cudbg_ch_cntxt) *
1889                                 CUDBG_LOWMEM_MAX_CTXT_QIDS;
1890
1891                 limit_qid = true;
1892                 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1893                 if (rc)
1894                         goto err;
1895         }
1896
1897         buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
1898
1899         /* Collect context data */
1900         for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
1901                 qid_count = 0;
1902                 for (j = 0; j < max_ctx_qid[i]; j++) {
1903                         read_sge_ctxt(pdbg_init, j, i, buff->data);
1904
1905                         rc = check_valid(buff->data, i);
1906                         if (rc) {
1907                                 buff->cntxt_type = i;
1908                                 buff->cntxt_id = j;
1909                                 buff++;
1910                                 total_size += sizeof(struct cudbg_ch_cntxt);
1911
1912                                 if (i == CTXT_FLM) {
1913                                         read_sge_ctxt(pdbg_init, j, CTXT_CNM,
1914                                                       buff->data);
1915                                         buff->cntxt_type = CTXT_CNM;
1916                                         buff->cntxt_id = j;
1917                                         buff++;
1918                                         total_size +=
1919                                                 sizeof(struct cudbg_ch_cntxt);
1920                                 }
1921                                 qid_count++;
1922                         }
1923
1924                         /* If there's not enough space to collect more qids,
1925                          * then bail out and move on to the next queue type.
1926                          */
1927                         if (limit_qid &&
1928                             qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
1929                                 break;
1930                 }
1931         }
1932
1933         scratch_buff.size = total_size;
1934         rc = write_compression_hdr(&scratch_buff, dbg_buff);
1935         if (rc)
1936                 goto err1;
1937
1938         /* Split the buffer and compress it in CUDBG_CHUNK_SIZE chunks. */
1939         while (total_size > 0) {
1940                 bytes = min_t(unsigned long, (unsigned long)total_size,
1941                               (unsigned long)CUDBG_CHUNK_SIZE);
1942                 temp_buff.size = bytes;
1943                 temp_buff.data = (void *)((char *)scratch_buff.data +
1944                                           next_offset);
1945
1946                 rc = compress_buff(&temp_buff, dbg_buff);
1947                 if (rc)
1948                         goto err1;
1949
1950                 total_size -= bytes;
1951                 next_offset += bytes;
1952         }
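        /*
         * E.g., if total_size were 2.5 * CUDBG_CHUNK_SIZE, the loop above
         * would make three compress_buff() calls: one full chunk, another
         * full chunk, and the remaining half chunk.
         */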
1953
1954 err1:
1955         scratch_buff.size = size;
1956         release_scratch_buff(&scratch_buff, dbg_buff);
1957 err:
1958         return rc;
1959 }
1960
1961 static int collect_fw_devlog(struct cudbg_init *pdbg_init,
1962                              struct cudbg_buffer *dbg_buff,
1963                              struct cudbg_error *cudbg_err)
1964 {
1965 #ifdef notyet
1966         struct adapter *padap = pdbg_init->adap;
1967         struct devlog_params *dparams = &padap->params.devlog;
1968         struct cudbg_param *params = NULL;
1969         struct cudbg_buffer scratch_buff;
1970         u32 offset;
1971         int rc = 0, i;
1972
1973         rc = t4_init_devlog_params(padap, 1);
1974
1975         if (rc < 0) {
1976                 pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "
1977                                  "%d\n", __func__, rc);
1978                 for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
1979                         if (pdbg_init->dbg_params[i].param_type ==
1980                             CUDBG_DEVLOG_PARAM) {
1981                                 params = &pdbg_init->dbg_params[i];
1982                                 break;
1983                         }
1984                 }
1985
1986                 if (params) {
1987                         dparams->memtype = params->u.devlog_param.memtype;
1988                         dparams->start = params->u.devlog_param.start;
1989                         dparams->size = params->u.devlog_param.size;
1990                 } else {
1991                         cudbg_err->sys_err = rc;
1992                         goto err;
1993                 }
1994         }
1995
1996         rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
1997
1998         if (rc)
1999                 goto err;
2000
2001         /* Collect FW devlog */
2002         if (dparams->start != 0) {
2003                 offset = scratch_buff.offset;
2004                 rc = t4_memory_rw(padap, padap->params.drv_memwin,
2005                                   dparams->memtype, dparams->start,
2006                                   dparams->size,
2007                                   (__be32 *)((char *)scratch_buff.data +
2008                                              offset), 1);
2009
2010                 if (rc) {
2011                         pdbg_init->print("%s(), t4_memory_rw failed!, rc: "
2012                                          "%d\n", __func__, rc);
2013                         cudbg_err->sys_err = rc;
2014                         goto err1;
2015                 }
2016         }
2017
2018         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2019
2020         if (rc)
2021                 goto err1;
2022
2023         rc = compress_buff(&scratch_buff, dbg_buff);
2024
2025 err1:
2026         release_scratch_buff(&scratch_buff, dbg_buff);
2027 err:
2028         return rc;
2029 #endif
2030         return (EDOOFUS);
2031 }
2032 /* CIM OBQ */
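/*
 * Each wrapper below simply fixes the OBQ id passed to read_cim_obq():
 * ULP0..ULP3 use qids 0..3, SGE uses qid 4, NCSI uses qid 5, and the two
 * SGE RX queues use qids 6 and 7.
 */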
2033
2034 static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
2035                                 struct cudbg_buffer *dbg_buff,
2036                                 struct cudbg_error *cudbg_err)
2037 {
2038         int rc = 0, qid = 0;
2039
2040         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2041
2042         return rc;
2043 }
2044
2045 static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
2046                                 struct cudbg_buffer *dbg_buff,
2047                                 struct cudbg_error *cudbg_err)
2048 {
2049         int rc = 0, qid = 1;
2050
2051         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2052
2053         return rc;
2054 }
2055
2056 static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
2057                                 struct cudbg_buffer *dbg_buff,
2058                                 struct cudbg_error *cudbg_err)
2059 {
2060         int rc = 0, qid = 2;
2061
2062         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2063
2064         return rc;
2065 }
2066
2067 static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
2068                                 struct cudbg_buffer *dbg_buff,
2069                                 struct cudbg_error *cudbg_err)
2070 {
2071         int rc = 0, qid = 3;
2072
2073         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2074
2075         return rc;
2076 }
2077
2078 static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
2079                                struct cudbg_buffer *dbg_buff,
2080                                struct cudbg_error *cudbg_err)
2081 {
2082         int rc = 0, qid = 4;
2083
2084         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2085
2086         return rc;
2087 }
2088
2089 static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
2090                                 struct cudbg_buffer *dbg_buff,
2091                                 struct cudbg_error *cudbg_err)
2092 {
2093         int rc = 0, qid = 5;
2094
2095         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2096
2097         return rc;
2098 }
2099
2100 static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
2101                                  struct cudbg_buffer *dbg_buff,
2102                                  struct cudbg_error *cudbg_err)
2103 {
2104         int rc = 0, qid = 6;
2105
2106         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2107
2108         return rc;
2109 }
2110
2111 static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
2112                                  struct cudbg_buffer *dbg_buff,
2113                                  struct cudbg_error *cudbg_err)
2114 {
2115         int rc = 0, qid = 7;
2116
2117         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2118
2119         return rc;
2120 }
2121
2122 static int read_cim_obq(struct cudbg_init *pdbg_init,
2123                         struct cudbg_buffer *dbg_buff,
2124                         struct cudbg_error *cudbg_err, int qid)
2125 {
2126         struct cudbg_buffer scratch_buff;
2127         struct adapter *padap = pdbg_init->adap;
2128         u32 qsize;
2129         int rc;
2130         int no_of_read_words;
2131
2132         /* collect CIM OBQ */
2133         qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
2134         rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2135         if (rc)
2136                 goto err;
2137
2138         /* t4_read_cim_obq will return no. of read words or error */
2139         no_of_read_words = t4_read_cim_obq(padap, qid,
2140                                            (u32 *)((char *)scratch_buff.data +
2141                                            scratch_buff.offset), qsize);
2142
2143         /* A return value of zero or less indicates an error. */
2144         if (no_of_read_words <= 0) {
2145                 if (no_of_read_words == 0)
2146                         rc = CUDBG_SYSTEM_ERROR;
2147                 else
2148                         rc = no_of_read_words;
2149                 if (pdbg_init->verbose)
2150                         pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
2151                                  __func__, rc);
2152                 cudbg_err->sys_err = rc;
2153                 goto err1;
2154         }
2155
2156         scratch_buff.size = no_of_read_words * 4;
2157
2158         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2159
2160         if (rc)
2161                 goto err1;
2162
2163         rc = compress_buff(&scratch_buff, dbg_buff);
2164
2165         if (rc)
2166                 goto err1;
2167
2168 err1:
2169         release_scratch_buff(&scratch_buff, dbg_buff);
2170 err:
2171         return rc;
2172 }
2173
2174 /* CIM IBQ */
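/*
 * Likewise, the IBQ wrappers below only fix the IBQ id passed to
 * read_cim_ibq(): TP0/TP1 use qids 0/1, ULP uses qid 2, SGE0/SGE1 use
 * qids 3/4, and NCSI uses qid 5.
 */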
2175
2176 static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
2177                                struct cudbg_buffer *dbg_buff,
2178                                struct cudbg_error *cudbg_err)
2179 {
2180         int rc = 0, qid = 0;
2181
2182         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2183         return rc;
2184 }
2185
2186 static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
2187                                struct cudbg_buffer *dbg_buff,
2188                                struct cudbg_error *cudbg_err)
2189 {
2190         int rc = 0, qid = 1;
2191
2192         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2193         return rc;
2194 }
2195
2196 static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
2197                                struct cudbg_buffer *dbg_buff,
2198                                struct cudbg_error *cudbg_err)
2199 {
2200         int rc = 0, qid = 2;
2201
2202         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2203         return rc;
2204 }
2205
2206 static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
2207                                 struct cudbg_buffer *dbg_buff,
2208                                 struct cudbg_error *cudbg_err)
2209 {
2210         int rc = 0, qid = 3;
2211
2212         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2213         return rc;
2214 }
2215
2216 static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
2217                                 struct cudbg_buffer *dbg_buff,
2218                                 struct cudbg_error *cudbg_err)
2219 {
2220         int rc = 0, qid = 4;
2221
2222         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2223         return rc;
2224 }
2225
2226 static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
2227                                 struct cudbg_buffer *dbg_buff,
2228                                 struct cudbg_error *cudbg_err)
2229 {
2230         int rc, qid = 5;
2231
2232         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2233         return rc;
2234 }
2235
2236 static int read_cim_ibq(struct cudbg_init *pdbg_init,
2237                         struct cudbg_buffer *dbg_buff,
2238                         struct cudbg_error *cudbg_err, int qid)
2239 {
2240         struct adapter *padap = pdbg_init->adap;
2241         struct cudbg_buffer scratch_buff;
2242         u32 qsize;
2243         int rc;
2244         int no_of_read_words;
2245
2246         /* collect CIM IBQ */
2247         qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
2248         rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2249
2250         if (rc)
2251                 goto err;
2252
2253         /* t4_read_cim_ibq will return no. of read words or error */
2254         no_of_read_words = t4_read_cim_ibq(padap, qid,
2255                                            (u32 *)((char *)scratch_buff.data +
2256                                            scratch_buff.offset), qsize);
2257         /* A return value of zero or less indicates an error. */
2258         if (no_of_read_words <= 0) {
2259                 if (no_of_read_words == 0)
2260                         rc = CUDBG_SYSTEM_ERROR;
2261                 else
2262                         rc = no_of_read_words;
2263                 if (pdbg_init->verbose)
2264                         pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
2265                                  __func__, rc);
2266                 cudbg_err->sys_err = rc;
2267                 goto err1;
2268         }
2269
2270         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2271         if (rc)
2272                 goto err1;
2273
2274         rc = compress_buff(&scratch_buff, dbg_buff);
2275         if (rc)
2276                 goto err1;
2277
2278 err1:
2279         release_scratch_buff(&scratch_buff, dbg_buff);
2280
2281 err:
2282         return rc;
2283 }
2284
2285 static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
2286                              struct cudbg_buffer *dbg_buff,
2287                              struct cudbg_error *cudbg_err)
2288 {
2289         struct cudbg_buffer scratch_buff;
2290         struct adapter *padap = pdbg_init->adap;
2291         int rc = 0;
2292
2293         /* collect CIM MA LA */
2294         scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2295         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2296         if (rc)
2297                 goto err;
2298
2299         /* no return */
2300         t4_cim_read_ma_la(padap,
2301                           (u32 *) ((char *)scratch_buff.data +
2302                                    scratch_buff.offset),
2303                           (u32 *) ((char *)scratch_buff.data + scratch_buff.offset +
2304                                    5 * CIM_MALA_SIZE * sizeof(u32)));
2305
2306         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2307         if (rc)
2308                 goto err1;
2309
2310         rc = compress_buff(&scratch_buff, dbg_buff);
2311
2312 err1:
2313         release_scratch_buff(&scratch_buff, dbg_buff);
2314 err:
2315         return rc;
2316 }
2317
2318 static int collect_cim_la(struct cudbg_init *pdbg_init,
2319                           struct cudbg_buffer *dbg_buff,
2320                           struct cudbg_error *cudbg_err)
2321 {
2322         struct cudbg_buffer scratch_buff;
2323         struct adapter *padap = pdbg_init->adap;
2324
2325         int rc;
2326         u32 cfg = 0;
2327         int size;
2328
2329         /* collect CIM LA */
2330         if (is_t6(padap)) {
2331                 size = padap->params.cim_la_size / 10 + 1;
2332                 size *= 11 * sizeof(u32);
2333         } else {
2334                 size = padap->params.cim_la_size / 8;
2335                 size *= 8 * sizeof(u32);
2336         }
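        /*
         * Note (interpretation of the arithmetic above): T6 appears to dump
         * the LA as rows of 11 u32s covering 10 entries each, plus one
         * partial row, while earlier chips dump 8 u32s per group of 8
         * entries.
         */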
2337
2338         size += sizeof(cfg);
2339
2340         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
2341         if (rc)
2342                 goto err;
2343
2344         rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
2345
2346         if (rc) {
2347                 if (pdbg_init->verbose)
2348                         pdbg_init->print("%s: t4_cim_read failed (%d)\n",
2349                                  __func__, rc);
2350                 cudbg_err->sys_err = rc;
2351                 goto err1;
2352         }
2353
2354         memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
2355                sizeof(cfg));
2356
2357         rc = t4_cim_read_la(padap,
2358                             (u32 *) ((char *)scratch_buff.data +
2359                                      scratch_buff.offset + sizeof(cfg)), NULL);
2360         if (rc < 0) {
2361                 if (pdbg_init->verbose)
2362                         pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
2363                                  __func__, rc);
2364                 cudbg_err->sys_err = rc;
2365                 goto err1;
2366         }
2367
2368         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2369         if (rc)
2370                 goto err1;
2371
2372         rc = compress_buff(&scratch_buff, dbg_buff);
2373         if (rc)
2374                 goto err1;
2375
2376 err1:
2377         release_scratch_buff(&scratch_buff, dbg_buff);
2378 err:
2379         return rc;
2380 }
2381
2382 static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
2383                             struct cudbg_buffer *dbg_buff,
2384                             struct cudbg_error *cudbg_err)
2385 {
2386         struct cudbg_buffer scratch_buff;
2387         struct adapter *padap = pdbg_init->adap;
2388         u32 offset;
2389         int cim_num_obq, rc = 0;
2390
2391         struct struct_cim_qcfg *cim_qcfg_data = NULL;
2392
2393         rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
2394                               &scratch_buff);
2395
2396         if (rc)
2397                 goto err;
2398
2399         offset = scratch_buff.offset;
2400
2401         cim_num_obq = is_t4(padap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
2402
2403         cim_qcfg_data =
2404                 (struct struct_cim_qcfg *)((char *)scratch_buff.data +
2405                                            offset);
2406
2407         rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
2408                          ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
2409
2410         if (rc) {
2411                 if (pdbg_init->verbose)
2412                         pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
2413                             __func__, rc);
2414                 cudbg_err->sys_err = rc;
2415                 goto err1;
2416         }
2417
2418         rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
2419                          ARRAY_SIZE(cim_qcfg_data->obq_wr),
2420                          cim_qcfg_data->obq_wr);
2421
2422         if (rc) {
2423                 if (pdbg_init->verbose)
2424                         pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
2425                             __func__, rc);
2426                 cudbg_err->sys_err = rc;
2427                 goto err1;
2428         }
2429
2430         /* no return val */
2431         t4_read_cimq_cfg(padap,
2432                         cim_qcfg_data->base,
2433                         cim_qcfg_data->size,
2434                         cim_qcfg_data->thres);
2435
2436         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2437         if (rc)
2438                 goto err1;
2439
2440         rc = compress_buff(&scratch_buff, dbg_buff);
2441         if (rc)
2442                 goto err1;
2443
2444 err1:
2445         release_scratch_buff(&scratch_buff, dbg_buff);
2446 err:
2447         return rc;
2448 }
2449
2450 /**
2451  * Fetch the TX/RX payload regions start and end.
2452  *
2453  * @padap (IN): adapter handle.
2454  * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
2455  * @mem_tot_len (IN): total length of @mem_type memory region to read.
2456  * @payload_type (IN): TX or RX Payload.
2457  * @reg_info (OUT): store the payload region info.
2458  *
2459  * Fetch the TX/RX payload region information from meminfo.
2460  * However, reading from the @mem_type region starts at 0 and not
2461  * from whatever base info is stored in meminfo.  Hence, if the
2462  * payload region exists, then calculate the payload region
2463  * start and end with respect to 0 and @mem_tot_len, respectively, and set
2464  * @reg_info->exist to true. Otherwise, set @reg_info->exist to false.
2465  */
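/*
 * Illustrative example (hypothetical addresses): with a memory region
 * spanning [0x0, 0x3ffffff] and a Tx payload region at [0x1000000,
 * 0x2ffffff], the function reports exist = true, start = 0x1000000 and
 * end = 0x2ffffff, i.e. both bounds relative to the start of the region
 * being read.
 */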
2466 #ifdef notyet
2467 static int get_payload_range(struct adapter *padap, u8 mem_type,
2468                              unsigned long mem_tot_len, u8 payload_type,
2469                              struct struct_region_info *reg_info)
2470 {
2471         struct struct_meminfo meminfo;
2472         struct struct_mem_desc mem_region;
2473         struct struct_mem_desc payload;
2474         u32 i, idx, found = 0;
2475         u8 mc_type;
2476         int rc;
2477
2478         /* Get meminfo of all regions */
2479         rc = fill_meminfo(padap, &meminfo);
2480         if (rc)
2481                 return rc;
2482
2483         /* Extract the specified TX or RX Payload region range */
2484         memset(&payload, 0, sizeof(struct struct_mem_desc));
2485         for (i = 0; i < meminfo.mem_c; i++) {
2486                 if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
2487                         continue;                        /* skip holes */
2488
2489                 idx = meminfo.mem[i].idx;
2490                 /* Get TX or RX Payload region start and end */
2491                 if (idx == payload_type) {
2492                         if (!(meminfo.mem[i].limit))
2493                                 meminfo.mem[i].limit =
2494                                         i < meminfo.mem_c - 1 ?
2495                                         meminfo.mem[i + 1].base - 1 : ~0;
2496
2497                         memcpy(&payload, &meminfo.mem[i], sizeof(payload));
2498                         found = 1;
2499                         break;
2500                 }
2501         }
2502
2503         /* If the TX or RX payload region is not found, return an error. */
2504         if (!found)
2505                 return -EINVAL;
2506
2507         if (mem_type < MEM_MC) {
2508                 memcpy(&mem_region, &meminfo.avail[mem_type],
2509                        sizeof(mem_region));
2510         } else {
2511                 /* Check if both MC0 and MC1 exist by checking if a
2512                  * base address for the specified @mem_type exists.
2513                  * If a base address exists, then there is MC1 and
2514                  * hence use the base address stored at index 3.
2515                  * Otherwise, use the base address stored at index 2.
2516                  */
2517                 mc_type = meminfo.avail[mem_type].base ?
2518                           mem_type : mem_type - 1;
2519                 memcpy(&mem_region, &meminfo.avail[mc_type],
2520                        sizeof(mem_region));
2521         }
2522
2523         /* Check if payload region exists in current memory */
2524         if (payload.base < mem_region.base && payload.limit < mem_region.base) {
2525                 reg_info->exist = false;
2526                 return 0;
2527         }
2528
2529         /* Get Payload region start and end with respect to 0 and
2530          * mem_tot_len, respectively.  This is because reading from the
2531          * memory region starts at 0 and not at base info stored in meminfo.
2532          */
2533         if (payload.base < mem_region.limit) {
2534                 reg_info->exist = true;
2535                 if (payload.base >= mem_region.base)
2536                         reg_info->start = payload.base - mem_region.base;
2537                 else
2538                         reg_info->start = 0;
2539
2540                 if (payload.limit < mem_region.limit)
2541                         reg_info->end = payload.limit - mem_region.base;
2542                 else
2543                         reg_info->end = mem_tot_len;
2544         }
2545
2546         return 0;
2547 }
2548 #endif
2549
2550 static int read_fw_mem(struct cudbg_init *pdbg_init,
2551                         struct cudbg_buffer *dbg_buff, u8 mem_type,
2552                         unsigned long tot_len, struct cudbg_error *cudbg_err)
2553 {
2554 #ifdef notyet
2555         struct cudbg_buffer scratch_buff;
2556         struct adapter *padap = pdbg_init->adap;
2557         unsigned long bytes_read = 0;
2558         unsigned long bytes_left;
2559         unsigned long bytes;
2560         int           rc;
2561         struct struct_region_info payload[2]; /* TX and RX Payload Region */
2562         u16 get_payload_flag;
2563         u8 i;
2564
2565         get_payload_flag =
2566                 pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;
2567
2568         /* If explicitly asked to get TX/RX Payload data,
2569          * then don't zero out the payload data. Otherwise,
2570          * zero out the payload data.
2571          */
2572         if (!get_payload_flag) {
2573                 u8 region_index[2];
2574                 u8 j = 0;
2575
2576                 /* Find the index of TX and RX Payload regions in meminfo */
2577                 for (i = 0; i < ARRAY_SIZE(region); i++) {
2578                         if (!strcmp(region[i], "Tx payload:") ||
2579                             !strcmp(region[i], "Rx payload:")) {
2580                                 region_index[j] = i;
2581                                 j++;
2582                                 if (j == 2)
2583                                         break;
2584                         }
2585                 }
2586
2587                 /* Get TX/RX Payload region range if they exist */
2588                 memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
2589                 for (i = 0; i < ARRAY_SIZE(payload); i++) {
2590                         rc = get_payload_range(padap, mem_type, tot_len,
2591                                                region_index[i],
2592                                                &payload[i]);
2593                         if (rc)
2594                                 goto err;
2595
2596                         if (payload[i].exist) {
2597                                 /* Align start and end to avoid wrap around */
2598                                 payload[i].start =
2599                                         roundup(payload[i].start,
2600                                             CUDBG_CHUNK_SIZE);
2601                                 payload[i].end =
2602                                         rounddown(payload[i].end,
2603                                             CUDBG_CHUNK_SIZE);
2604                         }
2605                 }
2606         }
2607
2608         bytes_left = tot_len;
2609         scratch_buff.size = tot_len;
2610         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2611         if (rc)
2612                 goto err;
2613
2614         while (bytes_left > 0) {
2615                 bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2616                 rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
2617
2618                 if (rc) {
2619                         rc = CUDBG_STATUS_NO_SCRATCH_MEM;
2620                         goto err;
2621                 }
2622
2623                 if (!get_payload_flag) {
2624                         for (i = 0; i < ARRAY_SIZE(payload); i++) {
2625                                 if (payload[i].exist &&
2626                                     bytes_read >= payload[i].start &&
2627                                     (bytes_read + bytes) <= payload[i].end) {
2628                                         memset(scratch_buff.data, 0, bytes);
2629                                         /* TX and RX Payload regions
2630                                          * can't overlap.
2631                                          */
2632                                         goto skip_read;
2633                                 }
2634                         }
2635                 }
2636
2637                 /* Read the next chunk of this memory region from the
2638                  * adapter. */
2639                 rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
2640                                   bytes, (__be32 *)(scratch_buff.data), 1);
2641
2642                 if (rc) {
2643                         if (pdbg_init->verbose)
2644                                 pdbg_init->print("%s: t4_memory_rw failed (%d)",
2645                                     __func__, rc);
2646                         cudbg_err->sys_err = rc;
2647                         goto err1;
2648                 }
2649
2650 skip_read:
2651                 rc = compress_buff(&scratch_buff, dbg_buff);
2652                 if (rc)
2653                         goto err1;
2654
2655                 bytes_left -= bytes;
2656                 bytes_read += bytes;
2657                 release_scratch_buff(&scratch_buff, dbg_buff);
2658         }
2659
2660 err1:
2661         if (rc)
2662                 release_scratch_buff(&scratch_buff, dbg_buff);
2663
2664 err:
2665         return rc;
2666 #endif
2667         return (EDOOFUS);
2668 }
2669
2670 static void collect_mem_info(struct cudbg_init *pdbg_init,
2671                              struct card_mem *mem_info)
2672 {
2673         struct adapter *padap = pdbg_init->adap;
2674         u32 value;
2675         int t4 = 0;
2676
2677         if (is_t4(padap))
2678                 t4 = 1;
2679
2680         if (t4) {
2681                 value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2682                 value = G_EXT_MEM_SIZE(value);
2683                 mem_info->size_mc0 = (u16)value;  /* size in MB */
2684
2685                 value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2686                 if (value & F_EXT_MEM_ENABLE)
2687                         /* set mc0 flag bit */
2688                         mem_info->mem_flag |= (1 << MC0_FLAG);
2689         } else {
2690                 value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2691                 value = G_EXT_MEM0_SIZE(value);
2692                 mem_info->size_mc0 = (u16)value;
2693
2694                 value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2695                 value = G_EXT_MEM1_SIZE(value);
2696                 mem_info->size_mc1 = (u16)value;
2697
2698                 value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2699                 if (value & F_EXT_MEM0_ENABLE)
2700                         mem_info->mem_flag |= (1 << MC0_FLAG);
2701                 if (value & F_EXT_MEM1_ENABLE)
2702                         mem_info->mem_flag |= (1 << MC1_FLAG);
2703         }
2704
2705         value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2706         value = G_EDRAM0_SIZE(value);
2707         mem_info->size_edc0 = (u16)value;
2708
2709         value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2710         value = G_EDRAM1_SIZE(value);
2711         mem_info->size_edc1 = (u16)value;
2712
2713         value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2714         if (value & F_EDRAM0_ENABLE)
2715                 mem_info->mem_flag |= (1 << EDC0_FLAG);
2716         if (value & F_EDRAM1_ENABLE)
2717                 mem_info->mem_flag |= (1 << EDC1_FLAG);
2718
2719 }
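
/*
 * Example (illustrative value): if G_EDRAM0_SIZE() decodes to 256, then
 * size_edc0 is 256 MB and collect_edc0_meminfo() below will ask
 * read_fw_mem() for 256 * 1024 * 1024 bytes.
 */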
2720
2721 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
2722                                 struct cudbg_error *cudbg_err)
2723 {
2724         struct adapter *padap = pdbg_init->adap;
2725         int rc;
2726
2727         if (is_fw_attached(pdbg_init)) {
2728
2729                 /* Flush uP dcache before reading edcX/mcX  */
2730                 rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
2731                     "t4cudl");
2732                 if (rc == 0) {
2733                         rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
2734                         end_synchronized_op(padap, 0);
2735                 }
2736
2737                 if (rc) {
2738                         if (pdbg_init->verbose)
2739                                 pdbg_init->print("%s: t4_fwcache failed (%d)\n",
2740                                  __func__, rc);
2741                         cudbg_err->sys_warn = rc;
2742                 }
2743         }
2744 }
2745
2746 static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2747                                 struct cudbg_buffer *dbg_buff,
2748                                 struct cudbg_error *cudbg_err)
2749 {
2750         struct card_mem mem_info = {0};
2751         unsigned long edc0_size;
2752         int rc;
2753
2754         cudbg_t4_fwcache(pdbg_init, cudbg_err);
2755
2756         collect_mem_info(pdbg_init, &mem_info);
2757
2758         if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2759                 edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2760                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2761                                  edc0_size, cudbg_err);
2762                 if (rc)
2763                         goto err;
2764
2765         } else {
2766                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2767                 if (pdbg_init->verbose)
2768                         pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2769                                  __func__, err_msg[-rc]);
2770                 goto err;
2771
2772         }
2773 err:
2774         return rc;
2775 }
2776
2777 static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2778                                 struct cudbg_buffer *dbg_buff,
2779                                 struct cudbg_error *cudbg_err)
2780 {
2781         struct card_mem mem_info = {0};
2782         unsigned long edc1_size;
2783         int rc;
2784
2785         cudbg_t4_fwcache(pdbg_init, cudbg_err);
2786
2787         collect_mem_info(pdbg_init, &mem_info);
2788
2789         if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2790                 edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2791                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2792                                  edc1_size, cudbg_err);
2793                 if (rc)
2794                         goto err;
2795         } else {
2796                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2797                 if (pdbg_init->verbose)
2798                         pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2799                                  __func__, err_msg[-rc]);
2800                 goto err;
2801         }
2802
2803 err:
2804
2805         return rc;
2806 }
2807
2808 static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2809                                struct cudbg_buffer *dbg_buff,
2810                                struct cudbg_error *cudbg_err)
2811 {
2812         struct card_mem mem_info = {0};
2813         unsigned long mc0_size;
2814         int rc;
2815
2816         cudbg_t4_fwcache(pdbg_init, cudbg_err);
2817
2818         collect_mem_info(pdbg_init, &mem_info);
2819
2820         if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2821                 mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2822                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2823                                  mc0_size, cudbg_err);
2824                 if (rc)
2825                         goto err;
2826         } else {
2827                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2828                 if (pdbg_init->verbose)
2829                         pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2830                                  __func__, err_msg[-rc]);
2831                 goto err;
2832         }
2833
2834 err:
2835         return rc;
2836 }
2837
2838 static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2839                                struct cudbg_buffer *dbg_buff,
2840                                struct cudbg_error *cudbg_err)
2841 {
2842         struct card_mem mem_info = {0};
2843         unsigned long mc1_size;
2844         int rc;
2845
2846         cudbg_t4_fwcache(pdbg_init, cudbg_err);
2847
2848         collect_mem_info(pdbg_init, &mem_info);
2849
2850         if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2851                 mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2852                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2853                                  mc1_size, cudbg_err);
2854                 if (rc)
2855                         goto err;
2856         } else {
2857                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2858
2859                 if (pdbg_init->verbose)
2860                 pdbg_init->print("%s(), collect_mem_info failed, %s\n",
2861                                  __func__, err_msg[-rc]);
2862                 goto err;
2863         }
2864 err:
2865         return rc;
2866 }
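
/*
 * The four EDC/MC collectors above share one pattern: test the
 * presence flag reported by collect_mem_info(), convert the
 * advertised size from MiB to bytes, and hand that region to
 * read_fw_mem().  A minimal sketch of the size conversion alone
 * (illustrative helper, not part of the driver):
 */
#if 0
static unsigned long mem_region_bytes(u32 size_mib)
{

        return (((unsigned long)size_mib) * 1024 * 1024);
}
#endif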
2867
2868 static int collect_reg_dump(struct cudbg_init *pdbg_init,
2869                             struct cudbg_buffer *dbg_buff,
2870                             struct cudbg_error *cudbg_err)
2871 {
2872         struct cudbg_buffer scratch_buff;
2873         struct cudbg_buffer tmp_scratch_buff;
2874         struct adapter *padap = pdbg_init->adap;
2875         unsigned long        bytes_read = 0;
2876         unsigned long        bytes_left;
2877         u32                  buf_size = 0, bytes = 0;
2878         int                  rc = 0;
2879
2880         if (is_t4(padap))
2881                 buf_size = T4_REGMAP_SIZE; /* + sizeof(unsigned int) */
2882         else if (is_t5(padap) || is_t6(padap))
2883                 buf_size = T5_REGMAP_SIZE;
2884
2885         scratch_buff.size = buf_size;
2886
2887         tmp_scratch_buff = scratch_buff;
2888
2889         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2890         if (rc)
2891                 goto err;
2892
2893         /* t4_get_regs() has no return value to check */
2894         t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2895         bytes_left = scratch_buff.size;
2896
2897         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2898         if (rc)
2899                 goto err1;
2900
2901         while (bytes_left > 0) {
2902                 tmp_scratch_buff.data =
2903                         ((char *)scratch_buff.data) + bytes_read;
2904                 bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2905                 tmp_scratch_buff.size = bytes;
2906                 compress_buff(&tmp_scratch_buff, dbg_buff);
2907                 bytes_left -= bytes;
2908                 bytes_read += bytes;
2909         }
2910
2911 err1:
2912         release_scratch_buff(&scratch_buff, dbg_buff);
2913 err:
2914         return rc;
2915 }
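
/*
 * The loop above emits the register map in CUDBG_CHUNK_SIZE pieces.
 * A minimal standalone sketch of the same chunk walk, assuming only
 * a total byte count (the compress step is elided):
 */
#if 0
static void chunk_walk(unsigned long total)
{
        unsigned long done = 0, bytes;

        while (done < total) {
                bytes = min_t(unsigned long, total - done,
                              (unsigned long)CUDBG_CHUNK_SIZE);
                /* compress 'bytes' bytes starting at offset 'done' */
                done += bytes;
        }
}
#endif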
2916
2917 static int collect_cctrl(struct cudbg_init *pdbg_init,
2918                          struct cudbg_buffer *dbg_buff,
2919                          struct cudbg_error *cudbg_err)
2920 {
2921         struct cudbg_buffer scratch_buff;
2922         struct adapter *padap = pdbg_init->adap;
2923         u32 size;
2924         int rc;
2925
2926         size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2927         scratch_buff.size = size;
2928
2929         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2930         if (rc)
2931                 goto err;
2932
2933         t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2934
2935         rc = write_compression_hdr(&scratch_buff, dbg_buff);
2936         if (rc)
2937                 goto err1;
2938
2939         rc = compress_buff(&scratch_buff, dbg_buff);
2940
2941 err1:
2942         release_scratch_buff(&scratch_buff, dbg_buff);
2943 err:
2944         return rc;
2945 }
2946
2947 static int check_busy_bit(struct adapter *padap)
2948 {
2949         u32 val;
2950         u32 busy = 1;
2951         int i = 0;
2952         int retry = 10;
2953         int status = 0;
2954
2955         while (busy && (i < retry)) {
2956                 val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2957                 busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2958                 i++;
2959         }
2960
2961         if (busy)
2962                 status = -1;
2963
2964         return status;
2965 }
2966
2967 static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2968 {
2969         int rc = 0;
2970
2971         /* write register address into the A_CIM_HOST_ACC_CTRL */
2972         t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2973
2974         /* Poll HOSTBUSY */
2975         rc = check_busy_bit(padap);
2976         if (rc)
2977                 goto err;
2978
2979         /* Read value from A_CIM_HOST_ACC_DATA */
2980         *val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2981
2982 err:
2983         return rc;
2984 }
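
/*
 * A minimal usage sketch of the CIM host-access protocol implemented
 * by cim_ha_rreg() above: each read writes the target address into
 * A_CIM_HOST_ACC_CTRL, waits for HOSTBUSY to clear, then fetches the
 * word from A_CIM_HOST_ACC_DATA.  The 0x100 base address is purely
 * illustrative:
 */
#if 0
static int cim_read_range_example(struct adapter *padap, u32 *out)
{
        int i, rc;

        for (i = 0; i < 4; i++) {
                rc = cim_ha_rreg(padap, 0x100 + (i * 4), &out[i]);
                if (rc)
                        return rc;      /* HOSTBUSY never cleared */
        }
        return 0;
}
#endif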
2985
2986 static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2987                        struct ireg_field *up_cim_reg, u32 *buff)
2988 {
2989         u32 i;
2990         int rc = 0;
2991
2992         for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2993                 rc = cim_ha_rreg(padap,
2994                                  up_cim_reg->ireg_local_offset + (i * 4),
2995                                 buff);
2996                 if (rc) {
2997                         if (pdbg_init->verbose)
2998                                 pdbg_init->print("BUSY timeout reading "
2999                                          "CIM_HOST_ACC_CTRL\n");
3000                         goto err;
3001                 }
3002
3003                 buff++;
3004         }
3005
3006 err:
3007         return rc;
3008 }
3009
3010 static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
3011                                    struct cudbg_buffer *dbg_buff,
3012                                    struct cudbg_error *cudbg_err)
3013 {
3014         struct cudbg_buffer scratch_buff;
3015         struct adapter *padap = pdbg_init->adap;
3016         struct ireg_buf *up_cim;
3017         u32 size;
3018         int i, rc, n;
3019
3020         n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
3021         size = sizeof(struct ireg_buf) * n;
3022         scratch_buff.size = size;
3023
3024         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3025         if (rc)
3026                 goto err;
3027
3028         up_cim = (struct ireg_buf *)scratch_buff.data;
3029
3030         for (i = 0; i < n; i++) {
3031                 struct ireg_field *up_cim_reg = &up_cim->tp_pio;
3032                 u32 *buff = up_cim->outbuf;
3033
3034                 if (is_t5(padap)) {
3035                         up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
3036                         up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
3037                         up_cim_reg->ireg_local_offset =
3038                                                 t5_up_cim_reg_array[i][2];
3039                         up_cim_reg->ireg_offset_range =
3040                                                 t5_up_cim_reg_array[i][3];
3041                 } else if (is_t6(padap)) {
3042                         up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
3043                         up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
3044                         up_cim_reg->ireg_local_offset =
3045                                                 t6_up_cim_reg_array[i][2];
3046                         up_cim_reg->ireg_offset_range =
3047                                                 t6_up_cim_reg_array[i][3];
3048                 }
3049
3050                 rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3051
3052                 up_cim++;
3053         }
3054
3055         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3056         if (rc)
3057                 goto err1;
3058
3059         rc = compress_buff(&scratch_buff, dbg_buff);
3060
3061 err1:
3062         release_scratch_buff(&scratch_buff, dbg_buff);
3063 err:
3064         return rc;
3065 }
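
/*
 * Each row of the t5/t6 UP CIM arrays consumed above is a 4-tuple:
 * { address register, data register, first local offset, count }.
 * A hypothetical row, for illustration only:
 */
#if 0
static const u32 example_up_cim_row[4] = {
        0x7b50,         /* ireg_addr: indirect address register */
        0x7b54,         /* ireg_data: indirect data register */
        0x2000,         /* ireg_local_offset: first offset to read */
        0x10,           /* ireg_offset_range: number of u32 reads */
};
#endif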
3066
3067 static int collect_mbox_log(struct cudbg_init *pdbg_init,
3068                             struct cudbg_buffer *dbg_buff,
3069                             struct cudbg_error *cudbg_err)
3070 {
3071 #ifdef notyet
3072         struct cudbg_buffer scratch_buff;
3073         struct cudbg_mbox_log *mboxlog = NULL;
3074         struct mbox_cmd_log *log = NULL;
3075         struct mbox_cmd *entry;
3076         u64 flit;
3077         u32 size;
3078         unsigned int entry_idx;
3079         int i, k, rc;
3080         u16 mbox_cmds;
3081
3082         if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
3083                 log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3084                         mboxlog_param.log;
3085                 mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3086                                 mboxlog_param.mbox_cmds;
3087         } else {
3088                 if (pdbg_init->verbose)
3089                         pdbg_init->print("Mbox log is not requested\n");
3090                 return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
3091         }
3092
3093         size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
3094         scratch_buff.size = size;
3095         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3096         if (rc)
3097                 goto err;
3098
3099         mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
3100
3101         for (k = 0; k < mbox_cmds; k++) {
3102                 entry_idx = log->cursor + k;
3103                 if (entry_idx >= log->size)
3104                         entry_idx -= log->size;
3105                 entry = mbox_cmd_log_entry(log, entry_idx);
3106
3107                 /* skip over unused entries */
3108                 if (entry->timestamp == 0)
3109                         continue;
3110
3111                 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
3112
3113                 for (i = 0; i < MBOX_LEN / 8; i++) {
3114                         flit = entry->cmd[i];
3115                         mboxlog->hi[i] = (u32)(flit >> 32);
3116                         mboxlog->lo[i] = (u32)flit;
3117                 }
3118
3119                 mboxlog++;
3120         }
3121
3122         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3123         if (rc)
3124                 goto err1;
3125
3126         rc = compress_buff(&scratch_buff, dbg_buff);
3127
3128 err1:
3129         release_scratch_buff(&scratch_buff, dbg_buff);
3130 err:
3131         return rc;
3132 #endif
3133         return (EDOOFUS);
3134 }
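
/*
 * The (not yet enabled) mailbox-log collector above splits each
 * 64-bit command flit into two 32-bit halves for storage.  A tiny
 * sketch of that split with an arbitrary value:
 */
#if 0
static void flit_split_example(u32 *hi, u32 *lo)
{
        u64 flit = 0x1122334455667788ULL;

        *hi = (u32)(flit >> 32);        /* 0x11223344 */
        *lo = (u32)flit;                /* 0x55667788 */
}
#endif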
3135
3136 static int collect_pbt_tables(struct cudbg_init *pdbg_init,
3137                               struct cudbg_buffer *dbg_buff,
3138                               struct cudbg_error *cudbg_err)
3139 {
3140         struct cudbg_buffer scratch_buff;
3141         struct adapter *padap = pdbg_init->adap;
3142         struct cudbg_pbt_tables *pbt = NULL;
3143         u32 size;
3144         u32 addr;
3145         int i, rc;
3146
3147         size = sizeof(struct cudbg_pbt_tables);
3148         scratch_buff.size = size;
3149
3150         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3151         if (rc)
3152                 goto err;
3153
3154         pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3155
3156         /* PBT dynamic entries */
3157         addr = CUDBG_CHAC_PBT_ADDR;
3158         for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3159                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3160                 if (rc) {
3161                         if (pdbg_init->verbose)
3162                                 pdbg_init->print("BUSY timeout reading "
3163                                          "CIM_HOST_ACC_CTRL\n");
3164                         goto err1;
3165                 }
3166         }
3167
3168         /* PBT static entries */
3169
3170         /* static entries start when bit 6 is set */
3171         addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3172         for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3173                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3174                 if (rc) {
3175                         if (pdbg_init->verbose)
3176                                 pdbg_init->print("BUSY timeout reading "
3177                                          "CIM_HOST_ACC_CTRL\n");
3178                         goto err1;
3179                 }
3180         }
3181
3182         /* LRF entries */
3183         addr = CUDBG_CHAC_PBT_LRF;
3184         for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3185                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3186                 if (rc) {
3187                         if (pdbg_init->verbose)
3188                                 pdbg_init->print("BUSY timeout reading "
3189                                          "CIM_HOST_ACC_CTRL\n");
3190                         goto err1;
3191                 }
3192         }
3193
3194         /* PBT data entries */
3195         addr = CUDBG_CHAC_PBT_DATA;
3196         for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3197                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3198                 if (rc) {
3199                         if (pdbg_init->verbose)
3200                                 pdbg_init->print("BUSY timeout reading "
3201                                          "CIM_HOST_ACC_CTRL\n");
3202                         goto err1;
3203                 }
3204         }
3205
3206         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3207         if (rc)
3208                 goto err1;
3209
3210         rc = compress_buff(&scratch_buff, dbg_buff);
3211
3212 err1:
3213         release_scratch_buff(&scratch_buff, dbg_buff);
3214 err:
3215         return rc;
3216 }
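
/*
 * Sketch of the PBT address arithmetic used above: dynamic entries
 * start at CUDBG_CHAC_PBT_ADDR, static entries at the same base with
 * bit 6 set, and every entry is one 32-bit word:
 */
#if 0
static u32 pbt_entry_addr(int i, int is_static)
{
        u32 addr = CUDBG_CHAC_PBT_ADDR;

        if (is_static)
                addr += (1 << 6);       /* static region base */
        return (addr + (i * 4));        /* 4 bytes per entry */
}
#endif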
3217
3218 static int collect_pm_indirect(struct cudbg_init *pdbg_init,
3219                                struct cudbg_buffer *dbg_buff,
3220                                struct cudbg_error *cudbg_err)
3221 {
3222         struct cudbg_buffer scratch_buff;
3223         struct adapter *padap = pdbg_init->adap;
3224         struct ireg_buf *ch_pm;
3225         u32 size;
3226         int i, rc, n;
3227
3228         n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3229         size = sizeof(struct ireg_buf) * n * 2;
3230         scratch_buff.size = size;
3231
3232         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3233         if (rc)
3234                 goto err;
3235
3236         ch_pm = (struct ireg_buf *)scratch_buff.data;
3237
3238         /* PM_RX */
3239         for (i = 0; i < n; i++) {
3240                 struct ireg_field *pm_pio = &ch_pm->tp_pio;
3241                 u32 *buff = ch_pm->outbuf;
3242
3243                 pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3244                 pm_pio->ireg_data = t5_pm_rx_array[i][1];
3245                 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3246                 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3247
3248                 t4_read_indirect(padap,
3249                                 pm_pio->ireg_addr,
3250                                 pm_pio->ireg_data,
3251                                 buff,
3252                                 pm_pio->ireg_offset_range,
3253                                 pm_pio->ireg_local_offset);
3254
3255                 ch_pm++;
3256         }
3257
3258         /* PM_TX */
3259         n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3260         for (i = 0; i < n; i++) {
3261                 struct ireg_field *pm_pio = &ch_pm->tp_pio;
3262                 u32 *buff = ch_pm->outbuf;
3263
3264                 pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3265                 pm_pio->ireg_data = t5_pm_tx_array[i][1];
3266                 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3267                 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3268
3269                 t4_read_indirect(padap,
3270                                 pm_pio->ireg_addr,
3271                                 pm_pio->ireg_data,
3272                                 buff,
3273                                 pm_pio->ireg_offset_range,
3274                                 pm_pio->ireg_local_offset);
3275
3276                 ch_pm++;
3277         }
3278
3279         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3280         if (rc)
3281                 goto err1;
3282
3283         rc = compress_buff(&scratch_buff, dbg_buff);
3284
3285 err1:
3286         release_scratch_buff(&scratch_buff, dbg_buff);
3287 err:
3288         return rc;
3290 }
3291
3292 static int collect_tid(struct cudbg_init *pdbg_init,
3293                        struct cudbg_buffer *dbg_buff,
3294                        struct cudbg_error *cudbg_err)
3295 {
3297         struct cudbg_buffer scratch_buff;
3298         struct adapter *padap = pdbg_init->adap;
3299         struct tid_info_region *tid;
3300         struct tid_info_region_rev1 *tid1;
3301         u32 para[7], val[7];
3302         u32 mbox, pf;
3303         int rc;
3304
3305         scratch_buff.size = sizeof(struct tid_info_region_rev1);
3306
3307         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3308         if (rc)
3309                 goto err;
3310
3311 #define FW_PARAM_DEV_A(param) \
3312         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3313          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3314 #define FW_PARAM_PFVF_A(param) \
3315         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3316          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
3317          V_FW_PARAMS_PARAM_Y(0) | \
3318          V_FW_PARAMS_PARAM_Z(0))
3319 #define MAX_ATIDS_A 8192U
3320
3321         tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3322         tid = &(tid1->tid);
3323         tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3324         tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3325         tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3326                              sizeof(struct cudbg_ver_hdr);
3327
3328         if (is_t5(padap)) {
3329                 tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3330                 tid1->tid_start = 0;
3331         } else if (is_t6(padap)) {
3332                 tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3333                 tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3334         }
3335
3336         tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3337
3338         para[0] = FW_PARAM_PFVF_A(FILTER_START);
3339         para[1] = FW_PARAM_PFVF_A(FILTER_END);
3340         para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3341         para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3342         para[4] = FW_PARAM_DEV_A(NTID);
3343         para[5] = FW_PARAM_PFVF_A(SERVER_START);
3344         para[6] = FW_PARAM_PFVF_A(SERVER_END);
3345
3346         rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, "t4cudq");
3347         if (rc)
3348                 goto err;
3349         mbox = padap->mbox;
3350         pf = padap->pf;
3351         rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3352         if (rc < 0) {
3353                 if (rc == -FW_EPERM) {
3354                         /* It looks like we don't have permission to use
3355                          * padap->mbox.
3356                          *
3357                          * Try mbox 4.  If it works, we'll continue to
3358                          * collect the rest of tid info from mbox 4.
3359                          * Else, quit trying to collect tid info.
3360                          */
3361                         mbox = 4;
3362                         pf = 4;
3363                         rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3364                         if (rc < 0) {
3365                                 cudbg_err->sys_err = rc;
3366                                 goto err1;
3367                         }
3368                 } else {
3369                         cudbg_err->sys_err = rc;
3370                         goto err1;
3371                 }
3372         }
3373
3374         tid->ftid_base = val[0];
3375         tid->nftids = val[1] - val[0] + 1;
3376         /* active filter region */
3377         if (val[2] != val[3]) {
3378 #ifdef notyet
3379                 tid->flags |= FW_OFLD_CONN;
3380 #endif
3381                 tid->aftid_base = val[2];
3382                 tid->aftid_end = val[3];
3383         }
3384         tid->ntids = val[4];
3385         tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3386         tid->stid_base = val[5];
3387         tid->nstids = val[6] - val[5] + 1;
3388
3389         if (chip_id(padap) >= CHELSIO_T6) {
3390                 para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3391                 para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3392                 rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3393                 if (rc < 0) {
3394                         cudbg_err->sys_err = rc;
3395                         goto err1;
3396                 }
3397
3398                 tid->hpftid_base = val[0];
3399                 tid->nhpftids = val[1] - val[0] + 1;
3400         }
3401
3402         if (chip_id(padap) <= CHELSIO_T5) {
3403                 tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3404                 tid->hash_base /= 4;
3405         } else
3406                 tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3407
3408         /* UO context range */
3409         para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3410         para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3411
3412         rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3413         if (rc < 0) {
3414                 cudbg_err->sys_err = rc;
3415                 goto err1;
3416         }
3417
3418         if (val[0] != val[1]) {
3419                 tid->uotid_base = val[0];
3420                 tid->nuotids = val[1] - val[0] + 1;
3421         }
3422         tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3423         tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3424
3425 #undef FW_PARAM_PFVF_A
3426 #undef FW_PARAM_DEV_A
3427 #undef MAX_ATIDS_A
3428
3429         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3430         if (rc)
3431                 goto err1;
3432         rc = compress_buff(&scratch_buff, dbg_buff);
3433
3434 err1:
3435         end_synchronized_op(padap, 0);
3436         release_scratch_buff(&scratch_buff, dbg_buff);
3437 err:
3438         return rc;
3439 }
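
/*
 * Minimal sketch of the firmware parameter queries used in
 * collect_tid() above: build mnemonic/index words and let the
 * firmware fill in the matching values.  The parameter pair here is
 * illustrative:
 */
#if 0
static int query_filter_range_example(struct adapter *padap, u32 *val)
{
        u32 param[2];

        param[0] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
                   V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_FILTER_START);
        param[1] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
                   V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_FILTER_END);

        return t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
                               param, val);
}
#endif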
3440
3441 static int collect_tx_rate(struct cudbg_init *pdbg_init,
3442                            struct cudbg_buffer *dbg_buff,
3443                            struct cudbg_error *cudbg_err)
3444 {
3445         struct cudbg_buffer scratch_buff;
3446         struct adapter *padap = pdbg_init->adap;
3447         struct tx_rate *tx_rate;
3448         u32 size;
3449         int rc;
3450
3451         size = sizeof(struct tx_rate);
3452         scratch_buff.size = size;
3453
3454         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3455         if (rc)
3456                 goto err;
3457
3458         tx_rate = (struct tx_rate *)scratch_buff.data;
3459         t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3460         tx_rate->nchan = padap->chip_params->nchan;
3461
3462         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3463         if (rc)
3464                 goto err1;
3465
3466         rc = compress_buff(&scratch_buff, dbg_buff);
3467
3468 err1:
3469         release_scratch_buff(&scratch_buff, dbg_buff);
3470 err:
3471         return rc;
3472 }
3473
3474 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3475 {
3476         *mask = x | y;
3477         y = (__force u64)cpu_to_be64(y);
3478         memcpy(addr, (char *)&y + 2, ETH_ALEN);
3479 }
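
/*
 * Worked example for cudbg_tcamxy2valmask() above: the care-mask is
 * x | y, and the MAC address sits in the low 48 bits of y, so
 * converting y to big endian and skipping the top two bytes yields
 * the address in network order.  Values below are hypothetical:
 */
#if 0
static void tcamxy_example(void)
{
        u8 addr[ETH_ALEN];
        u64 mask;

        /* y carries MAC 00:07:43:12:34:56 in its low 48 bits. */
        cudbg_tcamxy2valmask(0, 0x000743123456ULL, addr, &mask);
        /* addr[] = 00:07:43:12:34:56, mask = 0x000743123456 */
}
#endif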
3480
3481 static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3482 {
3483         if (is_t5(padap)) {
3484                 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3485                                                           A_MPS_VF_RPLCT_MAP3));
3486                 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3487                                                           A_MPS_VF_RPLCT_MAP2));
3488                 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3489                                                           A_MPS_VF_RPLCT_MAP1));
3490                 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3491                                                           A_MPS_VF_RPLCT_MAP0));
3492         } else {
3493                 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3494                                                           A_MPS_VF_RPLCT_MAP7));
3495                 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3496                                                           A_MPS_VF_RPLCT_MAP6));
3497                 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3498                                                           A_MPS_VF_RPLCT_MAP5));
3499                 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3500                                                           A_MPS_VF_RPLCT_MAP4));
3501         }
3502         mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3503         mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3504         mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3505         mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3506 }
3507
3508 static int collect_mps_tcam(struct cudbg_init *pdbg_init,
3509                             struct cudbg_buffer *dbg_buff,
3510                             struct cudbg_error *cudbg_err)
3511 {
3512         struct cudbg_buffer scratch_buff;
3513         struct adapter *padap = pdbg_init->adap;
3514         struct cudbg_mps_tcam *tcam = NULL;
3515         u32 size = 0, i, n, total_size = 0;
3516         u32 ctl, data2;
3517         u64 tcamy, tcamx, val;
3518         int rc;
3519
3520         n = padap->chip_params->mps_tcam_size;
3521         size = sizeof(struct cudbg_mps_tcam) * n;
3522         scratch_buff.size = size;
3523
3524         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3525         if (rc)
3526                 goto err;
3527         memset(scratch_buff.data, 0, size);
3528
3529         tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
3530         for (i = 0; i < n; i++) {
3531                 if (chip_id(padap) >= CHELSIO_T6) {
3532                         /* CtlReqID   - 1: use Host Driver Requester ID
3533                          * CtlCmdType - 0: Read, 1: Write
3534                          * CtlTcamSel - 0: TCAM0, 1: TCAM1
3535                          * CtlXYBitSel- 0: Y bit, 1: X bit
3536                          */
3537
3538                         /* Read tcamy */
3539                         ctl = (V_CTLREQID(1) |
3540                                V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
3541                         if (i < 256)
3542                                 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
3543                         else
3544                                 ctl |= V_CTLTCAMINDEX(i - 256) |
3545                                        V_CTLTCAMSEL(1);
3546
3547                         t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3548                         val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3549                         tcamy = G_DMACH(val) << 32;
3550                         tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3551                         data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3552                         tcam->lookup_type = G_DATALKPTYPE(data2);
3553
3554                         /* 0 - Outer header, 1 - Inner header
3555                          * [71:48] bit locations are overloaded for
3556                          * outer vs. inner lookup types.
3557                          */
3558
3559                         if (tcam->lookup_type &&
3560                             (tcam->lookup_type != M_DATALKPTYPE)) {
3561                                 /* Inner header VNI */
3562                                 tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
3563                                              (G_DATAVIDH1(data2) << 16) |
3564                                              G_VIDL(val);
3565                                 tcam->dip_hit = data2 & F_DATADIPHIT;
3566                         } else {
3567                                 tcam->vlan_vld = data2 & F_DATAVIDH2;
3568                                 tcam->ivlan = G_VIDL(val);
3569                         }
3570
3571                         tcam->port_num = G_DATAPORTNUM(data2);
3572
3573                         /* Read tcamx. Change the control param */
3574                         ctl |= V_CTLXYBITSEL(1);
3575                         t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3576                         val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3577                         tcamx = G_DMACH(val) << 32;
3578                         tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3579                         data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3580                         if (tcam->lookup_type &&
3581                             (tcam->lookup_type != M_DATALKPTYPE)) {
3582                                 /* Inner header VNI mask */
3583                                 tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
3584                                              (G_DATAVIDH1(data2) << 16) |
3585                                              G_VIDL(val);
3586                         }
3587                 } else {
3588                         tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
3589                         tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
3590                 }
3591
3592                 if (tcamx & tcamy)
3593                         continue;
3594
3595                 tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
3596                 tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
3597
3598                 if (is_t5(padap))
3599                         tcam->repli = (tcam->cls_lo & F_REPLICATE);
3600                 else if (is_t6(padap))
3601                         tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
3602
3603                 if (tcam->repli) {
3604                         struct fw_ldst_cmd ldst_cmd;
3605                         struct fw_ldst_mps_rplc mps_rplc;
3606
3607                         memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3608                         ldst_cmd.op_to_addrspace =
3609                                 htonl(V_FW_CMD_OP(FW_LDST_CMD) |
3610                                       F_FW_CMD_REQUEST |
3611                                       F_FW_CMD_READ |
3612                                       V_FW_LDST_CMD_ADDRSPACE(
3613                                               FW_LDST_ADDRSPC_MPS));
3614
3615                         ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3616
3617                         ldst_cmd.u.mps.rplc.fid_idx =
3618                                 htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
3619                                       V_FW_LDST_CMD_IDX(i));
3620
3621                         rc = begin_synchronized_op(padap, NULL,
3622                             SLEEP_OK | INTR_OK, "t4cudm");
3623                         if (rc == 0) {
3624                                 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
3625                                                 sizeof(ldst_cmd), &ldst_cmd);
3626                                 end_synchronized_op(padap, 0);
3627                         }
3628
3629                         if (rc)
3630                                 mps_rpl_backdoor(padap, &mps_rplc);
3631                         else
3632                                 mps_rplc = ldst_cmd.u.mps.rplc;
3633
3634                         tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
3635                         tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
3636                         tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
3637                         tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
3638                         if (padap->chip_params->mps_rplc_size >
3639                                         CUDBG_MAX_RPLC_SIZE) {
3640                                 tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
3641                                 tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
3642                                 tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
3643                                 tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
3644                         }
3645                 }
3646                 cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
3647
3648                 tcam->idx = i;
3649                 tcam->rplc_size = padap->chip_params->mps_rplc_size;
3650
3651                 total_size += sizeof(struct cudbg_mps_tcam);
3652
3653                 tcam++;
3654         }
3655
3656         if (total_size == 0) {
3657                 rc = CUDBG_SYSTEM_ERROR;
3658                 goto err1;
3659         }
3660
3661         scratch_buff.size = total_size;
3662         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3663         if (rc)
3664                 goto err1;
3665
3666         rc = compress_buff(&scratch_buff, dbg_buff);
3667
3668 err1:
3669         scratch_buff.size = size;
3670         release_scratch_buff(&scratch_buff, dbg_buff);
3671 err:
3672         return rc;
3673 }
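
/*
 * Sketch of the T6 TCAM control-word selection used above: entries
 * 0-255 live in TCAM0 and entries 256 and up in TCAM1 at index
 * i - 256; the X/Y bit select is flipped for the second read:
 */
#if 0
static u32 tcam_read_ctl(u32 i, int want_x)
{
        u32 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) |
                  V_CTLXYBITSEL(want_x ? 1 : 0);

        if (i < 256)
                ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
        else
                ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
        return (ctl);
}
#endif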
3674
3675 static int collect_pcie_config(struct cudbg_init *pdbg_init,
3676                                struct cudbg_buffer *dbg_buff,
3677                                struct cudbg_error *cudbg_err)
3678 {
3679         struct cudbg_buffer scratch_buff;
3680         struct adapter *padap = pdbg_init->adap;
3681         u32 size, *value, j;
3682         int i, rc, n;
3683
3684         size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3685         n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3686         scratch_buff.size = size;
3687
3688         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3689         if (rc)
3690                 goto err;
3691
3692         value = (u32 *)scratch_buff.data;
3693         for (i = 0; i < n; i++) {
3694                 for (j = t5_pcie_config_array[i][0];
3695                      j <= t5_pcie_config_array[i][1]; j += 4) {
3696                         *value++ = t4_hw_pci_read_cfg4(padap, j);
3697                 }
3698         }
3699
3700         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3701         if (rc)
3702                 goto err1;
3703
3704         rc = compress_buff(&scratch_buff, dbg_buff);
3705
3706 err1:
3707         release_scratch_buff(&scratch_buff, dbg_buff);
3708 err:
3709         return rc;
3710 }
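
/*
 * Each row of t5_pcie_config_array consumed above is an inclusive
 * { start, end } byte range of PCIe config space, read in 4-byte
 * steps.  Sketch of one such walk over a hypothetical 0x0-0x3c
 * range:
 */
#if 0
static void pcie_cfg_walk_example(struct adapter *padap, u32 *out)
{
        u32 j;

        for (j = 0x0; j <= 0x3c; j += 4)
                *out++ = t4_hw_pci_read_cfg4(padap, j);
}
#endif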
3711
3712 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
3713                           struct cudbg_tid_data *tid_data)
3714 {
3715         int i, cmd_retry = 8;
3716         struct adapter *padap = pdbg_init->adap;
3717         u32 val;
3718
3719         /* Fill REQ_DATA regs with 0's */
3720         for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3721                 t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
3722
3723         /* Write DBIG command */
3724         val = (0x4 << S_DBGICMD) | tid;
3725         t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
3726         tid_data->dbig_cmd = val;
3727
3728         val = 0;
3729         val |= 1 << S_DBGICMDSTRT;
3730         val |= 1;  /* LE mode */
3731         t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
3732         tid_data->dbig_conf = val;
3733
3734         /* Poll the DBGICMDBUSY bit */
3735         val = 1;
3736         while (val) {
3737                 val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
3738                 val = (val >> S_DBGICMDBUSY) & 1;
3739                 cmd_retry--;
3740                 if (!cmd_retry) {
3741                         if (pdbg_init->verbose)
3742                                 pdbg_init->print("%s(): Timeout waiting for non-busy\n",
3743                                          __func__);
3744                         return CUDBG_SYSTEM_ERROR;
3745                 }
3746         }
3747
3748         /* Check RESP status */
3749         val = 0;
3750         val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
3751         tid_data->dbig_rsp_stat = val;
3752         if (!(val & 1)) {
3753                 if (pdbg_init->verbose)
3754                         pdbg_init->print("%s(): DBGI command failed\n", __func__);
3755                 return CUDBG_SYSTEM_ERROR;
3756         }
3757
3758         /* Read RESP data */
3759         for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3760                 tid_data->data[i] = t4_read_reg(padap,
3761                                                 A_LE_DB_DBGI_RSP_DATA +
3762                                                 (i << 2));
3763
3764         tid_data->tid = tid;
3765
3766         return 0;
3767 }
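
/*
 * Minimal usage sketch for cudbg_read_tid() above: issue one DBGI
 * read per tid and stop on the first failure.  The tid count is
 * illustrative:
 */
#if 0
static int dbgi_read_example(struct cudbg_init *pdbg_init)
{
        struct cudbg_tid_data td;
        u32 tid;
        int rc;

        for (tid = 0; tid < 16; tid++) {
                rc = cudbg_read_tid(pdbg_init, tid, &td);
                if (rc)
                        return (rc);
        }
        return (0);
}
#endif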
3768
3769 static int collect_le_tcam(struct cudbg_init *pdbg_init,
3770                            struct cudbg_buffer *dbg_buff,
3771                            struct cudbg_error *cudbg_err)
3772 {
3773         struct cudbg_buffer scratch_buff;
3774         struct adapter *padap = pdbg_init->adap;
3775         struct cudbg_tcam tcam_region = {0};
3776         struct cudbg_tid_data *tid_data = NULL;
3777         u32 value, bytes = 0, bytes_left = 0;
3778         u32 i;
3779         int rc, size;
3780
3781         /* Get the LE regions */
3782         /* Get hash base index */
3783         value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3784         tcam_region.tid_hash_base = value;
3785
3786         /* Get routing table index */
3787         value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
3788         tcam_region.routing_start = value;
3789
3790         /* Get clip table index */
3791         value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
3792         tcam_region.clip_start = value;
3793
3794         /* Get filter table index */
3795         value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
3796         tcam_region.filter_start = value;
3797
3798         /* Get server table index */
3799         value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
3800         tcam_region.server_start = value;
3801
3802         /* Check whether hash is enabled and calculate the max tids */
3803         value = t4_read_reg(padap, A_LE_DB_CONFIG);
3804         if ((value >> S_HASHEN) & 1) {
3805                 value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
3806                 if (chip_id(padap) > CHELSIO_T5)
3807                         tcam_region.max_tid = (value & 0xFFFFF) +
3808                                               tcam_region.tid_hash_base;
3809                 else {      /* for T5 */
3810                         value = G_HASHTIDSIZE(value);
3811                         value = 1 << value;
3812                         tcam_region.max_tid = value +
3813                                 tcam_region.tid_hash_base;
3814                 }
3815         } else   /* hash not enabled */
3816                 tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
3817
3818         size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
3819         size += sizeof(struct cudbg_tcam);
3820         scratch_buff.size = size;
3821
3822         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3823         if (rc)
3824                 goto err;
3825
3826         rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
3827         if (rc)
3828                 goto err;
3829
3830         memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
3831
3832         tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
3833                                              scratch_buff.data) + 1);
3834         bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
3835         bytes = sizeof(struct cudbg_tcam);
3836
3837         /* read all tid */
3838         for (i = 0; i < tcam_region.max_tid; i++) {
3839                 if (bytes_left < sizeof(struct cudbg_tid_data)) {
3840                         scratch_buff.size = bytes;
3841                         rc = compress_buff(&scratch_buff, dbg_buff);
3842                         if (rc)
3843                                 goto err1;
3844                         scratch_buff.size = CUDBG_CHUNK_SIZE;
3845                         release_scratch_buff(&scratch_buff, dbg_buff);
3846
3847                         /* new alloc */
3848                         rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
3849                                               &scratch_buff);
3850                         if (rc)
3851                                 goto err;
3852
3853                         tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
3854                         bytes_left = CUDBG_CHUNK_SIZE;
3855                         bytes = 0;
3856                 }
3857
3858                 rc = cudbg_read_tid(pdbg_init, i, tid_data);
3859
3860                 if (rc) {
3861                         cudbg_err->sys_err = rc;
3862                         goto err1;
3863                 }
3864
3865                 tid_data++;
3866                 bytes_left -= sizeof(struct cudbg_tid_data);
3867                 bytes += sizeof(struct cudbg_tid_data);
3868         }
3869
3870         if (bytes) {
3871                 scratch_buff.size = bytes;
3872                 rc = compress_buff(&scratch_buff, dbg_buff);
3873         }
3874
3875 err1:
3876         scratch_buff.size = CUDBG_CHUNK_SIZE;
3877         release_scratch_buff(&scratch_buff, dbg_buff);
3878 err:
3879         return rc;
3880 }
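
/*
 * Sketch of the max-tid computation used in collect_le_tcam() above,
 * assuming a raw A_LE_DB_HASH_CONFIG value and the hash base: T5
 * encodes the hash region size as a power of two, T6 and later as a
 * direct count in the low 20 bits:
 */
#if 0
static u32 max_tid_example(u32 hash_config, u32 hash_base, int t6_or_later)
{
        if (t6_or_later)
                return ((hash_config & 0xFFFFF) + hash_base);
        return ((1U << G_HASHTIDSIZE(hash_config)) + hash_base);
}
#endif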
3881
3882 static int collect_ma_indirect(struct cudbg_init *pdbg_init,
3883                                struct cudbg_buffer *dbg_buff,
3884                                struct cudbg_error *cudbg_err)
3885 {
3886         struct cudbg_buffer scratch_buff;
3887         struct adapter *padap = pdbg_init->adap;
3888         struct ireg_buf *ma_indr = NULL;
3889         u32 size, j;
3890         int i, rc, n;
3891
3892         if (chip_id(padap) < CHELSIO_T6) {
3893                 if (pdbg_init->verbose)
3894                         pdbg_init->print("MA indirect available only in T6\n");
3895                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3896                 goto err;
3897         }
3898
3899         n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
3900         size = sizeof(struct ireg_buf) * n * 2;
3901         scratch_buff.size = size;
3902
3903         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3904         if (rc)
3905                 goto err;
3906
3907         ma_indr = (struct ireg_buf *)scratch_buff.data;
3908
3909         for (i = 0; i < n; i++) {
3910                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
3911                 u32 *buff = ma_indr->outbuf;
3912
3913                 ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
3914                 ma_fli->ireg_data = t6_ma_ireg_array[i][1];
3915                 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
3916                 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
3917
3918                 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
3919                                  buff, ma_fli->ireg_offset_range,
3920                                  ma_fli->ireg_local_offset);
3921
3922                 ma_indr++;
3924         }
3925
3926         n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
3927
3928         for (i = 0; i < n; i++) {
3929                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
3930                 u32 *buff = ma_indr->outbuf;
3931
3932                 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
3933                 ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
3934                 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
3935
3936                 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
3937                         t4_read_indirect(padap, ma_fli->ireg_addr,
3938                                          ma_fli->ireg_data, buff, 1,
3939                                          ma_fli->ireg_local_offset);
3940                         buff++;
3941                         ma_fli->ireg_local_offset += 0x20;
3942                 }
3943                 ma_indr++;
3944         }
3945
3946         rc = write_compression_hdr(&scratch_buff, dbg_buff);
3947         if (rc)
3948                 goto err1;
3949
3950         rc = compress_buff(&scratch_buff, dbg_buff);
3951
3952 err1:
3953         release_scratch_buff(&scratch_buff, dbg_buff);
3954 err:
3955         return rc;
3956 }
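
/*
 * The second MA array above is read one register at a time, with the
 * local offset advancing by 0x20 between reads instead of as one
 * contiguous range.  Sketch of that strided walk (stride taken from
 * the loop above; parameters are illustrative):
 */
#if 0
static void ma_stride_walk(struct adapter *padap, u32 addr, u32 data,
                           u32 *buff, u32 count, u32 off)
{

        while (count--) {
                t4_read_indirect(padap, addr, data, buff++, 1, off);
                off += 0x20;
        }
}
#endif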
3957
3958 static int collect_hma_indirect(struct cudbg_init *pdbg_init,
3959                                struct cudbg_buffer *dbg_buff,
3960                                struct cudbg_error *cudbg_err)
3961 {
3962         struct cudbg_buffer scratch_buff;
3963         struct adapter *padap = pdbg_init->adap;
3964         struct ireg_buf *hma_indr = NULL;
3965         u32 size;
3966         int i, rc, n;
3967
3968         if (chip_id(padap) < CHELSIO_T6) {
3969                 if (pdbg_init->verbose)
3970                         pdbg_init->print("HMA indirect available only in T6\n");
3971                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3972                 goto err;
3973         }
3974
3975         n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3976         size = sizeof(struct ireg_buf) * n;
3977         scratch_buff.size = size;
3978
3979         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3980         if (rc)
3981                 goto err;
3982
3983         hma_indr = (struct ireg_buf *)scratch_buff.data;
3984
3985         for (i = 0; i < n; i++) {
3986                 struct ireg_field *hma_fli = &hma_indr->tp_pio;
3987                 u32 *buff = hma_indr->outbuf;
3988
3989                 hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3990                 hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3991                 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3992                 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3993
3994                 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3995                                  buff, hma_fli->ireg_offset_range,
3996                                  hma_fli->ireg_local_offset);
3997
3998                 hma_indr++;
4000         }
4001
4002         rc = write_compression_hdr(&scratch_buff, dbg_buff);
4003         if (rc)
4004                 goto err1;
4005
4006         rc = compress_buff(&scratch_buff, dbg_buff);
4007
4008 err1:
4009         release_scratch_buff(&scratch_buff, dbg_buff);
4010 err:
4011         return rc;
4012 }
4013
4014 static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
4015                                  struct cudbg_buffer *dbg_buff,
4016                                  struct cudbg_error *cudbg_err)
4017 {
4018         struct cudbg_buffer scratch_buff;
4019         struct adapter *padap = pdbg_init->adap;
4020         struct ireg_buf *ch_pcie;
4021         u32 size;
4022         int i, rc, n;
4023
4024         n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
4025         size = sizeof(struct ireg_buf) * n * 2;
4026         scratch_buff.size = size;
4027
4028         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4029         if (rc)
4030                 goto err;
4031
4032         ch_pcie = (struct ireg_buf *)scratch_buff.data;
4033
4034         /* PCIE_PDBG */
4035         for (i = 0; i < n; i++) {
4036                 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4037                 u32 *buff = ch_pcie->outbuf;
4038
4039                 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4040                 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4041                 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4042                 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4043
4044                 t4_read_indirect(padap,
4045                                 pcie_pio->ireg_addr,
4046                                 pcie_pio->ireg_data,
4047                                 buff,
4048                                 pcie_pio->ireg_offset_range,
4049                                 pcie_pio->ireg_local_offset);
4050
4051                 ch_pcie++;
4052         }
4053
4054         /* PCIE_CDBG */
4055         n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4056         for (i = 0; i < n; i++) {
4057                 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4058                 u32 *buff = ch_pcie->outbuf;
4059
4060                 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4061                 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4062                 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4063                 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4064
4065                 t4_read_indirect(padap,
4066                                 pcie_pio->ireg_addr,
4067                                 pcie_pio->ireg_data,
4068                                 buff,
4069                                 pcie_pio->ireg_offset_range,
4070                                 pcie_pio->ireg_local_offset);
4071
4072                 ch_pcie++;
4073         }
4074
4075         rc = write_compression_hdr(&scratch_buff, dbg_buff);
4076         if (rc)
4077                 goto err1;
4078
4079         rc = compress_buff(&scratch_buff, dbg_buff);
4080
4081 err1:
4082         release_scratch_buff(&scratch_buff, dbg_buff);
4083 err:
4084         return rc;
4086 }
4087
4088 static int collect_tp_indirect(struct cudbg_init *pdbg_init,
4089                                struct cudbg_buffer *dbg_buff,
4090                                struct cudbg_error *cudbg_err)
4091 {
4092         struct cudbg_buffer scratch_buff;
4093         struct adapter *padap = pdbg_init->adap;
4094         struct ireg_buf *ch_tp_pio;
4095         u32 size;
4096         int i, rc, n = 0;
4097
4098         if (is_t5(padap))
4099                 n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
4100         else if (is_t6(padap))
4101                 n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
4102
4103         size = sizeof(struct ireg_buf) * n * 3;
4104         scratch_buff.size = size;
4105
4106         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4107         if (rc)
4108                 goto err;
4109
4110         ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
4111
4112         /* TP_PIO */
4113         for (i = 0; i < n; i++) {
4114                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4115                 u32 *buff = ch_tp_pio->outbuf;
4116
4117                 if (is_t5(padap)) {
4118                         tp_pio->ireg_addr = t5_tp_pio_array[i][0];
4119                         tp_pio->ireg_data = t5_tp_pio_array[i][1];
4120                         tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
4121                         tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
4122                 } else if (is_t6(padap)) {
4123                         tp_pio->ireg_addr = t6_tp_pio_array[i][0];
4124                         tp_pio->ireg_data = t6_tp_pio_array[i][1];
4125                         tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
4126                         tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
4127                 }
4128
4129                 t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
4130                                tp_pio->ireg_local_offset, true);
4131
4132                 ch_tp_pio++;
4133         }
4134
4135         /* TP_TM_PIO */
4136         if (is_t5(padap))
4137                 n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
4138         else if (is_t6(padap))
4139                 n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
4140
4141         for (i = 0; i < n; i++) {
4142                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4143                 u32 *buff = ch_tp_pio->outbuf;
4144
4145                 if (is_t5(padap)) {
4146                         tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
4147                         tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
4148                         tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
4149                         tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
4150                 } else if (is_t6(padap)) {
4151                         tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
4152                         tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
4153                         tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
4154                         tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
4155                 }
4156
4157                 t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
4158                                   tp_pio->ireg_local_offset, true);
4159
4160                 ch_tp_pio++;
4161         }
4162
4163         /* TP_MIB_INDEX */
4164         if (is_t5(padap))
4165                 n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
4166         else if (is_t6(padap))
4167                 n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
4168
4169         for (i = 0; i < n; i++) {
4170                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4171                 u32 *buff = ch_tp_pio->outbuf;
4172
4173                 if (is_t5(padap)) {
4174                         tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
4175                         tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
4176                         tp_pio->ireg_local_offset =
4177                                 t5_tp_mib_index_array[i][2];
4178                         tp_pio->ireg_offset_range =
4179                                 t5_tp_mib_index_array[i][3];
4180                 } else if (is_t6(padap)) {
4181                         tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
4182                         tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
4183                         tp_pio->ireg_local_offset =
4184                                 t6_tp_mib_index_array[i][2];
4185                         tp_pio->ireg_offset_range =
4186                                 t6_tp_mib_index_array[i][3];
4187                 }
4188
4189                 t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
4190                                tp_pio->ireg_local_offset, true);
4191
4192                 ch_tp_pio++;
4193         }
4194
4195         rc = write_compression_hdr(&scratch_buff, dbg_buff);
4196         if (rc)
4197                 goto err1;
4198
4199         rc = compress_buff(&scratch_buff, dbg_buff);
4200
4201 err1:
4202         release_scratch_buff(&scratch_buff, dbg_buff);
4203 err:
4204         return rc;
4205 }
4206
static int collect_sge_indirect(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        struct cudbg_buffer scratch_buff;
        struct adapter *padap = pdbg_init->adap;
        struct ireg_buf *ch_sge_dbg;
        u32 size;
        int i, rc;

        size = sizeof(struct ireg_buf) * 2;
        scratch_buff.size = size;

        rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
        if (rc)
                goto err;

        ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;

        for (i = 0; i < 2; i++) {
                struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
                u32 *buff = ch_sge_dbg->outbuf;

                sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
                sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
                sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
                sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];

                t4_read_indirect(padap,
                                sge_pio->ireg_addr,
                                sge_pio->ireg_data,
                                buff,
                                sge_pio->ireg_offset_range,
                                sge_pio->ireg_local_offset);

                ch_sge_dbg++;
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

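/*
 * Snapshot a small fixed set of TP, PCIE, and SGE debug registers
 * (the full list is spelled out in the comment below) into one
 * scratch buffer of nreg u32s.  T6 parts carry one extra PCIE
 * register, hence the per-chip nreg.
 */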
static int collect_full(struct cudbg_init *pdbg_init,
                        struct cudbg_buffer *dbg_buff,
                        struct cudbg_error *cudbg_err)
{
        struct cudbg_buffer scratch_buff;
        struct adapter *padap = pdbg_init->adap;
        u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
        u32 *sp;
        int rc;
        int nreg = 0;

        /* Collect Registers:
         * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
         * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
         * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
         * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
         * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
         * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3), T6 only,
         * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
         */

        if (is_t5(padap))
                nreg = 6;
        else if (is_t6(padap))
                nreg = 7;

        scratch_buff.size = nreg * sizeof(u32);

        rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
        if (rc)
                goto err;

        sp = (u32 *)scratch_buff.data;

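        /*
         * Each TP_DBG register is fetched with t4_tp_pio_read(): the
         * local offset is the base offset stored in the relevant
         * t5_tp_pio_array row plus the register's index within that
         * group, matching the addresses listed above.
         */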
        /* TP_DBG_SCHED_TX */
        reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
        reg_offset_range = 1;

        t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

        sp++;

        /* TP_DBG_SCHED_RX */
        reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
        reg_offset_range = 1;

        t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

        sp++;

        /* TP_DBG_CSIDE_INT */
        reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
        reg_offset_range = 1;

        t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

        sp++;

        /* TP_DBG_ESIDE_INT */
        reg_local_offset = t5_tp_pio_array[8][2] + 3;
        reg_offset_range = 1;

        t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);

        sp++;

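        /*
         * The PCIE debug registers sit behind an address/data pair, so
         * they go through t4_read_indirect() with the address, data,
         * and base offset taken from the t5_pcie_cdbg_array row.
         */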
        /* PCIE_CDEBUG_INDEX[AppData0] */
        reg_addr = t5_pcie_cdbg_array[0][0];
        reg_data = t5_pcie_cdbg_array[0][1];
        reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
        reg_offset_range = 1;

        t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
                         reg_local_offset);

        sp++;

        if (is_t6(padap)) {
                /* PCIE_CDEBUG_INDEX[AppData1] */
                reg_addr = t5_pcie_cdbg_array[0][0];
                reg_data = t5_pcie_cdbg_array[0][1];
                reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
                reg_offset_range = 1;

                t4_read_indirect(padap, reg_addr, reg_data, sp,
                                 reg_offset_range, reg_local_offset);

                sp++;
        }

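        /* The last register is directly addressable; no indirection. */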
        /* SGE_DEBUG_DATA_HIGH_INDEX_10 */
        *sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

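/*
 * Gather the adapter's VPD fields (sn, bn, na, mn), the serial-config
 * and VPD versions, and the firmware version.  The body is compiled
 * out under "notyet"; until it is enabled this entity just reports
 * EDOOFUS.
 */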
static int collect_vpd_data(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
#ifdef notyet
        struct cudbg_buffer scratch_buff;
        struct adapter *padap = pdbg_init->adap;
        struct struct_vpd_data *vpd_data;
        char vpd_ver[4];
        u32 fw_vers;
        u32 size;
        int rc;

        size = sizeof(struct struct_vpd_data);
        scratch_buff.size = size;

        rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
        if (rc)
                goto err;

        vpd_data = (struct struct_vpd_data *)scratch_buff.data;

        if (is_t5(padap)) {
                read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
                read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
                read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
                read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
        } else if (is_t6(padap)) {
                read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
                read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
                read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
                read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
        }

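        /*
         * Version queries prefer the firmware mailbox when the
         * firmware is attached; on failure, or with no firmware, fall
         * back to reading the values straight out of VPD, the
         * "backdoor" path below.
         */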
        if (is_fw_attached(pdbg_init)) {
                rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
        } else {
                rc = 1;
        }

        if (rc) {
                /* Fall back to the backdoor mechanism. */
                rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
                                  (u8 *)&vpd_data->scfg_vers);
                if (rc)
                        goto err1;
        }

        if (is_fw_attached(pdbg_init)) {
                rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
        } else {
                rc = 1;
        }

        if (rc) {
                /* Fall back to the backdoor mechanism. */
                rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
                                  (u8 *)vpd_ver);
                if (rc)
                        goto err1;
                /*
                 * read_vpd_reg returns the stored hex bytes; convert
                 * them to a character string.  The VPD version is only
                 * 2 bytes.
                 */
                sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
                vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
        }

        /* Get FW version if it's not already filled in. */
        fw_vers = padap->params.fw_vers;
        if (!fw_vers) {
                rc = t4_get_fw_version(padap, &fw_vers);
                if (rc)
                        goto err1;
        }

        vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
        vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
        vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
        vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
#endif
        return (EDOOFUS);
}